+2014-06-24 Trevor Saunders <tsaunders@mozilla.com>
+
+ * hash-table.h: Remove a layer of indirection from hash_table so that
+ it contains the hash table's data instead of a pointer to the data.
+ * alloc-pool.c, asan.c, attribs.c, bitmap.c, cfg.c,
+ config/arm/arm.c, config/i386/winnt.c, config/ia64/ia64.c,
+ config/mips/mips.c, config/sol2.c, coverage.c, cselib.c,
+ data-streamer-out.c, dse.c, dwarf2cfi.c, dwarf2out.c, except.c,
+ fold-const.c, gcse.c, ggc-common.c,
+ gimple-ssa-strength-reduction.c, gimplify.c,
+ graphite-clast-to-gimple.c, graphite-dependences.c,
+ graphite-htab.h, graphite.c, haifa-sched.c, ipa-devirt.c,
+ ipa-profile.c, ira-color.c, ira-costs.c, loop-invariant.c,
+ loop-iv.c, loop-unroll.c, lto-streamer-in.c, lto-streamer-out.c,
+ lto-streamer.c, lto-streamer.h, passes.c, plugin.c,
+ postreload-gcse.c, sese.c, statistics.c, store-motion.c,
+ trans-mem.c, tree-browser.c, tree-cfg.c, tree-complex.c,
+ tree-eh.c, tree-into-ssa.c, tree-parloops.c, tree-sra.c,
+ tree-ssa-ccp.c, tree-ssa-coalesce.c, tree-ssa-dom.c,
+ tree-ssa-live.c, tree-ssa-loop-im.c,
+ tree-ssa-loop-ivopts.c, tree-ssa-phiopt.c, tree-ssa-pre.c,
+ tree-ssa-reassoc.c, tree-ssa-sccvn.c, tree-ssa-strlen.c,
+ tree-ssa-structalias.c, tree-ssa-tail-merge.c,
+ tree-ssa-threadupdate.c, tree-ssa-uncprop.c,
+ tree-vect-data-refs.c, tree-vect-loop.c, tree-vectorizer.c,
+ tree-vectorizer.h, valtrack.c, valtrack.h, var-tracking.c,
+ vtable-verify.c, vtable-verify.h: Adjust.
+
2014-06-24 Richard Biener <rguenther@suse.de>
PR tree-optimization/61572
}
/* Hashtable mapping alloc_pool names to descriptors. */
-static hash_table <alloc_pool_hasher> alloc_pool_hash;
+static hash_table<alloc_pool_hasher> *alloc_pool_hash;
/* For given name, return descriptor, create new if needed. */
static struct alloc_pool_descriptor *
{
struct alloc_pool_descriptor **slot;
- if (!alloc_pool_hash.is_created ())
- alloc_pool_hash.create (10);
+ if (!alloc_pool_hash)
+ alloc_pool_hash = new hash_table<alloc_pool_hasher> (10);
- slot = alloc_pool_hash.find_slot_with_hash (name,
- htab_hash_pointer (name), INSERT);
+ slot = alloc_pool_hash->find_slot_with_hash (name,
+ htab_hash_pointer (name),
+ INSERT);
if (*slot)
return *slot;
*slot = XCNEW (struct alloc_pool_descriptor);
if (! GATHER_STATISTICS)
return;
- if (!alloc_pool_hash.is_created ())
+ if (!alloc_pool_hash)
return;
fprintf (stderr, "\nAlloc-pool Kind Elt size Pools Allocated (elts) Peak (elts) Leak (elts)\n");
fprintf (stderr, "--------------------------------------------------------------------------------------------------------------\n");
info.total_created = 0;
info.total_allocated = 0;
- alloc_pool_hash.traverse <struct output_info *,
- print_alloc_pool_statistics> (&info);
+ alloc_pool_hash->traverse <struct output_info *,
+ print_alloc_pool_statistics> (&info);
fprintf (stderr, "--------------------------------------------------------------------------------------------------------------\n");
fprintf (stderr, "%-22s %7lu %10lu\n",
"Total", info.total_created, info.total_allocated);
&& operand_equal_p (m1->start, m2->start, 0));
}
-static hash_table <asan_mem_ref_hasher> asan_mem_ref_ht;
+static hash_table<asan_mem_ref_hasher> *asan_mem_ref_ht;
/* Returns a reference to the hash table containing memory references.
This function ensures that the hash table is created. Note that
this hash table is updated by the function
update_mem_ref_hash_table. */
-static hash_table <asan_mem_ref_hasher> &
+static hash_table<asan_mem_ref_hasher> *
get_mem_ref_hash_table ()
{
- if (!asan_mem_ref_ht.is_created ())
- asan_mem_ref_ht.create (10);
+ if (!asan_mem_ref_ht)
+ asan_mem_ref_ht = new hash_table<asan_mem_ref_hasher> (10);
return asan_mem_ref_ht;
}
static void
empty_mem_ref_hash_table ()
{
- if (asan_mem_ref_ht.is_created ())
- asan_mem_ref_ht.empty ();
+ if (asan_mem_ref_ht)
+ asan_mem_ref_ht->empty ();
}
/* Free the memory references hash table. */
static void
free_mem_ref_resources ()
{
- if (asan_mem_ref_ht.is_created ())
- asan_mem_ref_ht.dispose ();
+ delete asan_mem_ref_ht;
+ asan_mem_ref_ht = NULL;
if (asan_mem_ref_alloc_pool)
{
asan_mem_ref r;
asan_mem_ref_init (&r, ref, access_size);
- return (get_mem_ref_hash_table ().find (&r) != NULL);
+ return (get_mem_ref_hash_table ()->find (&r) != NULL);
}
/* Return true iff the memory reference REF has been instrumented. */
static void
update_mem_ref_hash_table (tree ref, HOST_WIDE_INT access_size)
{
- hash_table <asan_mem_ref_hasher> ht = get_mem_ref_hash_table ();
+ hash_table<asan_mem_ref_hasher> *ht = get_mem_ref_hash_table ();
asan_mem_ref r;
asan_mem_ref_init (&r, ref, access_size);
- asan_mem_ref **slot = ht.find_slot (&r, INSERT);
+ asan_mem_ref **slot = ht->find_slot (&r, INSERT);
if (*slot == NULL)
*slot = asan_mem_ref_new (ref, access_size);
}
{
const char *ns;
vec<attribute_spec> attributes;
- hash_table <attribute_hasher> attribute_hash;
+ hash_table<attribute_hasher> *attribute_hash;
};
/* The table of scope attributes. */
sa.ns = ns;
sa.attributes.create (64);
result = attributes_table.safe_push (sa);
- result->attribute_hash.create (200);
+ result->attribute_hash = new hash_table<attribute_hasher> (200);
}
/* Really add the attributes to their namespace now. */
gcc_assert (attr != NULL && name_space != NULL);
- gcc_assert (name_space->attribute_hash.is_created ());
+ gcc_assert (name_space->attribute_hash);
str.str = attr->name;
str.length = strlen (str.str);
gcc_assert (str.length > 0 && str.str[0] != '_');
slot = name_space->attribute_hash
- .find_slot_with_hash (&str, substring_hash (str.str, str.length),
- INSERT);
+ ->find_slot_with_hash (&str, substring_hash (str.str, str.length),
+ INSERT);
gcc_assert (!*slot || attr->name[0] == '*');
*slot = CONST_CAST (struct attribute_spec *, attr);
}
attr.str = IDENTIFIER_POINTER (name);
attr.length = IDENTIFIER_LENGTH (name);
extract_attribute_substring (&attr);
- return attrs->attribute_hash.find_with_hash (&attr,
- substring_hash (attr.str, attr.length));
+ return attrs->attribute_hash->find_with_hash (&attr,
+ substring_hash (attr.str,
+ attr.length));
}
/* Return the spec for the attribute named NAME. If NAME is a TREE_LIST,
}
/* Hashtable mapping bitmap names to descriptors. */
-static hash_table <bitmap_desc_hasher> bitmap_desc_hash;
+static hash_table<bitmap_desc_hasher> *bitmap_desc_hash;
/* For given file and line, return descriptor, create new if needed. */
static bitmap_descriptor
loc.function = function;
loc.line = line;
- if (!bitmap_desc_hash.is_created ())
- bitmap_desc_hash.create (10);
+ if (!bitmap_desc_hash)
+ bitmap_desc_hash = new hash_table<bitmap_desc_hasher> (10);
- slot = bitmap_desc_hash.find_slot_with_hash (&loc,
- htab_hash_pointer (file) + line,
- INSERT);
+ slot
+ = bitmap_desc_hash->find_slot_with_hash (&loc,
+ htab_hash_pointer (file) + line,
+ INSERT);
if (*slot)
return *slot;
if (! GATHER_STATISTICS)
return;
- if (!bitmap_desc_hash.is_created ())
+ if (!bitmap_desc_hash)
return;
fprintf (stderr,
fprintf (stderr, "---------------------------------------------------------------------------------\n");
info.count = 0;
info.size = 0;
- bitmap_desc_hash.traverse <output_info *, print_statistics> (&info);
+ bitmap_desc_hash->traverse <output_info *, print_statistics> (&info);
fprintf (stderr, "---------------------------------------------------------------------------------\n");
fprintf (stderr,
"%-41s %9"PRId64" %15"PRId64"\n",
+2014-06-24 Trevor Saunders <tsaunders@mozilla.com>
+
+ * c-decl.c: Adjust.
+
2014-06-24 Jakub Jelinek <jakub@redhat.com>
* c-parser.c (c_parser_omp_for_loop): For
static void
detect_field_duplicates_hash (tree fieldlist,
- hash_table <pointer_hash <tree_node> > htab)
+ hash_table<pointer_hash <tree_node> > *htab)
{
tree x, y;
tree_node **slot;
for (x = fieldlist; x ; x = DECL_CHAIN (x))
if ((y = DECL_NAME (x)) != 0)
{
- slot = htab.find_slot (y, INSERT);
+ slot = htab->find_slot (y, INSERT);
if (*slot)
{
error ("duplicate member %q+D", x);
&& TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL)
{
tree xn = DECL_NAME (TYPE_NAME (TREE_TYPE (x)));
- slot = htab.find_slot (xn, INSERT);
+ slot = htab->find_slot (xn, INSERT);
if (*slot)
error ("duplicate member %q+D", TYPE_NAME (TREE_TYPE (x)));
*slot = xn;
}
else
{
- hash_table <pointer_hash <tree_node> > htab;
- htab.create (37);
-
- detect_field_duplicates_hash (fieldlist, htab);
- htab.dispose ();
+ hash_table<pointer_hash <tree_node> > htab (37);
+ detect_field_duplicates_hash (fieldlist, &htab);
}
}
/* Data structures used to maintain mapping between basic blocks and
copies. */
-static hash_table <bb_copy_hasher> bb_original;
-static hash_table <bb_copy_hasher> bb_copy;
+static hash_table<bb_copy_hasher> *bb_original;
+static hash_table<bb_copy_hasher> *bb_copy;
/* And between loops and copies. */
-static hash_table <bb_copy_hasher> loop_copy;
+static hash_table<bb_copy_hasher> *loop_copy;
static alloc_pool original_copy_bb_pool;
original_copy_bb_pool
= create_alloc_pool ("original_copy",
sizeof (struct htab_bb_copy_original_entry), 10);
- bb_original.create (10);
- bb_copy.create (10);
- loop_copy.create (10);
+ bb_original = new hash_table<bb_copy_hasher> (10);
+ bb_copy = new hash_table<bb_copy_hasher> (10);
+ loop_copy = new hash_table<bb_copy_hasher> (10);
}
/* Free the data structures to maintain mapping between blocks and
free_original_copy_tables (void)
{
gcc_assert (original_copy_bb_pool);
- bb_copy.dispose ();
- bb_original.dispose ();
- loop_copy.dispose ();
+ delete bb_copy;
+ bb_copy = NULL;
+ delete bb_original;
+ bb_original = NULL;
+ delete loop_copy;
+ loop_copy = NULL;
free_alloc_pool (original_copy_bb_pool);
original_copy_bb_pool = NULL;
}
/* Removes the value associated with OBJ from table TAB. */
static void
-copy_original_table_clear (hash_table <bb_copy_hasher> tab, unsigned obj)
+copy_original_table_clear (hash_table<bb_copy_hasher> *tab, unsigned obj)
{
htab_bb_copy_original_entry **slot;
struct htab_bb_copy_original_entry key, *elt;
return;
key.index1 = obj;
- slot = tab.find_slot (&key, NO_INSERT);
+ slot = tab->find_slot (&key, NO_INSERT);
if (!slot)
return;
elt = *slot;
- tab.clear_slot (slot);
+ tab->clear_slot (slot);
pool_free (original_copy_bb_pool, elt);
}
Do nothing when data structures are not initialized. */
static void
-copy_original_table_set (hash_table <bb_copy_hasher> tab,
+copy_original_table_set (hash_table<bb_copy_hasher> *tab,
unsigned obj, unsigned val)
{
struct htab_bb_copy_original_entry **slot;
return;
key.index1 = obj;
- slot = tab.find_slot (&key, INSERT);
+ slot = tab->find_slot (&key, INSERT);
if (!*slot)
{
*slot = (struct htab_bb_copy_original_entry *)
gcc_assert (original_copy_bb_pool);
key.index1 = bb->index;
- entry = bb_original.find (&key);
+ entry = bb_original->find (&key);
if (entry)
return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
else
gcc_assert (original_copy_bb_pool);
key.index1 = bb->index;
- entry = bb_copy.find (&key);
+ entry = bb_copy->find (&key);
if (entry)
return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
else
gcc_assert (original_copy_bb_pool);
key.index1 = loop->num;
- entry = loop_copy.find (&key);
+ entry = loop_copy->find (&key);
if (entry)
return get_loop (cfun, entry->index2);
else
return hash_rtx (p1, VOIDmode, NULL, NULL, FALSE);
}
-typedef hash_table <libcall_hasher> libcall_table_type;
+typedef hash_table<libcall_hasher> libcall_table_type;
static void
-add_libcall (libcall_table_type htab, rtx libcall)
+add_libcall (libcall_table_type *htab, rtx libcall)
{
- *htab.find_slot (libcall, INSERT) = libcall;
+ *htab->find_slot (libcall, INSERT) = libcall;
}
static bool
arm_libcall_uses_aapcs_base (const_rtx libcall)
{
static bool init_done = false;
- static libcall_table_type libcall_htab;
+ static libcall_table_type *libcall_htab = NULL;
if (!init_done)
{
init_done = true;
- libcall_htab.create (31);
+ libcall_htab = new libcall_table_type (31);
add_libcall (libcall_htab,
convert_optab_libfunc (sfloat_optab, SFmode, SImode));
add_libcall (libcall_htab,
DFmode));
}
- return libcall && libcall_htab.find (libcall) != NULL;
+ return libcall && libcall_htab->find (libcall) != NULL;
}
static rtx
unsigned int
i386_pe_section_type_flags (tree decl, const char *name, int reloc)
{
- static hash_table <pointer_hash <unsigned int> > htab;
+ static hash_table<pointer_hash<unsigned int> > *htab = NULL;
unsigned int flags;
unsigned int **slot;
/* The names we put in the hashtable will always be the unique
versions given to us by the stringtable, so we can just use
their addresses as the keys. */
- if (!htab.is_created ())
- htab.create (31);
+ if (!htab)
+ htab = new hash_table<pointer_hash<unsigned int> > (31);
if (decl && TREE_CODE (decl) == FUNCTION_DECL)
flags = SECTION_CODE;
flags |= SECTION_LINKONCE;
/* See if we already have an entry for this section. */
- slot = htab.find_slot ((const unsigned int *)name, INSERT);
+ slot = htab->find_slot ((const unsigned int *)name, INSERT);
if (!*slot)
{
*slot = (unsigned int *) xmalloc (sizeof (unsigned int));
i386_find_on_wrapper_list (const char *target)
{
static char first_time = 1;
- static hash_table <wrapped_symbol_hasher> wrappers;
+ static hash_table<wrapped_symbol_hasher> *wrappers;
if (first_time)
{
char *bufptr;
/* Breaks up the char array into separated strings
strings and enter them into the hash table. */
- wrappers.create (8);
+ wrappers = new hash_table<wrapped_symbol_hasher> (8);
for (bufptr = wrapper_list_buffer; *bufptr; ++bufptr)
{
char *found = NULL;
if (*bufptr)
*bufptr = 0;
if (found)
- *wrappers.find_slot (found, INSERT) = found;
+ *wrappers->find_slot (found, INSERT) = found;
}
first_time = 0;
}
- return wrappers.find (target);
+ return wrappers->find (target);
}
#endif /* CXX_WRAP_SPEC_LIST */
/* Hash table of the bundle states. The key is dfa_state and insn_num
of the bundle states. */
-static hash_table <bundle_state_hasher> bundle_state_table;
+static hash_table<bundle_state_hasher> *bundle_state_table;
/* The function inserts the BUNDLE_STATE into the hash table. The
function returns nonzero if the bundle has been inserted into the
{
struct bundle_state **entry_ptr;
- entry_ptr = bundle_state_table.find_slot (bundle_state, INSERT);
+ entry_ptr = bundle_state_table->find_slot (bundle_state, INSERT);
if (*entry_ptr == NULL)
{
bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
static void
initiate_bundle_state_table (void)
{
- bundle_state_table.create (50);
+ bundle_state_table = new hash_table<bundle_state_hasher> (50);
}
/* Finish work with the hash table. */
static void
finish_bundle_state_table (void)
{
- bundle_state_table.dispose ();
+ delete bundle_state_table;
+ bundle_state_table = NULL;
}
\f
return rtx_equal_p (entry->base, value);
}
-typedef hash_table <mips_lo_sum_offset_hasher> mips_offset_table;
+typedef hash_table<mips_lo_sum_offset_hasher> mips_offset_table;
/* Look up symbolic constant X in HTAB, which is a hash table of
mips_lo_sum_offsets. If OPTION is NO_INSERT, return true if X can be
paired with a recorded LO_SUM, otherwise record X in the table. */
static bool
-mips_lo_sum_offset_lookup (mips_offset_table htab, rtx x,
+mips_lo_sum_offset_lookup (mips_offset_table *htab, rtx x,
enum insert_option option)
{
rtx base, offset;
base = UNSPEC_ADDRESS (base);
/* Look up the base in the hash table. */
- slot = htab.find_slot_with_hash (base, mips_hash_base (base), option);
+ slot = htab->find_slot_with_hash (base, mips_hash_base (base), option);
if (slot == NULL)
return false;
mips_record_lo_sum (rtx *loc, void *data)
{
if (GET_CODE (*loc) == LO_SUM)
- mips_lo_sum_offset_lookup (*(mips_offset_table*) data,
+ mips_lo_sum_offset_lookup ((mips_offset_table*) data,
XEXP (*loc, 1), INSERT);
return 0;
}
LO_SUMs in the current function. */
static bool
-mips_orphaned_high_part_p (mips_offset_table htab, rtx insn)
+mips_orphaned_high_part_p (mips_offset_table *htab, rtx insn)
{
enum mips_symbol_type type;
rtx x, set;
{
rtx insn, last_insn, subinsn, next_insn, lo_reg, delayed_reg;
int hilo_delay;
- mips_offset_table htab;
/* Force all instructions to be split into their final form. */
split_all_insns_noflow ();
if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
cfun->machine->all_noreorder_p = false;
- htab.create (37);
+ mips_offset_table htab (37);
/* Make a first pass over the instructions, recording all the LO_SUMs. */
for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
FOR_EACH_SUBINSN (subinsn, insn)
if (INSN_P (subinsn))
{
- if (mips_orphaned_high_part_p (htab, subinsn))
+ if (mips_orphaned_high_part_p (&htab, subinsn))
{
PATTERN (subinsn) = gen_nop ();
INSN_CODE (subinsn) = CODE_FOR_nop;
{
/* INSN is a single instruction. Delete it if it's an
orphaned high-part relocation. */
- if (mips_orphaned_high_part_p (htab, insn))
+ if (mips_orphaned_high_part_p (&htab, insn))
delete_insn (insn);
/* Also delete cache barriers if the last instruction
was an annulled branch. INSN will not be speculatively
}
}
}
-
- htab.dispose ();
}
/* Return true if the function has a long branch instruction. */
/* Hash table of group signature symbols. */
-static hash_table <comdat_entry_hasher> solaris_comdat_htab;
+static hash_table<comdat_entry_hasher> *solaris_comdat_htab;
/* Output assembly to switch to COMDAT group section NAME with attributes
FLAGS and group signature symbol DECL, using Sun as syntax. */
identify the missing ones without changing the affected frontents,
remember the signature symbols and emit those not marked
TREE_SYMBOL_REFERENCED in solaris_file_end. */
- if (!solaris_comdat_htab.is_created ())
- solaris_comdat_htab.create (37);
+ if (!solaris_comdat_htab)
+ solaris_comdat_htab = new hash_table<comdat_entry_hasher> (37);
entry.sig = signature;
- slot = solaris_comdat_htab.find_slot (&entry, INSERT);
+ slot = solaris_comdat_htab->find_slot (&entry, INSERT);
if (*slot == NULL)
{
void
solaris_file_end (void)
{
- if (!solaris_comdat_htab.is_created ())
+ if (!solaris_comdat_htab)
return;
- solaris_comdat_htab.traverse <void *, solaris_define_comdat_signature> (NULL);
+ solaris_comdat_htab->traverse <void *, solaris_define_comdat_signature>
+ (NULL);
}
void
}
/* Hash table of count data. */
-static hash_table <counts_entry> counts_hash;
+static hash_table<counts_entry> *counts_hash;
/* Read in the counts file, if available. */
tag = gcov_read_unsigned ();
bbg_file_stamp = crc32_unsigned (bbg_file_stamp, tag);
- counts_hash.create (10);
+ counts_hash = new hash_table<counts_entry> (10);
while ((tag = gcov_read_unsigned ()))
{
gcov_unsigned_t length;
elt.ident = fn_ident;
elt.ctr = GCOV_COUNTER_FOR_TAG (tag);
- slot = counts_hash.find_slot (&elt, INSERT);
+ slot = counts_hash->find_slot (&elt, INSERT);
entry = *slot;
if (!entry)
{
error ("checksum is (%x,%x) instead of (%x,%x)",
entry->lineno_checksum, entry->cfg_checksum,
lineno_checksum, cfg_checksum);
- counts_hash.dispose ();
+ delete counts_hash;
+ counts_hash = NULL;
break;
}
else if (entry->summary.num != n_counts)
{
error ("Profile data for function %u is corrupted", fn_ident);
error ("number of counters is %d instead of %d", entry->summary.num, n_counts);
- counts_hash.dispose ();
+ delete counts_hash;
+ counts_hash = NULL;
break;
}
else if (elt.ctr >= GCOV_COUNTERS_SUMMABLE)
{
error (is_error < 0 ? "%qs has overflowed" : "%qs is corrupted",
da_file_name);
- counts_hash.dispose ();
+ delete counts_hash;
+ counts_hash = NULL;
break;
}
}
counts_entry_t *entry, elt;
/* No hash table, no counts. */
- if (!counts_hash.is_created ())
+ if (!counts_hash)
{
static int warned = 0;
elt.ident = current_function_funcdef_no + 1;
elt.ctr = counter;
- entry = counts_hash.find (&elt);
+ entry = counts_hash->find (&elt);
if (!entry || !entry->summary.num)
/* The function was not emitted, or is weak and not chosen in the
final executable. Silently fail, because there's nothing we
+2014-06-24 Trevor Saunders <tsaunders@mozilla.com>
+
+ * class.c, semantics.c, tree.c, vtable-class-hierarchy.c:
+ Adjust.
+
2014-06-24 Jakub Jelinek <jakub@redhat.com>
* parser.c (cp_parser_omp_for_loop): For
}
\f
/* Hash table to avoid endless recursion when handling references. */
-static hash_table <pointer_hash <tree_node> > fixed_type_or_null_ref_ht;
+static hash_table<pointer_hash<tree_node> > *fixed_type_or_null_ref_ht;
/* Return the dynamic type of INSTANCE, if known.
Used to determine whether the virtual function table is needed
else if (TREE_CODE (TREE_TYPE (instance)) == REFERENCE_TYPE)
{
/* We only need one hash table because it is always left empty. */
- if (!fixed_type_or_null_ref_ht.is_created ())
- fixed_type_or_null_ref_ht.create (37);
+ if (!fixed_type_or_null_ref_ht)
+ fixed_type_or_null_ref_ht
+ = new hash_table<pointer_hash<tree_node> > (37);
/* Reference variables should be references to objects. */
if (nonnull)
if (VAR_P (instance)
&& DECL_INITIAL (instance)
&& !type_dependent_expression_p_push (DECL_INITIAL (instance))
- && !fixed_type_or_null_ref_ht.find (instance))
+ && !fixed_type_or_null_ref_ht->find (instance))
{
tree type;
tree_node **slot;
- slot = fixed_type_or_null_ref_ht.find_slot (instance, INSERT);
+ slot = fixed_type_or_null_ref_ht->find_slot (instance, INSERT);
*slot = instance;
type = RECUR (DECL_INITIAL (instance));
- fixed_type_or_null_ref_ht.remove_elt (instance);
+ fixed_type_or_null_ref_ht->remove_elt (instance);
return type;
}
struct nrv_data
{
+ nrv_data () : visited (37) {}
+
tree var;
tree result;
- hash_table <pointer_hash <tree_node> > visited;
+ hash_table<pointer_hash <tree_node> > visited;
};
/* Helper function for walk_tree, used by finalize_nrv below. */
data.var = var;
data.result = result;
- data.visited.create (37);
cp_walk_tree (tp, finalize_nrv_r, &data, 0);
- data.visited.dispose ();
}
\f
/* Create CP_OMP_CLAUSE_INFO for clause C. Returns true if it is invalid. */
verify_stmt_tree_r (tree* tp, int * /*walk_subtrees*/, void* data)
{
tree t = *tp;
- hash_table <pointer_hash <tree_node> > *statements
- = static_cast <hash_table <pointer_hash <tree_node> > *> (data);
+ hash_table<pointer_hash <tree_node> > *statements
+ = static_cast <hash_table<pointer_hash <tree_node> > *> (data);
tree_node **slot;
if (!STATEMENT_CODE_P (TREE_CODE (t)))
void
verify_stmt_tree (tree t)
{
- hash_table <pointer_hash <tree_node> > statements;
- statements.create (37);
+ hash_table<pointer_hash <tree_node> > statements (37);
cp_walk_tree (&t, verify_stmt_tree_r, &statements, NULL);
- statements.dispose ();
}
/* Check if the type T depends on a type with no linkage and if so, return
if (vtbl_ptr_array->length() > 0
|| (current->is_used
- || (current->registered.size() > 0)))
+ || (current->registered->size() > 0)))
{
insert_call_to_register_pair (vtbl_ptr_array,
arg1, arg2, size_hint_arg, str1,
{
struct vtbl_map_node *current = vtbl_map_nodes_vec[i];
if (!current->is_used
- && current->registered.size() == 0)
+ && current->registered->size() == 0)
unused_vtbl_map_vars++;
}
}
/* A table that enables us to look up elts by their value. */
-static hash_table <cselib_hasher> cselib_hash_table;
+static hash_table<cselib_hasher> *cselib_hash_table;
/* A table to hold preserved values. */
-static hash_table <cselib_hasher> cselib_preserved_hash_table;
+static hash_table<cselib_hasher> *cselib_preserved_hash_table;
/* This is a global so we don't have to pass this through every function.
It is used in new_elt_loc_list to set SETTING_INSN. */
GET_MODE (v->val_rtx), v->val_rtx, VOIDmode
};
cselib_val **slot
- = cselib_preserved_hash_table.find_slot_with_hash (&lookup,
+ = cselib_preserved_hash_table->find_slot_with_hash (&lookup,
v->hash, INSERT);
gcc_assert (!*slot);
*slot = v;
}
- cselib_hash_table.clear_slot (x);
+ cselib_hash_table->clear_slot (x);
return 1;
}
}
if (cselib_preserve_constants)
- cselib_hash_table.traverse <void *, preserve_constants_and_equivs> (NULL);
+ cselib_hash_table->traverse <void *, preserve_constants_and_equivs>
+ (NULL);
else
{
- cselib_hash_table.empty ();
+ cselib_hash_table->empty ();
gcc_checking_assert (!cselib_any_perm_equivs);
}
cselib_val **slot = NULL;
cselib_hasher::compare_type lookup = { mode, x, memmode };
if (cselib_preserve_constants)
- slot = cselib_preserved_hash_table.find_slot_with_hash (&lookup, hash,
- NO_INSERT);
+ slot = cselib_preserved_hash_table->find_slot_with_hash (&lookup, hash,
+ NO_INSERT);
if (!slot)
- slot = cselib_hash_table.find_slot_with_hash (&lookup, hash, insert);
+ slot = cselib_hash_table->find_slot_with_hash (&lookup, hash, insert);
return slot;
}
cselib_discard_hook (v);
CSELIB_VAL_PTR (v->val_rtx) = NULL;
- cselib_hash_table.clear_slot (x);
+ cselib_hash_table->clear_slot (x);
unchain_one_value (v);
n_useless_values--;
}
do
{
values_became_useless = 0;
- cselib_hash_table.traverse <void *, discard_useless_locs> (NULL);
+ cselib_hash_table->traverse <void *, discard_useless_locs> (NULL);
}
while (values_became_useless);
n_debug_values -= n_useless_debug_values;
n_useless_debug_values = 0;
- cselib_hash_table.traverse <void *, discard_useless_values> (NULL);
+ cselib_hash_table->traverse <void *, discard_useless_values> (NULL);
gcc_assert (!n_useless_values);
}
quadratic behavior for very large hashtables with very few
useless elements. */
&& ((unsigned int)n_useless_values
- > (cselib_hash_table.elements () - n_debug_values) / 4))
+ > (cselib_hash_table->elements () - n_debug_values) / 4))
remove_useless_values ();
}
}
used_regs = XNEWVEC (unsigned int, cselib_nregs);
n_used_regs = 0;
- cselib_hash_table.create (31);
+ cselib_hash_table = new hash_table<cselib_hasher> (31);
if (cselib_preserve_constants)
- cselib_preserved_hash_table.create (31);
+ cselib_preserved_hash_table = new hash_table<cselib_hasher> (31);
next_uid = 1;
}
free_alloc_pool (cselib_val_pool);
free_alloc_pool (value_pool);
cselib_clear_table ();
- cselib_hash_table.dispose ();
+ delete cselib_hash_table;
+ cselib_hash_table = NULL;
if (preserved)
- cselib_preserved_hash_table.dispose ();
+ delete cselib_preserved_hash_table;
+ cselib_preserved_hash_table = NULL;
free (used_regs);
used_regs = 0;
n_useless_values = 0;
dump_cselib_table (FILE *out)
{
fprintf (out, "cselib hash table:\n");
- cselib_hash_table.traverse <FILE *, dump_cselib_val> (out);
+ cselib_hash_table->traverse <FILE *, dump_cselib_val> (out);
fprintf (out, "cselib preserved hash table:\n");
- cselib_preserved_hash_table.traverse <FILE *, dump_cselib_val> (out);
+ cselib_preserved_hash_table->traverse <FILE *, dump_cselib_val> (out);
if (first_containing_mem != &dummy_val)
{
fputs ("first mem ", out);
s_slot.len = len;
s_slot.slot_num = 0;
- slot = ob->string_hash_table.find_slot (&s_slot, INSERT);
+ slot = ob->string_hash_table->find_slot (&s_slot, INSERT);
if (*slot == NULL)
{
struct lto_output_stream *string_stream = ob->string_stream;
}
/* Tables of group_info structures, hashed by base value. */
-static hash_table <invariant_group_base_hasher> rtx_group_table;
+static hash_table<invariant_group_base_hasher> *rtx_group_table;
/* Get the GROUP for BASE. Add a new group if it is not there. */
/* Find the store_base_info structure for BASE, creating a new one
if necessary. */
tmp_gi.rtx_base = base;
- slot = rtx_group_table.find_slot (&tmp_gi, INSERT);
+ slot = rtx_group_table->find_slot (&tmp_gi, INSERT);
gi = (group_info_t) *slot;
}
else
= create_alloc_pool ("deferred_change_pool",
sizeof (struct deferred_change), 10);
- rtx_group_table.create (11);
+ rtx_group_table = new hash_table<invariant_group_base_hasher> (11);
bb_table = XNEWVEC (bb_info_t, last_basic_block_for_fn (cfun));
rtx_group_next_id = 0;
BITMAP_FREE (regs_live);
cselib_finish ();
- rtx_group_table.empty ();
+ rtx_group_table->empty ();
}
\f
end_alias_analysis ();
free (bb_table);
- rtx_group_table.dispose ();
+ delete rtx_group_table;
+ rtx_group_table = NULL;
rtx_group_vec.release ();
BITMAP_FREE (all_blocks);
BITMAP_FREE (scratch);
/* The variables making up the pseudo-cfg, as described above. */
static vec<dw_trace_info> trace_info;
static vec<dw_trace_info_ref> trace_work_list;
-static hash_table <trace_info_hasher> trace_index;
+static hash_table<trace_info_hasher> *trace_index;
/* A vector of call frame insns for the CIE. */
cfi_vec cie_cfi_vec;
{
dw_trace_info dummy;
dummy.head = insn;
- return trace_index.find_with_hash (&dummy, INSN_UID (insn));
+ return trace_index->find_with_hash (&dummy, INSN_UID (insn));
}
static bool
/* Create the trace index after we've finished building trace_info,
avoiding stale pointer problems due to reallocation. */
- trace_index.create (trace_info.length ());
+ trace_index
+ = new hash_table<trace_info_hasher> (trace_info.length ());
dw_trace_info *tp;
FOR_EACH_VEC_ELT (trace_info, i, tp)
{
rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
tp->switch_sections ? " (section switch)" : "");
- slot = trace_index.find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
+ slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
gcc_assert (*slot == NULL);
*slot = tp;
}
}
trace_info.release ();
- trace_index.dispose ();
+ delete trace_index;
+ trace_index = NULL;
return 0;
}
}
}
-typedef hash_table <cu_hash_table_entry_hasher> cu_hash_type;
+typedef hash_table<cu_hash_table_entry_hasher> cu_hash_type;
/* Check whether we have already seen this CU and set up SYM_NUM
accordingly. */
static int
-check_duplicate_cu (dw_die_ref cu, cu_hash_type htable, unsigned int *sym_num)
+check_duplicate_cu (dw_die_ref cu, cu_hash_type *htable, unsigned int *sym_num)
{
struct cu_hash_table_entry dummy;
struct cu_hash_table_entry **slot, *entry, *last = &dummy;
dummy.max_comdat_num = 0;
- slot = htable.find_slot_with_hash (cu,
- htab_hash_string (cu->die_id.die_symbol),
- INSERT);
+ slot = htable->find_slot_with_hash (cu,
+ htab_hash_string (cu->die_id.die_symbol),
+ INSERT);
entry = *slot;
for (; entry; last = entry, entry = entry->next)
/* Record SYM_NUM to record of CU in HTABLE. */
static void
-record_comdat_symbol_number (dw_die_ref cu, cu_hash_type htable,
+record_comdat_symbol_number (dw_die_ref cu, cu_hash_type *htable,
unsigned int sym_num)
{
struct cu_hash_table_entry **slot, *entry;
- slot = htable.find_slot_with_hash (cu,
- htab_hash_string (cu->die_id.die_symbol),
- NO_INSERT);
+ slot = htable->find_slot_with_hash (cu,
+ htab_hash_string (cu->die_id.die_symbol),
+ NO_INSERT);
entry = *slot;
entry->max_comdat_num = sym_num;
dw_die_ref c;
dw_die_ref unit = NULL;
limbo_die_node *node, **pnode;
- cu_hash_type cu_hash_table;
c = die->die_child;
if (c) do {
#endif
assign_symbol_names (die);
- cu_hash_table.create (10);
+ cu_hash_type cu_hash_table (10);
for (node = limbo_die_list, pnode = &limbo_die_list;
node;
node = node->next)
int is_dupl;
compute_section_prefix (node->die);
- is_dupl = check_duplicate_cu (node->die, cu_hash_table,
+ is_dupl = check_duplicate_cu (node->die, &cu_hash_table,
&comdat_symbol_number);
assign_symbol_names (node->die);
if (is_dupl)
else
{
pnode = &node->next;
- record_comdat_symbol_number (node->die, cu_hash_table,
+ record_comdat_symbol_number (node->die, &cu_hash_table,
comdat_symbol_number);
}
}
- cu_hash_table.dispose ();
}
/* Return non-zero if this DIE is a declaration. */
return entry1->orig == entry2;
}
-typedef hash_table <decl_table_entry_hasher> decl_hash_type;
+typedef hash_table<decl_table_entry_hasher> decl_hash_type;
/* Copy DIE and its ancestors, up to, but not including, the compile unit
or type unit entry, to a new tree. Adds the new tree to UNIT and returns
to check if the ancestor has already been copied into UNIT. */
static dw_die_ref
-copy_ancestor_tree (dw_die_ref unit, dw_die_ref die, decl_hash_type decl_table)
+copy_ancestor_tree (dw_die_ref unit, dw_die_ref die,
+ decl_hash_type *decl_table)
{
dw_die_ref parent = die->die_parent;
dw_die_ref new_parent = unit;
decl_table_entry **slot = NULL;
struct decl_table_entry *entry = NULL;
- if (decl_table.is_created ())
+ if (decl_table)
{
/* Check if the entry has already been copied to UNIT. */
- slot = decl_table.find_slot_with_hash (die, htab_hash_pointer (die),
- INSERT);
+ slot = decl_table->find_slot_with_hash (die, htab_hash_pointer (die),
+ INSERT);
if (*slot != HTAB_EMPTY_ENTRY)
{
entry = *slot;
copy = clone_as_declaration (die);
add_child_die (new_parent, copy);
- if (decl_table.is_created ())
+ if (decl_table)
{
/* Record the pointer to the copy. */
entry->copy = copy;
if (decl->die_parent != NULL
&& !is_unit_die (decl->die_parent))
{
- new_decl = copy_ancestor_tree (unit, decl, decl_hash_type ());
+ new_decl = copy_ancestor_tree (unit, decl, NULL);
if (new_decl != NULL)
{
remove_AT (new_decl, DW_AT_signature);
Enter all the cloned children into the hash table decl_table. */
static dw_die_ref
-clone_tree_partial (dw_die_ref die, decl_hash_type decl_table)
+clone_tree_partial (dw_die_ref die, decl_hash_type *decl_table)
{
dw_die_ref c;
dw_die_ref clone;
else
clone = clone_die (die);
- slot = decl_table.find_slot_with_hash (die,
- htab_hash_pointer (die), INSERT);
+ slot = decl_table->find_slot_with_hash (die,
+ htab_hash_pointer (die), INSERT);
/* Assert that DIE isn't in the hash table yet. If it would be there
before, the ancestors would be necessarily there as well, therefore
type_unit). */
static void
-copy_decls_walk (dw_die_ref unit, dw_die_ref die, decl_hash_type decl_table)
+copy_decls_walk (dw_die_ref unit, dw_die_ref die, decl_hash_type *decl_table)
{
dw_die_ref c;
dw_attr_ref a;
if (targ->die_mark != 0 || targ->comdat_type_p)
continue;
- slot = decl_table.find_slot_with_hash (targ, htab_hash_pointer (targ),
- INSERT);
+ slot = decl_table->find_slot_with_hash (targ,
+ htab_hash_pointer (targ),
+ INSERT);
if (*slot != HTAB_EMPTY_ENTRY)
{
static void
copy_decls_for_unworthy_types (dw_die_ref unit)
{
- decl_hash_type decl_table;
-
mark_dies (unit);
- decl_table.create (10);
- copy_decls_walk (unit, unit, decl_table);
- decl_table.dispose ();
+ decl_hash_type decl_table (10);
+ copy_decls_walk (unit, unit, &decl_table);
unmark_dies (unit);
}
return r1->type == r2->type;
}
-typedef hash_table <external_ref_hasher> external_ref_hash_type;
+typedef hash_table<external_ref_hasher> external_ref_hash_type;
/* Return a pointer to the external_ref for references to DIE. */
static struct external_ref *
-lookup_external_ref (external_ref_hash_type map, dw_die_ref die)
+lookup_external_ref (external_ref_hash_type *map, dw_die_ref die)
{
struct external_ref ref, *ref_p;
external_ref **slot;
ref.type = die;
- slot = map.find_slot (&ref, INSERT);
+ slot = map->find_slot (&ref, INSERT);
if (*slot != HTAB_EMPTY_ENTRY)
return *slot;
references, remember how many we've seen. */
static void
-optimize_external_refs_1 (dw_die_ref die, external_ref_hash_type map)
+optimize_external_refs_1 (dw_die_ref die, external_ref_hash_type *map)
{
dw_die_ref c;
dw_attr_ref a;
them which will be applied in build_abbrev_table. This is useful because
references to local DIEs are smaller. */
-static external_ref_hash_type
+static external_ref_hash_type *
optimize_external_refs (dw_die_ref die)
{
- external_ref_hash_type map;
- map.create (10);
+ external_ref_hash_type *map = new external_ref_hash_type (10);
optimize_external_refs_1 (die, map);
- map.traverse <dw_die_ref, dwarf2_build_local_stub> (die);
+ map->traverse <dw_die_ref, dwarf2_build_local_stub> (die);
return map;
}
die are visited recursively. */
static void
-build_abbrev_table (dw_die_ref die, external_ref_hash_type extern_map)
+build_abbrev_table (dw_die_ref die, external_ref_hash_type *extern_map)
{
unsigned long abbrev_id;
unsigned int n_alloc;
{
const char *secname, *oldsym;
char *tmp;
- external_ref_hash_type extern_map;
/* Unless we are outputting main CU, we may throw away empty ones. */
if (!output_if_empty && die->die_child == NULL)
this CU so we know which get local refs. */
mark_dies (die);
- extern_map = optimize_external_refs (die);
+ external_ref_hash_type *extern_map = optimize_external_refs (die);
build_abbrev_table (die, extern_map);
- extern_map.dispose ();
+ delete extern_map;
/* Initialize the beginning DIE offset - and calculate sizes/offsets. */
next_die_offset = DWARF_COMPILE_UNIT_HEADER_SIZE;
#if defined (OBJECT_FORMAT_ELF)
tree comdat_key;
#endif
- external_ref_hash_type extern_map;
/* First mark all the DIEs in this CU so we know which get local refs. */
mark_dies (node->root_die);
- extern_map = optimize_external_refs (node->root_die);
+ external_ref_hash_type *extern_map
+ = optimize_external_refs (node->root_die);
build_abbrev_table (node->root_die, extern_map);
- extern_map.dispose ();
+ delete extern_map;
+ extern_map = NULL;
/* Initialize the beginning DIE offset - and calculate sizes/offsets. */
next_die_offset = DWARF_COMDAT_TYPE_UNIT_HEADER_SIZE;
return !strcmp (entry1->info, entry2->info);
}
-typedef hash_table <macinfo_entry_hasher> macinfo_hash_type;
+typedef hash_table<macinfo_entry_hasher> macinfo_hash_type;
/* Output a single .debug_macinfo entry. */
static unsigned
optimize_macinfo_range (unsigned int idx, vec<macinfo_entry, va_gc> *files,
- macinfo_hash_type *macinfo_htab)
+ macinfo_hash_type **macinfo_htab)
{
macinfo_entry *first, *second, *cur, *inc;
char linebuf[sizeof (HOST_WIDE_INT) * 3 + 1];
inc->code = DW_MACRO_GNU_transparent_include;
inc->lineno = 0;
inc->info = ggc_strdup (grp_name);
- if (!macinfo_htab->is_created ())
- macinfo_htab->create (10);
+ if (!*macinfo_htab)
+ *macinfo_htab = new macinfo_hash_type (10);
/* Avoid emitting duplicates. */
- slot = macinfo_htab->find_slot (inc, INSERT);
+ slot = (*macinfo_htab)->find_slot (inc, INSERT);
if (*slot != NULL)
{
inc->code = 0;
else
{
*slot = inc;
- inc->lineno = macinfo_htab->elements ();
+ inc->lineno = (*macinfo_htab)->elements ();
output_macinfo_op (inc);
}
return count;
unsigned long length = vec_safe_length (macinfo_table);
macinfo_entry *ref;
vec<macinfo_entry, va_gc> *files = NULL;
- macinfo_hash_type macinfo_htab;
+ macinfo_hash_type *macinfo_htab = NULL;
if (! length)
return;
ref->code = 0;
}
- if (!macinfo_htab.is_created ())
+ if (!macinfo_htab)
return;
- macinfo_htab.dispose ();
+ delete macinfo_htab;
+ macinfo_htab = NULL;
/* If any DW_MACRO_GNU_transparent_include were used, on those
DW_MACRO_GNU_transparent_include entries terminate the
return a == NULL && b == NULL;
}
-typedef hash_table <loc_list_hasher> loc_list_hash_type;
+typedef hash_table<loc_list_hasher> loc_list_hash_type;
/* Recursively optimize location lists referenced from DIE
children and share them whenever possible. */
static void
-optimize_location_lists_1 (dw_die_ref die, loc_list_hash_type htab)
+optimize_location_lists_1 (dw_die_ref die, loc_list_hash_type *htab)
{
dw_die_ref c;
dw_attr_ref a;
/* TODO: perform some optimizations here, before hashing
it and storing into the hash table. */
hash_loc_list (list);
- slot = htab.find_slot_with_hash (list, list->hash, INSERT);
+ slot = htab->find_slot_with_hash (list, list->hash, INSERT);
if (*slot == NULL)
*slot = list;
else
static void
optimize_location_lists (dw_die_ref die)
{
- loc_list_hash_type htab;
- htab.create (500);
- optimize_location_lists_1 (die, htab);
- htab.dispose ();
+ loc_list_hash_type htab (500);
+ optimize_location_lists_1 (die, &htab);
}
\f
/* Output stuff that dwarf requires at the end of every file,
{
limbo_die_node *node, *next_node;
comdat_type_node *ctnode;
- hash_table <comdat_type_hasher> comdat_type_table;
unsigned int i;
dw_die_ref main_comp_unit_die;
for (node = limbo_die_list; node; node = node->next)
output_comp_unit (node->die, 0);
- comdat_type_table.create (100);
+ hash_table<comdat_type_hasher> comdat_type_table (100);
for (ctnode = comdat_type_list; ctnode != NULL; ctnode = ctnode->next)
{
comdat_type_node **slot = comdat_type_table.find_slot (ctnode, INSERT);
output_comdat_type_unit (ctnode);
*slot = ctnode;
}
- comdat_type_table.dispose ();
/* The AT_pubnames attribute needs to go in all skeleton dies, including
both the main_cu and all skeleton TUs. Making this call unconditional
return entry->filter == data->filter && entry->next == data->next;
}
-typedef hash_table <action_record_hasher> action_hash_type;
+typedef hash_table<action_record_hasher> action_hash_type;
\f
static bool get_eh_region_and_lp_from_rtx (const_rtx, eh_region *,
eh_landing_pad *);
static void dw2_build_landing_pads (void);
-static int collect_one_action_chain (action_hash_type, eh_region);
+static int collect_one_action_chain (action_hash_type *, eh_region);
static int add_call_site (rtx, int, int);
static void push_uleb128 (vec<uchar, va_gc> **, unsigned int);
return TREE_HASH (entry->t);
}
-typedef hash_table <ttypes_filter_hasher> ttypes_hash_type;
+typedef hash_table<ttypes_filter_hasher> ttypes_hash_type;
/* Helper for ehspec hashing. */
return h;
}
-typedef hash_table <ehspec_hasher> ehspec_hash_type;
+typedef hash_table<ehspec_hasher> ehspec_hash_type;
/* Add TYPE (which may be NULL) to cfun->eh->ttype_data, using TYPES_HASH
to speed up the search. Return the filter value to be used. */
static int
-add_ttypes_entry (ttypes_hash_type ttypes_hash, tree type)
+add_ttypes_entry (ttypes_hash_type *ttypes_hash, tree type)
{
struct ttypes_filter **slot, *n;
- slot = ttypes_hash.find_slot_with_hash (type, (hashval_t) TREE_HASH (type),
+ slot = ttypes_hash->find_slot_with_hash (type, (hashval_t) TREE_HASH (type),
INSERT);
if ((n = *slot) == NULL)
to speed up the search. Return the filter value to be used. */
static int
-add_ehspec_entry (ehspec_hash_type ehspec_hash, ttypes_hash_type ttypes_hash,
+add_ehspec_entry (ehspec_hash_type *ehspec_hash, ttypes_hash_type *ttypes_hash,
tree list)
{
struct ttypes_filter **slot, *n;
struct ttypes_filter dummy;
dummy.t = list;
- slot = ehspec_hash.find_slot (&dummy, INSERT);
+ slot = ehspec_hash->find_slot (&dummy, INSERT);
if ((n = *slot) == NULL)
{
assign_filter_values (void)
{
int i;
- ttypes_hash_type ttypes;
- ehspec_hash_type ehspec;
eh_region r;
eh_catch c;
else
vec_alloc (cfun->eh->ehspec_data.other, 64);
- ttypes.create (31);
- ehspec.create (31);
+ ehspec_hash_type ehspec (31);
+ ttypes_hash_type ttypes (31);
for (i = 1; vec_safe_iterate (cfun->eh->region_array, i, &r); ++i)
{
for ( ; tp_node; tp_node = TREE_CHAIN (tp_node))
{
- int flt = add_ttypes_entry (ttypes, TREE_VALUE (tp_node));
+ int flt
+ = add_ttypes_entry (&ttypes, TREE_VALUE (tp_node));
tree flt_node = build_int_cst (integer_type_node, flt);
c->filter_list
{
/* Get a filter value for the NULL list also since it
will need an action record anyway. */
- int flt = add_ttypes_entry (ttypes, NULL);
+ int flt = add_ttypes_entry (&ttypes, NULL);
tree flt_node = build_int_cst (integer_type_node, flt);
c->filter_list
case ERT_ALLOWED_EXCEPTIONS:
r->u.allowed.filter
- = add_ehspec_entry (ehspec, ttypes, r->u.allowed.type_list);
+ = add_ehspec_entry (&ehspec, &ttypes, r->u.allowed.type_list);
break;
default:
break;
}
}
-
- ttypes.dispose ();
- ehspec.dispose ();
}
/* Emit SEQ into basic block just before INSN (that is assumed to be
static int
sjlj_assign_call_site_values (void)
{
- action_hash_type ar_hash;
+ action_hash_type ar_hash (31);
int i, disp_index;
eh_landing_pad lp;
vec_alloc (crtl->eh.action_record_data, 64);
- ar_hash.create (31);
disp_index = 0;
call_site_base = 1;
int action, call_site;
/* First: build the action table. */
- action = collect_one_action_chain (ar_hash, lp->region);
+ action = collect_one_action_chain (&ar_hash, lp->region);
/* Next: assign call-site values. If dwarf2 terms, this would be
the region number assigned by convert_to_eh_region_ranges, but
disp_index++;
}
- ar_hash.dispose ();
-
return disp_index;
}
}
\f
static int
-add_action_record (action_hash_type ar_hash, int filter, int next)
+add_action_record (action_hash_type *ar_hash, int filter, int next)
{
struct action_record **slot, *new_ar, tmp;
tmp.filter = filter;
tmp.next = next;
- slot = ar_hash.find_slot (&tmp, INSERT);
+ slot = ar_hash->find_slot (&tmp, INSERT);
if ((new_ar = *slot) == NULL)
{
}
static int
-collect_one_action_chain (action_hash_type ar_hash, eh_region region)
+collect_one_action_chain (action_hash_type *ar_hash, eh_region region)
{
int next;
convert_to_eh_region_ranges (void)
{
rtx insn, iter, note;
- action_hash_type ar_hash;
+ action_hash_type ar_hash (31);
int last_action = -3;
rtx last_action_insn = NULL_RTX;
rtx last_landing_pad = NULL_RTX;
vec_alloc (crtl->eh.action_record_data, 64);
- ar_hash.create (31);
-
for (iter = get_insns (); iter ; iter = NEXT_INSN (iter))
if (INSN_P (iter))
{
if (nothrow)
continue;
if (region)
- this_action = collect_one_action_chain (ar_hash, region);
+ this_action = collect_one_action_chain (&ar_hash, region);
else
this_action = -1;
call_site_base = saved_call_site_base;
- ar_hash.dispose ();
return 0;
}
#undef fold
static void fold_checksum_tree (const_tree, struct md5_ctx *,
- hash_table <pointer_hash <tree_node> >);
+ hash_table<pointer_hash<tree_node> > *);
static void fold_check_failed (const_tree, const_tree);
void print_fold_checksum (const_tree);
tree ret;
struct md5_ctx ctx;
unsigned char checksum_before[16], checksum_after[16];
- hash_table <pointer_hash <tree_node> > ht;
+ hash_table<pointer_hash<tree_node> > ht (32);
- ht.create (32);
md5_init_ctx (&ctx);
- fold_checksum_tree (expr, &ctx, ht);
+ fold_checksum_tree (expr, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_before);
ht.empty ();
ret = fold_1 (expr);
md5_init_ctx (&ctx);
- fold_checksum_tree (expr, &ctx, ht);
+ fold_checksum_tree (expr, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_after);
- ht.dispose ();
if (memcmp (checksum_before, checksum_after, 16))
fold_check_failed (expr, ret);
{
struct md5_ctx ctx;
unsigned char checksum[16], cnt;
- hash_table <pointer_hash <tree_node> > ht;
+ hash_table<pointer_hash<tree_node> > ht (32);
- ht.create (32);
md5_init_ctx (&ctx);
- fold_checksum_tree (expr, &ctx, ht);
+ fold_checksum_tree (expr, &ctx, &ht);
md5_finish_ctx (&ctx, checksum);
- ht.dispose ();
for (cnt = 0; cnt < 16; ++cnt)
fprintf (stderr, "%02x", checksum[cnt]);
putc ('\n', stderr);
static void
fold_checksum_tree (const_tree expr, struct md5_ctx *ctx,
- hash_table <pointer_hash <tree_node> > ht)
+ hash_table<pointer_hash<tree_node> > *ht)
{
tree_node **slot;
enum tree_code code;
recursive_label:
if (expr == NULL)
return;
- slot = ht.find_slot (expr, INSERT);
+ slot = ht->find_slot (expr, INSERT);
if (*slot != NULL)
return;
*slot = CONST_CAST_TREE (expr);
int i;
unsigned char checksum[16];
struct md5_ctx ctx;
- hash_table <pointer_hash <tree_node> > ht;
- ht.create (32);
+ hash_table<pointer_hash<tree_node> > ht (32);
md5_init_ctx (&ctx);
- fold_checksum_tree (t, &ctx, ht);
+ fold_checksum_tree (t, &ctx, &ht);
md5_finish_ctx (&ctx, checksum);
ht.empty ();
#ifdef ENABLE_FOLD_CHECKING
unsigned char checksum_before[16], checksum_after[16];
struct md5_ctx ctx;
- hash_table <pointer_hash <tree_node> > ht;
+ hash_table<pointer_hash<tree_node> > ht (32);
- ht.create (32);
md5_init_ctx (&ctx);
- fold_checksum_tree (op0, &ctx, ht);
+ fold_checksum_tree (op0, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_before);
ht.empty ();
#endif
#ifdef ENABLE_FOLD_CHECKING
md5_init_ctx (&ctx);
- fold_checksum_tree (op0, &ctx, ht);
+ fold_checksum_tree (op0, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_after);
- ht.dispose ();
if (memcmp (checksum_before, checksum_after, 16))
fold_check_failed (op0, tem);
checksum_after_op0[16],
checksum_after_op1[16];
struct md5_ctx ctx;
- hash_table <pointer_hash <tree_node> > ht;
+ hash_table<pointer_hash<tree_node> > ht (32);
- ht.create (32);
md5_init_ctx (&ctx);
- fold_checksum_tree (op0, &ctx, ht);
+ fold_checksum_tree (op0, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_before_op0);
ht.empty ();
md5_init_ctx (&ctx);
- fold_checksum_tree (op1, &ctx, ht);
+ fold_checksum_tree (op1, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_before_op1);
ht.empty ();
#endif
#ifdef ENABLE_FOLD_CHECKING
md5_init_ctx (&ctx);
- fold_checksum_tree (op0, &ctx, ht);
+ fold_checksum_tree (op0, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_after_op0);
ht.empty ();
fold_check_failed (op0, tem);
md5_init_ctx (&ctx);
- fold_checksum_tree (op1, &ctx, ht);
+ fold_checksum_tree (op1, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_after_op1);
- ht.dispose ();
if (memcmp (checksum_before_op1, checksum_after_op1, 16))
fold_check_failed (op1, tem);
checksum_after_op1[16],
checksum_after_op2[16];
struct md5_ctx ctx;
- hash_table <pointer_hash <tree_node> > ht;
+ hash_table<pointer_hash<tree_node> > ht (32);
- ht.create (32);
md5_init_ctx (&ctx);
- fold_checksum_tree (op0, &ctx, ht);
+ fold_checksum_tree (op0, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_before_op0);
ht.empty ();
md5_init_ctx (&ctx);
- fold_checksum_tree (op1, &ctx, ht);
+ fold_checksum_tree (op1, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_before_op1);
ht.empty ();
md5_init_ctx (&ctx);
- fold_checksum_tree (op2, &ctx, ht);
+ fold_checksum_tree (op2, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_before_op2);
ht.empty ();
#endif
#ifdef ENABLE_FOLD_CHECKING
md5_init_ctx (&ctx);
- fold_checksum_tree (op0, &ctx, ht);
+ fold_checksum_tree (op0, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_after_op0);
ht.empty ();
fold_check_failed (op0, tem);
md5_init_ctx (&ctx);
- fold_checksum_tree (op1, &ctx, ht);
+ fold_checksum_tree (op1, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_after_op1);
ht.empty ();
fold_check_failed (op1, tem);
md5_init_ctx (&ctx);
- fold_checksum_tree (op2, &ctx, ht);
+ fold_checksum_tree (op2, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_after_op2);
- ht.dispose ();
if (memcmp (checksum_before_op2, checksum_after_op2, 16))
fold_check_failed (op2, tem);
checksum_after_fn[16],
checksum_after_arglist[16];
struct md5_ctx ctx;
- hash_table <pointer_hash <tree_node> > ht;
+ hash_table<pointer_hash<tree_node> > ht (32);
int i;
- ht.create (32);
md5_init_ctx (&ctx);
- fold_checksum_tree (fn, &ctx, ht);
+ fold_checksum_tree (fn, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_before_fn);
ht.empty ();
md5_init_ctx (&ctx);
for (i = 0; i < nargs; i++)
- fold_checksum_tree (argarray[i], &ctx, ht);
+ fold_checksum_tree (argarray[i], &ctx, &ht);
md5_finish_ctx (&ctx, checksum_before_arglist);
ht.empty ();
#endif
#ifdef ENABLE_FOLD_CHECKING
md5_init_ctx (&ctx);
- fold_checksum_tree (fn, &ctx, ht);
+ fold_checksum_tree (fn, &ctx, &ht);
md5_finish_ctx (&ctx, checksum_after_fn);
ht.empty ();
md5_init_ctx (&ctx);
for (i = 0; i < nargs; i++)
- fold_checksum_tree (argarray[i], &ctx, ht);
+ fold_checksum_tree (argarray[i], &ctx, &ht);
md5_finish_ctx (&ctx, checksum_after_arglist);
- ht.dispose ();
if (memcmp (checksum_before_arglist, checksum_after_arglist, 16))
fold_check_failed (NULL_TREE, tem);
}
/* Hashtable for the load/store memory refs. */
-static hash_table <pre_ldst_expr_hasher> pre_ldst_table;
+static hash_table<pre_ldst_expr_hasher> *pre_ldst_table;
/* Bitmap containing one bit for each register in the program.
Used when performing GCSE to track which registers have been set since
NULL, /*have_reg_qty=*/false);
e.pattern = x;
- slot = pre_ldst_table.find_slot_with_hash (&e, hash, INSERT);
+ slot = pre_ldst_table->find_slot_with_hash (&e, hash, INSERT);
if (*slot)
return *slot;
static void
free_ld_motion_mems (void)
{
- if (pre_ldst_table.is_created ())
- pre_ldst_table.dispose ();
+ delete pre_ldst_table;
+ pre_ldst_table = NULL;
while (pre_ldst_mems)
{
{
struct ls_expr e;
ls_expr **slot;
- if (!pre_ldst_table.is_created ())
+ if (!pre_ldst_table)
return NULL;
e.pattern = x;
- slot = pre_ldst_table.find_slot (&e, NO_INSERT);
+ slot = pre_ldst_table->find_slot (&e, NO_INSERT);
if (!slot || (*slot)->invalid)
return NULL;
return *slot;
rtx insn;
pre_ldst_mems = NULL;
- pre_ldst_table.create (13);
+ pre_ldst_table = new hash_table<pre_ldst_expr_hasher> (13);
FOR_EACH_BB_FN (bb, cfun)
{
else
{
*last = ptr->next;
- pre_ldst_table.remove_elt_with_hash (ptr, ptr->hash_index);
+ pre_ldst_table->remove_elt_with_hash (ptr, ptr->hash_index);
free_ldst_entry (ptr);
ptr = * last;
}
return p1->obj == p2;
}
-static hash_table <saving_hasher> saving_htab;
+static hash_table<saving_hasher> *saving_htab;
/* Register an object in the hash table. */
return 0;
slot = (struct ptr_data **)
- saving_htab.find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
+ saving_htab->find_slot_with_hash (obj, POINTER_HASH (obj), INSERT);
if (*slot != NULL)
{
gcc_assert ((*slot)->note_ptr_fn == note_ptr_fn
return;
data = (struct ptr_data *)
- saving_htab.find_with_hash (obj, POINTER_HASH (obj));
+ saving_htab->find_with_hash (obj, POINTER_HASH (obj));
gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);
data->reorder_fn = reorder_fn;
return;
result = (struct ptr_data *)
- saving_htab.find_with_hash (*ptr, POINTER_HASH (*ptr));
+ saving_htab->find_with_hash (*ptr, POINTER_HASH (*ptr));
gcc_assert (result);
*ptr = result->new_addr;
}
else
{
new_ptr = (struct ptr_data *)
- saving_htab.find_with_hash (ptr, POINTER_HASH (ptr));
+ saving_htab->find_with_hash (ptr, POINTER_HASH (ptr));
if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
!= 1)
fatal_error ("can%'t write PCH file: %m");
gt_pch_save_stringpool ();
timevar_push (TV_PCH_PTR_REALLOC);
- saving_htab.create (50000);
+ saving_htab = new hash_table<saving_hasher> (50000);
for (rt = gt_ggc_rtab; *rt; rt++)
for (rti = *rt; rti->base != NULL; rti++)
state.f = f;
state.d = init_ggc_pch ();
state.count = 0;
- saving_htab.traverse <traversal_state *, ggc_call_count> (&state);
+ saving_htab->traverse <traversal_state *, ggc_call_count> (&state);
mmi.size = ggc_pch_total_size (state.d);
state.ptrs = XNEWVEC (struct ptr_data *, state.count);
state.ptrs_i = 0;
- saving_htab.traverse <traversal_state *, ggc_call_alloc> (&state);
+ saving_htab->traverse <traversal_state *, ggc_call_alloc> (&state);
timevar_pop (TV_PCH_PTR_REALLOC);
timevar_push (TV_PCH_PTR_SORT);
XDELETE (state.ptrs);
XDELETE (this_object);
- saving_htab.dispose ();
+ delete saving_htab;
+ saving_htab = NULL;
}
/* Read the state of the compiler back in from F. */
}
/* Hashtable used for statistics. */
-static hash_table <loc_desc_hasher> loc_hash;
+static hash_table<loc_desc_hasher> *loc_hash;
struct ptr_hash_entry
{
}
/* Hashtable converting address of allocated field to loc descriptor. */
-static hash_table <ptr_hash_hasher> ptr_hash;
+static hash_table<ptr_hash_hasher> *ptr_hash;
/* Return descriptor for given call site, create new one if needed. */
static struct loc_descriptor *
loc.file = name;
loc.line = line;
loc.function = function;
- if (!loc_hash.is_created ())
- loc_hash.create (10);
+ if (!loc_hash)
+ loc_hash = new hash_table<loc_desc_hasher> (10);
- slot = loc_hash.find_slot (&loc, INSERT);
+ slot = loc_hash->find_slot (&loc, INSERT);
if (*slot)
return *slot;
*slot = XCNEW (struct loc_descriptor);
p->ptr = ptr;
p->loc = loc;
p->size = allocated + overhead;
- if (!ptr_hash.is_created ())
- ptr_hash.create (10);
- slot = ptr_hash.find_slot_with_hash (ptr, htab_hash_pointer (ptr), INSERT);
+ if (!ptr_hash)
+ ptr_hash = new hash_table<ptr_hash_hasher> (10);
+ slot = ptr_hash->find_slot_with_hash (ptr, htab_hash_pointer (ptr), INSERT);
gcc_assert (!*slot);
*slot = p;
if (!ggc_marked_p (p->ptr))
{
p->loc->collected += p->size;
- ptr_hash.clear_slot (slot);
+ ptr_hash->clear_slot (slot);
free (p);
}
return 1;
void
ggc_prune_overhead_list (void)
{
- ptr_hash.traverse <void *, ggc_prune_ptr> (NULL);
+ ptr_hash->traverse <void *, ggc_prune_ptr> (NULL);
}
/* Notice that the pointer has been freed. */
void
ggc_free_overhead (void *ptr)
{
- ptr_hash_entry **slot;
- slot = ptr_hash.find_slot_with_hash (ptr, htab_hash_pointer (ptr), NO_INSERT);
+ ptr_hash_entry **slot
+ = ptr_hash->find_slot_with_hash (ptr, htab_hash_pointer (ptr), NO_INSERT);
struct ptr_hash_entry *p;
/* The pointer might be not found if a PCH read happened between allocation
and ggc_free () call. FIXME: account memory properly in the presence of
return;
p = (struct ptr_hash_entry *) *slot;
p->loc->freed += p->size;
- ptr_hash.clear_slot (slot);
+ ptr_hash->clear_slot (slot);
free (p);
}
ggc_collect ();
loc_array = XCNEWVEC (struct loc_descriptor *,
- loc_hash.elements_with_deleted ());
+ loc_hash->elements_with_deleted ());
fprintf (stderr, "-------------------------------------------------------\n");
fprintf (stderr, "\n%-48s %10s %10s %10s %10s %10s\n",
"source location", "Garbage", "Freed", "Leak", "Overhead", "Times");
fprintf (stderr, "-------------------------------------------------------\n");
- loc_hash.traverse <int *, ggc_add_statistics> (&nentries);
+ loc_hash->traverse <int *, ggc_add_statistics> (&nentries);
qsort (loc_array, nentries, sizeof (*loc_array),
final ? final_cmp_statistic : cmp_statistic);
for (i = 0; i < nentries; i++)
}
/* Hash table embodying a mapping from base exprs to chains of candidates. */
-static hash_table <cand_chain_hasher> base_cand_map;
+static hash_table<cand_chain_hasher> *base_cand_map;
\f
/* Pointer map used by tree_to_aff_combination_expand. */
static struct pointer_map_t *name_expansions;
int max_iters = PARAM_VALUE (PARAM_MAX_SLSR_CANDIDATE_SCAN);
mapping_key.base_expr = base_expr;
- chain = base_cand_map.find (&mapping_key);
+ chain = base_cand_map->find (&mapping_key);
for (; chain && iters < max_iters; chain = chain->next, ++iters)
{
node->base_expr = base;
node->cand = c;
node->next = NULL;
- slot = base_cand_map.find_slot (node, INSERT);
+ slot = base_cand_map->find_slot (node, INSERT);
if (*slot)
{
dump_cand_chains (void)
{
fprintf (dump_file, "\nStrength reduction candidate chains:\n\n");
- base_cand_map.traverse_noresize <void *, ssa_base_cand_dump_callback> (NULL);
+ base_cand_map->traverse_noresize <void *, ssa_base_cand_dump_callback>
+ (NULL);
fputs ("\n", dump_file);
}
gcc_obstack_init (&chain_obstack);
/* Allocate the mapping from base expressions to candidate chains. */
- base_cand_map.create (500);
+ base_cand_map = new hash_table<cand_chain_hasher> (500);
/* Allocate the mapping from bases to alternative bases. */
alt_base_map = pointer_map_create ();
analyze_candidates_and_replace ();
loop_optimizer_finalize ();
- base_cand_map.dispose ();
+ delete base_cand_map;
+ base_cand_map = NULL;
obstack_free (&chain_obstack, NULL);
pointer_map_destroy (stmt_cand_map);
cand_vec.release ();
vec<tree> case_labels;
/* The formal temporary table. Should this be persistent? */
- hash_table <gimplify_hasher> temp_htab;
+ hash_table<gimplify_hasher> *temp_htab;
int conditions;
bool save_stack;
else
record_vars (c->temps);
- if (c->temp_htab.is_created ())
- c->temp_htab.dispose ();
+ delete c->temp_htab;
+ c->temp_htab = NULL;
ctx_free (c);
}
elt_t **slot;
elt.val = val;
- if (!gimplify_ctxp->temp_htab.is_created ())
- gimplify_ctxp->temp_htab.create (1000);
- slot = gimplify_ctxp->temp_htab.find_slot (&elt, INSERT);
+ if (!gimplify_ctxp->temp_htab)
+ gimplify_ctxp->temp_htab = new hash_table<gimplify_hasher> (1000);
+ slot = gimplify_ctxp->temp_htab->find_slot (&elt, INSERT);
if (*slot == NULL)
{
elt_p = XNEW (elt_t);
free (c);
}
-typedef hash_table <clast_index_hasher> clast_index_htab_type;
+typedef hash_table<clast_index_hasher> clast_index_htab_type;
/* Returns a pointer to a new element of type clast_name_index_p built
from NAME, INDEX, LEVEL, BOUND_ONE, and BOUND_TWO. */
vector of parameters. */
static inline int
-clast_name_to_level (clast_name_p name, clast_index_htab_type index_table)
+clast_name_to_level (clast_name_p name, clast_index_htab_type *index_table)
{
struct clast_name_index tmp;
clast_name_index **slot;
tmp.name = ((const struct clast_name *) name)->name;
tmp.free_name = NULL;
- slot = index_table.find_slot (&tmp, NO_INSERT);
+ slot = index_table->find_slot (&tmp, NO_INSERT);
if (slot && *slot)
return ((struct clast_name_index *) *slot)->level;
SCATTERING_DIMENSIONS vector. */
static inline int
-clast_name_to_index (struct clast_name *name, clast_index_htab_type index_table)
+clast_name_to_index (struct clast_name *name,
+ clast_index_htab_type *index_table)
{
struct clast_name_index tmp;
clast_name_index **slot;
tmp.name = ((const struct clast_name *) name)->name;
tmp.free_name = NULL;
- slot = index_table.find_slot (&tmp, NO_INSERT);
+ slot = index_table->find_slot (&tmp, NO_INSERT);
if (slot && *slot)
return (*slot)->index;
found in the INDEX_TABLE, false otherwise. */
static inline bool
-clast_name_to_lb_ub (struct clast_name *name, clast_index_htab_type index_table,
- mpz_t bound_one, mpz_t bound_two)
+clast_name_to_lb_ub (struct clast_name *name,
+ clast_index_htab_type *index_table, mpz_t bound_one,
+ mpz_t bound_two)
{
struct clast_name_index tmp;
clast_name_index **slot;
tmp.name = name->name;
tmp.free_name = NULL;
- slot = index_table.find_slot (&tmp, NO_INSERT);
+ slot = index_table->find_slot (&tmp, NO_INSERT);
if (slot && *slot)
{
/* Records in INDEX_TABLE the INDEX and LEVEL for NAME. */
static inline void
-save_clast_name_index (clast_index_htab_type index_table, const char *name,
+save_clast_name_index (clast_index_htab_type *index_table, const char *name,
int index, int level, mpz_t bound_one, mpz_t bound_two)
{
struct clast_name_index tmp;
tmp.name = name;
tmp.free_name = NULL;
- slot = index_table.find_slot (&tmp, INSERT);
+ slot = index_table->find_slot (&tmp, INSERT);
if (slot)
{
typedef struct ivs_params {
vec<tree> params, *newivs;
- clast_index_htab_type newivs_index, params_index;
+ clast_index_htab_type *newivs_index, *params_index;
sese region;
} *ivs_params_p;
{
int index;
- if (ip->params.exists () && ip->params_index.is_created ())
+ if (ip->params.exists () && ip->params_index)
{
index = clast_name_to_index (name, ip->params_index);
return ip->params[index];
}
- gcc_assert (ip->newivs && ip->newivs_index.is_created ());
+ gcc_assert (ip->newivs && ip->newivs_index);
index = clast_name_to_index (name, ip->newivs_index);
gcc_assert (index >= 0);
{
bool found = false;
- if (ip->params.exists () && ip->params_index.is_created ())
+ if (ip->params.exists () && ip->params_index)
found = clast_name_to_lb_ub (name, ip->params_index, bound_one, bound_two);
if (!found)
{
- gcc_assert (ip->newivs && ip->newivs_index.is_created ());
+ gcc_assert (ip->newivs && ip->newivs_index);
found = clast_name_to_lb_ub (name, ip->newivs_index, bound_one,
bound_two);
gcc_assert (found);
static void
mark_bb_with_pbb (poly_bb_p pbb, basic_block bb,
- bb_pbb_htab_type bb_pbb_mapping)
+ bb_pbb_htab_type *bb_pbb_mapping)
{
bb_pbb_def tmp;
bb_pbb_def **x;
tmp.bb = bb;
- x = bb_pbb_mapping.find_slot (&tmp, INSERT);
+ x = bb_pbb_mapping->find_slot (&tmp, INSERT);
if (x && !*x)
*x = new_bb_pbb_def (bb, pbb);
/* Find BB's related poly_bb_p in hash table BB_PBB_MAPPING. */
poly_bb_p
-find_pbb_via_hash (bb_pbb_htab_type bb_pbb_mapping, basic_block bb)
+find_pbb_via_hash (bb_pbb_htab_type *bb_pbb_mapping, basic_block bb)
{
bb_pbb_def tmp;
bb_pbb_def **slot;
tmp.bb = bb;
- slot = bb_pbb_mapping.find_slot (&tmp, NO_INSERT);
+ slot = bb_pbb_mapping->find_slot (&tmp, NO_INSERT);
if (slot && *slot)
return ((bb_pbb_def *) *slot)->pbb;
related poly_bb_p. */
scop_p
-get_loop_body_pbbs (loop_p loop, bb_pbb_htab_type bb_pbb_mapping,
+get_loop_body_pbbs (loop_p loop, bb_pbb_htab_type *bb_pbb_mapping,
vec<poly_bb_p> *pbbs)
{
unsigned i;
static edge
translate_clast_user (struct clast_user_stmt *stmt, edge next_e,
- bb_pbb_htab_type bb_pbb_mapping, ivs_params_p ip)
+ bb_pbb_htab_type *bb_pbb_mapping, ivs_params_p ip)
{
int i, nb_loops;
basic_block new_bb;
}
static edge
-translate_clast (loop_p, struct clast_stmt *, edge, bb_pbb_htab_type,
+translate_clast (loop_p, struct clast_stmt *, edge, bb_pbb_htab_type *,
int, ivs_params_p);
/* Create the loop for a clast for statement.
static edge
translate_clast_for_loop (loop_p context_loop, struct clast_for *stmt,
- edge next_e, bb_pbb_htab_type bb_pbb_mapping,
+ edge next_e, bb_pbb_htab_type *bb_pbb_mapping,
int level, tree type, tree lb, tree ub,
ivs_params_p ip)
{
static edge
translate_clast_for (loop_p context_loop, struct clast_for *stmt, edge next_e,
- bb_pbb_htab_type bb_pbb_mapping, int level,
+ bb_pbb_htab_type *bb_pbb_mapping, int level,
ivs_params_p ip)
{
tree type, lb, ub;
static edge
translate_clast_guard (loop_p context_loop, struct clast_guard *stmt,
- edge next_e, bb_pbb_htab_type bb_pbb_mapping, int level,
+ edge next_e, bb_pbb_htab_type *bb_pbb_mapping, int level,
ivs_params_p ip)
{
edge last_e = graphite_create_new_guard (next_e, stmt, ip);
static edge
translate_clast (loop_p context_loop, struct clast_stmt *stmt, edge next_e,
- bb_pbb_htab_type bb_pbb_mapping, int level, ivs_params_p ip)
+ bb_pbb_htab_type *bb_pbb_mapping, int level, ivs_params_p ip)
{
if (!stmt)
return next_e;
static CloogUnionDomain *
add_names_to_union_domain (scop_p scop, CloogUnionDomain *union_domain,
int nb_scattering_dims,
- clast_index_htab_type params_index)
+ clast_index_htab_type *params_index)
{
sese region = SCOP_REGION (scop);
int i;
}
static CloogInput *
-generate_cloog_input (scop_p scop, clast_index_htab_type params_index)
+generate_cloog_input (scop_p scop, clast_index_htab_type *params_index)
{
CloogUnionDomain *union_domain;
CloogInput *cloog_input;
without a program. */
static struct clast_stmt *
-scop_to_clast (scop_p scop, clast_index_htab_type params_index)
+scop_to_clast (scop_p scop, clast_index_htab_type *params_index)
{
CloogInput *cloog_input;
struct clast_stmt *clast;
print_generated_program (FILE *file, scop_p scop)
{
CloogOptions *options = set_cloog_options ();
- clast_index_htab_type params_index;
+ clast_index_htab_type *params_index = new clast_index_htab_type (10);
struct clast_stmt *clast;
- params_index.create (10);
-
clast = scop_to_clast (scop, params_index);
fprintf (file, " (clast: \n");
*/
bool
-gloog (scop_p scop, bb_pbb_htab_type bb_pbb_mapping)
+gloog (scop_p scop, bb_pbb_htab_type *bb_pbb_mapping)
{
auto_vec<tree, 10> newivs;
loop_p context_loop;
sese region = SCOP_REGION (scop);
ifsese if_region = NULL;
- clast_index_htab_type newivs_index, params_index;
+ clast_index_htab_type *newivs_index, *params_index;
struct clast_stmt *clast;
struct ivs_params ip;
timevar_push (TV_GRAPHITE_CODE_GEN);
gloog_error = false;
- params_index.create (10);
+ params_index = new clast_index_htab_type (10);
clast = scop_to_clast (scop, params_index);
graphite_verify ();
context_loop = SESE_ENTRY (region)->src->loop_father;
- newivs_index.create (10);
+ newivs_index = new clast_index_htab_type (10);
ip.newivs = &newivs;
ip.newivs_index = newivs_index;
free (if_region->region);
free (if_region);
- newivs_index.dispose ();
- params_index.dispose ();
+ delete newivs_index;
+ newivs_index = NULL;
+ delete params_index;
+ params_index = NULL;
cloog_clast_free (clast);
timevar_pop (TV_GRAPHITE_CODE_GEN);
poly_bb_p. */
bool
-loop_is_parallel_p (loop_p loop, bb_pbb_htab_type bb_pbb_mapping, int depth)
+loop_is_parallel_p (loop_p loop, bb_pbb_htab_type *bb_pbb_mapping, int depth)
{
bool dependences;
scop_p scop;
return (bp1->bb->index == bp2->bb->index);
}
-typedef hash_table <bb_pbb_hasher> bb_pbb_htab_type;
+typedef hash_table<bb_pbb_hasher> bb_pbb_htab_type;
-extern bool gloog (scop_p, bb_pbb_htab_type);
-poly_bb_p find_pbb_via_hash (bb_pbb_htab_type, basic_block);
-bool loop_is_parallel_p (loop_p, bb_pbb_htab_type, int);
-scop_p get_loop_body_pbbs (loop_p, bb_pbb_htab_type, vec<poly_bb_p> *);
+extern bool gloog (scop_p, bb_pbb_htab_type *);
+poly_bb_p find_pbb_via_hash (bb_pbb_htab_type *, basic_block);
+bool loop_is_parallel_p (loop_p, bb_pbb_htab_type *, int);
+scop_p get_loop_body_pbbs (loop_p, bb_pbb_htab_type *, vec<poly_bb_p> *);
#endif
scop_p scop;
bool need_cfg_cleanup_p = false;
vec<scop_p> scops = vNULL;
- bb_pbb_htab_type bb_pbb_mapping;
isl_ctx *ctx;
/* If a function is parallel it was most probably already run through graphite
print_global_statistics (dump_file);
}
- bb_pbb_mapping.create (10);
-
+ bb_pbb_htab_type bb_pbb_mapping (10);
FOR_EACH_VEC_ELT (scops, i, scop)
if (dbg_cnt (graphite_scop))
{
if (POLY_SCOP_P (scop)
&& apply_poly_transforms (scop)
- && gloog (scop, bb_pbb_mapping))
+ && gloog (scop, &bb_pbb_mapping))
need_cfg_cleanup_p = true;
}
- bb_pbb_mapping.dispose ();
free_scops (scops);
graphite_finalize (need_cfg_cleanup_p);
the_isl_ctx = NULL;
/* Two hash tables to record delay_pairs, one indexed by I1 and the other
indexed by I2. */
-static hash_table <delay_i1_hasher> delay_htab;
-static hash_table <delay_i2_hasher> delay_htab_i2;
+static hash_table<delay_i1_hasher> *delay_htab;
+static hash_table<delay_i2_hasher> *delay_htab_i2;
/* Called through htab_traverse. Walk the hashtable using I2 as
index, and delete all elements involving an UID higher than
struct delay_pair *p = *slot;
if (INSN_UID (p->i2) >= maxuid || INSN_UID (p->i1) >= maxuid)
{
- delay_htab_i2.clear_slot (slot);
+ delay_htab_i2->clear_slot (slot);
}
return 1;
}
if (INSN_UID ((*pslot)->i1) >= maxuid)
{
- delay_htab.clear_slot (pslot);
+ delay_htab->clear_slot (pslot);
return 1;
}
pprev = &first;
}
*pprev = NULL;
if (first == NULL)
- delay_htab.clear_slot (pslot);
+ delay_htab->clear_slot (pslot);
else
*pslot = first;
return 1;
void
discard_delay_pairs_above (int max_uid)
{
- delay_htab.traverse <int *, haifa_htab_i1_traverse> (&max_uid);
- delay_htab_i2.traverse <int *, haifa_htab_i2_traverse> (&max_uid);
+ delay_htab->traverse <int *, haifa_htab_i1_traverse> (&max_uid);
+ delay_htab_i2->traverse <int *, haifa_htab_i2_traverse> (&max_uid);
}
/* This function can be called by a port just before it starts the final
p->cycles = cycles;
p->stages = stages;
- if (!delay_htab.is_created ())
+ if (!delay_htab)
{
- delay_htab.create (10);
- delay_htab_i2.create (10);
+ delay_htab = new hash_table<delay_i1_hasher> (10);
+ delay_htab_i2 = new hash_table<delay_i2_hasher> (10);
}
- slot = delay_htab.find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
+ slot = delay_htab->find_slot_with_hash (i1, htab_hash_pointer (i1), INSERT);
p->next_same_i1 = *slot;
*slot = p;
- slot = delay_htab_i2.find_slot_with_hash (i2, htab_hash_pointer (i2), INSERT);
+ slot = delay_htab_i2->find_slot (p, INSERT);
*slot = p;
}
{
struct delay_pair *pair;
- if (!delay_htab.is_created ())
+ if (!delay_htab)
return NULL_RTX;
- pair = delay_htab_i2.find_with_hash (insn, htab_hash_pointer (insn));
+ pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
if (!pair || pair->stages > 0)
return NULL_RTX;
return pair->i1;
sd_iterator_def sd_it;
dep_t dep;
- if (!delay_htab.is_created ())
+ if (!delay_htab)
return;
- pair = delay_htab_i2.find_with_hash (insn, htab_hash_pointer (insn));
+ pair = delay_htab_i2->find_with_hash (insn, htab_hash_pointer (insn));
if (!pair)
return;
add_dependence (insn, pair->i1, REG_DEP_ANTI);
{
rtx pro = DEP_PRO (dep);
struct delay_pair *other_pair
- = delay_htab_i2.find_with_hash (pro, htab_hash_pointer (pro));
+ = delay_htab_i2->find_with_hash (pro, htab_hash_pointer (pro));
if (!other_pair || other_pair->stages)
continue;
if (pair_delay (other_pair) >= pair_delay (pair))
if (DEP_COST (link) != UNKNOWN_DEP_COST)
return DEP_COST (link);
- if (delay_htab.is_created ())
+ if (delay_htab)
{
struct delay_pair *delay_entry;
delay_entry
- = delay_htab_i2.find_with_hash (used, htab_hash_pointer (used));
+ = delay_htab_i2->find_with_hash (used, htab_hash_pointer (used));
if (delay_entry)
{
if (delay_entry->i1 == insn)
{
int delay_cost = 0;
- if (delay_htab.is_created ())
+ if (delay_htab)
{
struct delay_pair *delay_entry;
delay_entry
- = delay_htab.find_with_hash (insn,
- htab_hash_pointer (insn));
+ = delay_htab->find_with_hash (insn,
+ htab_hash_pointer (insn));
while (delay_entry && delay_cost == 0)
{
delay_cost = estimate_shadow_tick (delay_entry);
goto restart_choose_ready;
}
- if (delay_htab.is_created ())
+ if (delay_htab)
{
/* If this insn is the first part of a delay-slot pair, record a
backtrack point. */
struct delay_pair *delay_entry;
delay_entry
- = delay_htab.find_with_hash (insn, htab_hash_pointer (insn));
+ = delay_htab->find_with_hash (insn, htab_hash_pointer (insn));
if (delay_entry)
{
save_backtrack_point (delay_entry, ls);
void
free_delay_pairs (void)
{
- if (delay_htab.is_created ())
+ if (delay_htab)
{
- delay_htab.empty ();
- delay_htab_i2.empty ();
+ delay_htab->empty ();
+ delay_htab_i2->empty ();
}
}
is allocated. This type is called the allocator type. It is
parameterized on the value type. It provides four functions.
- - A static member function named 'control_alloc'. This function
- allocates the control data blocks for the table.
-
- - A static member function named 'control_free'. This function
- frees the control data blocks for the table.
-
- A static member function named 'data_alloc'. This function
allocates the data elements in the table.
template <typename Type>
struct xcallocator
{
- static Type *control_alloc (size_t count);
static Type *data_alloc (size_t count);
- static void control_free (Type *memory);
static void data_free (Type *memory);
};
-/* Allocate memory for COUNT control blocks. */
-
-template <typename Type>
-inline Type *
-xcallocator <Type>::control_alloc (size_t count)
-{
- return static_cast <Type *> (xcalloc (count, sizeof (Type)));
-}
-
-
/* Allocate memory for COUNT data blocks. */
template <typename Type>
}
-/* Free memory for control blocks. */
-
-template <typename Type>
-inline void
-xcallocator <Type>::control_free (Type *memory)
-{
- return ::free (memory);
-}
-
-
/* Free memory for data blocks. */
template <typename Type>
extern hashval_t hash_table_mod2 (hashval_t hash, unsigned int index);
-/* Internal implementation type. */
-
-template <typename T>
-struct hash_table_control
-{
- /* Table itself. */
- T **entries;
-
- /* Current size (in entries) of the hash table. */
- size_t size;
-
- /* Current number of elements including also deleted elements. */
- size_t n_elements;
-
- /* Current number of deleted elements in the table. */
- size_t n_deleted;
-
- /* The following member is used for debugging. Its value is number
- of all calls of `htab_find_slot' for the hash table. */
- unsigned int searches;
-
- /* The following member is used for debugging. Its value is number
- of collisions fixed for time of work with the hash table. */
- unsigned int collisions;
-
- /* Current size (in entries) of the hash table, as an index into the
- table of primes. */
- unsigned int size_prime_index;
-};
-
-
/* User-facing hash table type.
The table stores elements of type Descriptor::value_type.
The default is xcallocator.
*/
-
template <typename Descriptor,
- template <typename Type> class Allocator = xcallocator>
+ template<typename Type> class Allocator = xcallocator>
class hash_table
{
-public:
typedef typename Descriptor::value_type value_type;
typedef typename Descriptor::compare_type compare_type;
- class iterator
- {
- public:
- inline iterator ();
- inline iterator (value_type **, value_type **);
- inline value_type &operator * ();
- void slide ();
- inline iterator &operator ++ ();
- inline bool operator != (const iterator &) const;
- private:
- value_type **m_slot;
- value_type **m_limit;
- };
-
-private:
- hash_table_control <value_type> *htab;
-
- value_type **find_empty_slot_for_expand (hashval_t hash);
- void expand ();
-
public:
- hash_table ();
- void create (size_t initial_slots);
- bool is_created ();
- void dispose ();
- value_type *find (const value_type *value);
- value_type *find_with_hash (const compare_type *comparable, hashval_t hash);
- value_type **find_slot (const value_type *value, enum insert_option insert);
- value_type **find_slot_with_hash (const compare_type *comparable,
- hashval_t hash, enum insert_option insert);
- void empty ();
- void clear_slot (value_type **slot);
- void remove_elt (const value_type *value);
- void remove_elt_with_hash (const compare_type *comparable, hashval_t hash);
- size_t size ();
- size_t elements ();
- size_t elements_with_deleted ();
- double collisions ();
-
- template <typename Argument,
- int (*Callback) (value_type **slot, Argument argument)>
- void traverse_noresize (Argument argument);
-
- template <typename Argument,
- int (*Callback) (value_type **slot, Argument argument)>
- void traverse (Argument argument);
+ hash_table (size_t);
+ ~hash_table ();
- iterator begin ();
- iterator end ();
-};
+ /* Current size (in entries) of the hash table. */
+ size_t size () const { return m_size; }
+ /* Return the current number of elements in this hash table. */
+ size_t elements () const { return m_n_elements - m_n_deleted; }
-/* Construct the hash table. The only useful operation next is create. */
-
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline
-hash_table <Descriptor, Allocator>::hash_table ()
-: htab (NULL)
-{
-}
+ /* Return the current number of elements in this hash table. */
+ size_t elements_with_deleted () const { return m_n_elements; }
+ /* This function clears all entries in the given hash table. */
+ void empty ();
-/* See if the table has been created, as opposed to constructed. */
+ /* This function clears a specified SLOT in a hash table. It is
+ useful when you've already done the lookup and don't want to do it
+ again. */
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline bool
-hash_table <Descriptor, Allocator>::is_created ()
-{
- return htab != NULL;
-}
+ void clear_slot (value_type **);
+ /* This function searches for a hash table entry equal to the given
+ COMPARABLE element starting with the given HASH value. It cannot
+ be used to insert or delete an element. */
+ value_type *find_with_hash (const compare_type *, hashval_t);
-/* Like find_with_hash, but compute the hash value from the element. */
+ /* Like find_with_hash, but compute the hash value from the element. */
+ value_type *find (const value_type *value)
+ {
+ return find_with_hash (value, Descriptor::hash (value));
+ }
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline typename Descriptor::value_type *
-hash_table <Descriptor, Allocator>::find (const value_type *value)
-{
- return find_with_hash (value, Descriptor::hash (value));
-}
+ value_type **find_slot (const value_type *value, insert_option insert)
+ {
+ return find_slot_with_hash (value, Descriptor::hash (value), insert);
+ }
+ /* This function searches for a hash table slot containing an entry
+ equal to the given COMPARABLE element and starting with the given
+ HASH. To delete an entry, call this with insert=NO_INSERT, then
+ call clear_slot on the slot returned (possibly after doing some
+ checks). To insert an entry, call this with insert=INSERT, then
+ write the value you want into the returned slot. When inserting an
+ entry, NULL may be returned if memory allocation fails. */
+ value_type **find_slot_with_hash (const compare_type *comparable,
+ hashval_t hash, enum insert_option insert);
-/* Like find_slot_with_hash, but compute the hash value from the element. */
+ /* This function deletes an element with the given COMPARABLE value
+ from hash table starting with the given HASH. If there is no
+ matching element in the hash table, this function does nothing. */
+ void remove_elt_with_hash (const compare_type *, hashval_t);
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline typename Descriptor::value_type **
-hash_table <Descriptor, Allocator>
-::find_slot (const value_type *value, enum insert_option insert)
-{
- return find_slot_with_hash (value, Descriptor::hash (value), insert);
-}
+ /* Like remove_elt_with_hash, but compute the hash value from the element. */
+ void remove_elt (const value_type *value)
+ {
+ remove_elt_with_hash (value, Descriptor::hash (value));
+ }
+ /* This function scans over the entire hash table calling CALLBACK for
+ each live entry. If CALLBACK returns false, the iteration stops.
+ ARGUMENT is passed as CALLBACK's second argument. */
+ template <typename Argument,
+ int (*Callback) (value_type **slot, Argument argument)>
+ void traverse_noresize (Argument argument);
-/* Like remove_elt_with_hash, but compute the hash value from the element. */
+ /* Like traverse_noresize, but does resize the table when it is too empty
+ to improve effectivity of subsequent calls. */
+ template <typename Argument,
+ int (*Callback) (value_type **slot, Argument argument)>
+ void traverse (Argument argument);
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline void
-hash_table <Descriptor, Allocator>::remove_elt (const value_type *value)
-{
- remove_elt_with_hash (value, Descriptor::hash (value));
-}
+ class iterator
+ {
+ public:
+ iterator () : m_slot (NULL), m_limit (NULL) {}
+ iterator (value_type **slot, value_type **limit) :
+ m_slot (slot), m_limit (limit) {}
-/* Return the current size of this hash table. */
+ inline value_type &operator * () { return **m_slot; }
+ void slide ();
+ inline iterator &operator ++ ();
+ bool operator != (const iterator &other) const
+ {
+ return m_slot != other.m_slot || m_limit != other.m_limit;
+ }
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline size_t
-hash_table <Descriptor, Allocator>::size ()
-{
- return htab->size;
-}
+ private:
+ value_type **m_slot;
+ value_type **m_limit;
+ };
+ iterator begin () const
+ {
+ iterator iter (m_entries, m_entries + m_size);
+ iter.slide ();
+ return iter;
+ }
-/* Return the current number of elements in this hash table. */
+ iterator end () const { return iterator (); }
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline size_t
-hash_table <Descriptor, Allocator>::elements ()
-{
- return htab->n_elements - htab->n_deleted;
-}
+ double collisions () const
+ {
+ return m_searches ? static_cast <double> (m_collisions) / m_searches : 0;
+ }
+private:
-/* Return the current number of elements in this hash table. */
+ value_type **find_empty_slot_for_expand (hashval_t);
+ void expand ();
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline size_t
-hash_table <Descriptor, Allocator>::elements_with_deleted ()
-{
- return htab->n_elements;
-}
+ /* Table itself. */
+ typename Descriptor::value_type **m_entries;
+ size_t m_size;
- /* Return the fraction of fixed collisions during all work with given
- hash table. */
+ /* Current number of elements including also deleted elements. */
+ size_t m_n_elements;
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline double
-hash_table <Descriptor, Allocator>::collisions ()
-{
- if (htab->searches == 0)
- return 0.0;
+ /* Current number of deleted elements in the table. */
+ size_t m_n_deleted;
- return static_cast <double> (htab->collisions) / htab->searches;
-}
+ /* The following member is used for debugging. Its value is number
+ of all calls of `htab_find_slot' for the hash table. */
+ unsigned int m_searches;
+ /* The following member is used for debugging. Its value is number
+ of collisions fixed for time of work with the hash table. */
+ unsigned int m_collisions;
-/* Create a hash table with at least the given number of INITIAL_SLOTS. */
+ /* Current size (in entries) of the hash table, as an index into the
+ table of primes. */
+ unsigned int m_size_prime_index;
+};
-template <typename Descriptor,
- template <typename Type> class Allocator>
-void
-hash_table <Descriptor, Allocator>::create (size_t size)
+template<typename Descriptor, template<typename Type> class Allocator>
+hash_table<Descriptor, Allocator>::hash_table (size_t size) :
+ m_n_elements (0), m_n_deleted (0), m_searches (0), m_collisions (0)
{
unsigned int size_prime_index;
size_prime_index = hash_table_higher_prime_index (size);
size = prime_tab[size_prime_index].prime;
- htab = Allocator <hash_table_control <value_type> > ::control_alloc (1);
- gcc_assert (htab != NULL);
- htab->entries = Allocator <value_type*> ::data_alloc (size);
- gcc_assert (htab->entries != NULL);
- htab->size = size;
- htab->size_prime_index = size_prime_index;
+ m_entries = Allocator <value_type*> ::data_alloc (size);
+ gcc_assert (m_entries != NULL);
+ m_size = size;
+ m_size_prime_index = size_prime_index;
}
-
-/* Dispose of a hash table. Free all memory and return this hash table to
- the non-created state. Naturally the hash table must already exist. */
-
-template <typename Descriptor,
- template <typename Type> class Allocator>
-void
-hash_table <Descriptor, Allocator>::dispose ()
+template<typename Descriptor, template<typename Type> class Allocator>
+hash_table<Descriptor, Allocator>::~hash_table ()
{
- size_t size = htab->size;
- value_type **entries = htab->entries;
+ for (size_t i = m_size - 1; i < m_size; i--)
+ if (m_entries[i] != HTAB_EMPTY_ENTRY && m_entries[i] != HTAB_DELETED_ENTRY)
+ Descriptor::remove (m_entries[i]);
- for (int i = size - 1; i >= 0; i--)
- if (entries[i] != HTAB_EMPTY_ENTRY && entries[i] != HTAB_DELETED_ENTRY)
- Descriptor::remove (entries[i]);
-
- Allocator <value_type *> ::data_free (entries);
- Allocator <hash_table_control <value_type> > ::control_free (htab);
- htab = NULL;
+ Allocator <value_type *> ::data_free (m_entries);
}
-
/* Similar to find_slot, but without several unwanted side effects:
- Does not call equal when it finds an existing entry.
- Does not change the count of elements/searches/collisions in the
This function also assumes there are no deleted entries in the table.
HASH is the hash value for the element to be inserted. */
-template <typename Descriptor,
- template <typename Type> class Allocator>
+template<typename Descriptor, template<typename Type> class Allocator>
typename Descriptor::value_type **
-hash_table <Descriptor, Allocator>::find_empty_slot_for_expand (hashval_t hash)
+hash_table<Descriptor, Allocator>::find_empty_slot_for_expand (hashval_t hash)
{
- hashval_t index = hash_table_mod1 (hash, htab->size_prime_index);
- size_t size = htab->size;
- value_type **slot = htab->entries + index;
+ hashval_t index = hash_table_mod1 (hash, m_size_prime_index);
+ size_t size = m_size;
+ value_type **slot = m_entries + index;
hashval_t hash2;
if (*slot == HTAB_EMPTY_ENTRY)
else if (*slot == HTAB_DELETED_ENTRY)
abort ();
- hash2 = hash_table_mod2 (hash, htab->size_prime_index);
+ hash2 = hash_table_mod2 (hash, m_size_prime_index);
for (;;)
{
index += hash2;
if (index >= size)
index -= size;
- slot = htab->entries + index;
+ slot = m_entries + index;
if (*slot == HTAB_EMPTY_ENTRY)
return slot;
else if (*slot == HTAB_DELETED_ENTRY)
}
}
-
/* The following function changes size of memory allocated for the
entries and repeatedly inserts the table elements. The occupancy
of the table after the call will be about 50%. Naturally the hash
table entries is changed. If memory allocation fails, this function
will abort. */
-template <typename Descriptor,
- template <typename Type> class Allocator>
+template<typename Descriptor, template<typename Type> class Allocator>
void
-hash_table <Descriptor, Allocator>::expand ()
+hash_table<Descriptor, Allocator>::expand ()
{
- value_type **oentries;
- value_type **olimit;
- value_type **p;
- value_type **nentries;
- size_t nsize, osize, elts;
- unsigned int oindex, nindex;
-
- oentries = htab->entries;
- oindex = htab->size_prime_index;
- osize = htab->size;
- olimit = oentries + osize;
- elts = elements ();
+ value_type **oentries = m_entries;
+ unsigned int oindex = m_size_prime_index;
+ size_t osize = size ();
+ value_type **olimit = oentries + osize;
+ size_t elts = elements ();
/* Resize only when table after removal of unused elements is either
too full or too empty. */
+ unsigned int nindex;
+ size_t nsize;
if (elts * 2 > osize || (elts * 8 < osize && osize > 32))
{
nindex = hash_table_higher_prime_index (elts * 2);
nsize = osize;
}
- nentries = Allocator <value_type *> ::data_alloc (nsize);
+ value_type **nentries = Allocator <value_type *> ::data_alloc (nsize);
gcc_assert (nentries != NULL);
- htab->entries = nentries;
- htab->size = nsize;
- htab->size_prime_index = nindex;
- htab->n_elements -= htab->n_deleted;
- htab->n_deleted = 0;
+ m_entries = nentries;
+ m_size = nsize;
+ m_size_prime_index = nindex;
+ m_n_elements -= m_n_deleted;
+ m_n_deleted = 0;
- p = oentries;
+ value_type **p = oentries;
do
{
value_type *x = *p;
Allocator <value_type *> ::data_free (oentries);
}
+template<typename Descriptor, template<typename Type> class Allocator>
+void
+hash_table<Descriptor, Allocator>::empty ()
+{
+ size_t size = m_size;
+ value_type **entries = m_entries;
+ int i;
+
+ for (i = size - 1; i >= 0; i--)
+ if (entries[i] != HTAB_EMPTY_ENTRY && entries[i] != HTAB_DELETED_ENTRY)
+ Descriptor::remove (entries[i]);
+
+ /* Instead of clearing megabyte, downsize the table. */
+ if (size > 1024*1024 / sizeof (PTR))
+ {
+ int nindex = hash_table_higher_prime_index (1024 / sizeof (PTR));
+ int nsize = prime_tab[nindex].prime;
+
+ Allocator <value_type *> ::data_free (m_entries);
+ m_entries = Allocator <value_type *> ::data_alloc (nsize);
+ m_size = nsize;
+ m_size_prime_index = nindex;
+ }
+ else
+ memset (entries, 0, size * sizeof (value_type *));
+ m_n_deleted = 0;
+ m_n_elements = 0;
+}
+
+/* This function clears a specified SLOT in a hash table. It is
+ useful when you've already done the lookup and don't want to do it
+ again. */
+
+template<typename Descriptor, template<typename Type> class Allocator>
+void
+hash_table<Descriptor, Allocator>::clear_slot (value_type **slot)
+{
+ if (slot < m_entries || slot >= m_entries + size ()
+ || *slot == HTAB_EMPTY_ENTRY || *slot == HTAB_DELETED_ENTRY)
+ abort ();
+
+ Descriptor::remove (*slot);
+
+ *slot = static_cast <value_type *> (HTAB_DELETED_ENTRY);
+ m_n_deleted++;
+}
/* This function searches for a hash table entry equal to the given
COMPARABLE element starting with the given HASH value. It cannot
be used to insert or delete an element. */
-template <typename Descriptor,
- template <typename Type> class Allocator>
+template<typename Descriptor, template<typename Type> class Allocator>
typename Descriptor::value_type *
-hash_table <Descriptor, Allocator>
+hash_table<Descriptor, Allocator>
::find_with_hash (const compare_type *comparable, hashval_t hash)
{
- hashval_t index, hash2;
- size_t size;
- value_type *entry;
+ m_searches++;
+ size_t size = m_size;
+ hashval_t index = hash_table_mod1 (hash, m_size_prime_index);
- htab->searches++;
- size = htab->size;
- index = hash_table_mod1 (hash, htab->size_prime_index);
-
- entry = htab->entries[index];
+ value_type *entry = m_entries[index];
if (entry == HTAB_EMPTY_ENTRY
|| (entry != HTAB_DELETED_ENTRY && Descriptor::equal (entry, comparable)))
return entry;
- hash2 = hash_table_mod2 (hash, htab->size_prime_index);
+ hashval_t hash2 = hash_table_mod2 (hash, m_size_prime_index);
for (;;)
{
- htab->collisions++;
+ m_collisions++;
index += hash2;
if (index >= size)
index -= size;
- entry = htab->entries[index];
+ entry = m_entries[index];
if (entry == HTAB_EMPTY_ENTRY
|| (entry != HTAB_DELETED_ENTRY
&& Descriptor::equal (entry, comparable)))
}
}
-
/* This function searches for a hash table slot containing an entry
equal to the given COMPARABLE element and starting with the given
HASH. To delete an entry, call this with insert=NO_INSERT, then
write the value you want into the returned slot. When inserting an
entry, NULL may be returned if memory allocation fails. */
-template <typename Descriptor,
- template <typename Type> class Allocator>
+template<typename Descriptor, template<typename Type> class Allocator>
typename Descriptor::value_type **
-hash_table <Descriptor, Allocator>
+hash_table<Descriptor, Allocator>
::find_slot_with_hash (const compare_type *comparable, hashval_t hash,
enum insert_option insert)
{
- value_type **first_deleted_slot;
- hashval_t index, hash2;
- size_t size;
- value_type *entry;
-
- size = htab->size;
- if (insert == INSERT && size * 3 <= htab->n_elements * 4)
- {
- expand ();
- size = htab->size;
- }
-
- index = hash_table_mod1 (hash, htab->size_prime_index);
+ if (insert == INSERT && m_size * 3 <= m_n_elements * 4)
+ expand ();
- htab->searches++;
- first_deleted_slot = NULL;
+ m_searches++;
- entry = htab->entries[index];
+ value_type **first_deleted_slot = NULL;
+ hashval_t index = hash_table_mod1 (hash, m_size_prime_index);
+ hashval_t hash2 = hash_table_mod2 (hash, m_size_prime_index);
+ value_type *entry = m_entries[index];
+ size_t size = m_size;
if (entry == HTAB_EMPTY_ENTRY)
goto empty_entry;
else if (entry == HTAB_DELETED_ENTRY)
- first_deleted_slot = &htab->entries[index];
+ first_deleted_slot = &m_entries[index];
else if (Descriptor::equal (entry, comparable))
- return &htab->entries[index];
+ return &m_entries[index];
- hash2 = hash_table_mod2 (hash, htab->size_prime_index);
for (;;)
{
- htab->collisions++;
+ m_collisions++;
index += hash2;
if (index >= size)
index -= size;
- entry = htab->entries[index];
+ entry = m_entries[index];
if (entry == HTAB_EMPTY_ENTRY)
goto empty_entry;
else if (entry == HTAB_DELETED_ENTRY)
{
if (!first_deleted_slot)
- first_deleted_slot = &htab->entries[index];
+ first_deleted_slot = &m_entries[index];
}
else if (Descriptor::equal (entry, comparable))
- return &htab->entries[index];
+ return &m_entries[index];
}
empty_entry:
if (first_deleted_slot)
{
- htab->n_deleted--;
+ m_n_deleted--;
*first_deleted_slot = static_cast <value_type *> (HTAB_EMPTY_ENTRY);
return first_deleted_slot;
}
- htab->n_elements++;
- return &htab->entries[index];
-}
-
-
-/* This function clears all entries in the given hash table. */
-
-template <typename Descriptor,
- template <typename Type> class Allocator>
-void
-hash_table <Descriptor, Allocator>::empty ()
-{
- size_t size = htab->size;
- value_type **entries = htab->entries;
- int i;
-
- for (i = size - 1; i >= 0; i--)
- if (entries[i] != HTAB_EMPTY_ENTRY && entries[i] != HTAB_DELETED_ENTRY)
- Descriptor::remove (entries[i]);
-
- /* Instead of clearing megabyte, downsize the table. */
- if (size > 1024*1024 / sizeof (PTR))
- {
- int nindex = hash_table_higher_prime_index (1024 / sizeof (PTR));
- int nsize = prime_tab[nindex].prime;
-
- Allocator <value_type *> ::data_free (htab->entries);
- htab->entries = Allocator <value_type *> ::data_alloc (nsize);
- htab->size = nsize;
- htab->size_prime_index = nindex;
- }
- else
- memset (entries, 0, size * sizeof (value_type *));
- htab->n_deleted = 0;
- htab->n_elements = 0;
-}
-
-
-/* This function clears a specified SLOT in a hash table. It is
- useful when you've already done the lookup and don't want to do it
- again. */
-
-template <typename Descriptor,
- template <typename Type> class Allocator>
-void
-hash_table <Descriptor, Allocator>::clear_slot (value_type **slot)
-{
- if (slot < htab->entries || slot >= htab->entries + htab->size
- || *slot == HTAB_EMPTY_ENTRY || *slot == HTAB_DELETED_ENTRY)
- abort ();
-
- Descriptor::remove (*slot);
-
- *slot = static_cast <value_type *> (HTAB_DELETED_ENTRY);
- htab->n_deleted++;
+ m_n_elements++;
+ return &m_entries[index];
}
-
/* This function deletes an element with the given COMPARABLE value
from hash table starting with the given HASH. If there is no
matching element in the hash table, this function does nothing. */
-template <typename Descriptor,
- template <typename Type> class Allocator>
+template<typename Descriptor, template<typename Type> class Allocator>
void
-hash_table <Descriptor, Allocator>
+hash_table<Descriptor, Allocator>
::remove_elt_with_hash (const compare_type *comparable, hashval_t hash)
{
- value_type **slot;
-
- slot = find_slot_with_hash (comparable, hash, NO_INSERT);
+ value_type **slot = find_slot_with_hash (comparable, hash, NO_INSERT);
if (*slot == HTAB_EMPTY_ENTRY)
return;
Descriptor::remove (*slot);
*slot = static_cast <value_type *> (HTAB_DELETED_ENTRY);
- htab->n_deleted++;
+ m_n_deleted++;
}
-
/* This function scans over the entire hash table calling CALLBACK for
each live entry. If CALLBACK returns false, the iteration stops.
ARGUMENT is passed as CALLBACK's second argument. */
-template <typename Descriptor,
- template <typename Type> class Allocator>
-template <typename Argument,
+template<typename Descriptor,
+ template<typename Type> class Allocator>
+template<typename Argument,
int (*Callback) (typename Descriptor::value_type **slot, Argument argument)>
void
-hash_table <Descriptor, Allocator>::traverse_noresize (Argument argument)
+hash_table<Descriptor, Allocator>::traverse_noresize (Argument argument)
{
- value_type **slot;
- value_type **limit;
-
- slot = htab->entries;
- limit = slot + htab->size;
+ value_type **slot = m_entries;
+ value_type **limit = slot + size ();
do
{
while (++slot < limit);
}
-
/* Like traverse_noresize, but does resize the table when it is too empty
to improve effectivity of subsequent calls. */
int (*Callback) (typename Descriptor::value_type **slot,
Argument argument)>
void
-hash_table <Descriptor, Allocator>::traverse (Argument argument)
+hash_table<Descriptor, Allocator>::traverse (Argument argument)
{
- size_t size = htab->size;
+ size_t size = m_size;
if (elements () * 8 < size && size > 32)
expand ();
traverse_noresize <Argument, Callback> (argument);
}
-
-/* Iterator definitions. */
-
-/* The default constructor produces the end value. */
-
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline
-hash_table <Descriptor, Allocator>::iterator::iterator ()
-: m_slot (NULL), m_limit (NULL)
-{
-}
-
-/* The parameterized constructor produces the begin value. */
-
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline
-hash_table <Descriptor, Allocator>::iterator::iterator
- (value_type **slot, value_type **limit)
-: m_slot (slot), m_limit (limit)
-{
-}
-
-/* Obtain the element. */
-
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline typename hash_table <Descriptor, Allocator>::value_type &
-hash_table <Descriptor, Allocator>::iterator::operator * ()
-{
- return **m_slot;
-}
-
/* Slide down the iterator slots until an active entry is found. */
-template <typename Descriptor,
- template <typename Type> class Allocator>
+template<typename Descriptor, template<typename Type> class Allocator>
void
-hash_table <Descriptor, Allocator>::iterator::slide ()
+hash_table<Descriptor, Allocator>::iterator::slide ()
{
for ( ; m_slot < m_limit; ++m_slot )
{
/* Bump the iterator. */
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline typename hash_table <Descriptor, Allocator>::iterator &
-hash_table <Descriptor, Allocator>::iterator::operator ++ ()
+template<typename Descriptor, template<typename Type> class Allocator>
+inline typename hash_table<Descriptor, Allocator>::iterator &
+hash_table<Descriptor, Allocator>::iterator::operator ++ ()
{
++m_slot;
slide ();
return *this;
}
-/* Compare iterators. */
-
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline bool
-hash_table <Descriptor, Allocator>::iterator::
- operator != (const iterator &other) const
-{
- return m_slot != other.m_slot || m_limit != other.m_limit;
-}
-
-/* Hash table iterator producers. */
-
-/* The beginning of a hash table iteration. */
-
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline typename hash_table <Descriptor, Allocator>::iterator
-hash_table <Descriptor, Allocator>::begin ()
-{
- iterator hti (htab->entries, htab->entries + htab->size);
- hti.slide ();
- return hti;
-}
-
-/* The end of a hash table iteration. */
-
-template <typename Descriptor,
- template <typename Type> class Allocator>
-inline typename hash_table <Descriptor, Allocator>::iterator
-hash_table <Descriptor, Allocator>::end ()
-{
- return iterator ();
-}
/* Iterate through the elements of hash_table HTAB,
using hash_table <....>::iterator ITER,
/* ODR type hash used to lookup ODR type based on tree type node. */
-typedef hash_table <odr_hasher> odr_hash_type;
-static odr_hash_type odr_hash;
+typedef hash_table<odr_hasher> odr_hash_type;
+static odr_hash_type *odr_hash;
/* ODR types are also stored into ODR_TYPE vector to allow consistent
walking. Bases appear before derived types. Vector is garbage collected
type = TYPE_MAIN_VARIANT (type);
gcc_checking_assert (TYPE_MAIN_VARIANT (type) == type);
hash = hash_type_name (type);
- slot = odr_hash.find_slot_with_hash (type, hash, insert ? INSERT : NO_INSERT);
+ slot
+ = odr_hash->find_slot_with_hash (type, hash, insert ? INSERT : NO_INSERT);
if (!slot)
return NULL;
FILE *inheritance_dump_file;
int flags;
- if (odr_hash.is_created ())
+ if (odr_hash)
return;
timevar_push (TV_IPA_INHERITANCE);
inheritance_dump_file = dump_begin (TDI_inheritance, &flags);
- odr_hash.create (23);
+ odr_hash = new odr_hash_type (23);
/* We reconstruct the graph starting of types of all methods seen in the
the unit. */
/* Polymorphic call target query cache. */
-typedef hash_table <polymorphic_call_target_hasher>
+typedef hash_table<polymorphic_call_target_hasher>
polymorphic_call_target_hash_type;
-static polymorphic_call_target_hash_type polymorphic_call_target_hash;
+static polymorphic_call_target_hash_type *polymorphic_call_target_hash;
/* Destroy polymorphic call target query cache. */
{
if (cached_polymorphic_call_targets)
{
- polymorphic_call_target_hash.dispose ();
+ delete polymorphic_call_target_hash;
+ polymorphic_call_target_hash = NULL;
pointer_set_destroy (cached_polymorphic_call_targets);
cached_polymorphic_call_targets = NULL;
}
bool skipped = false;
/* If ODR is not initialized, return empty incomplete list. */
- if (!odr_hash.is_created ())
+ if (!odr_hash)
{
if (completep)
*completep = false;
if (!cached_polymorphic_call_targets)
{
cached_polymorphic_call_targets = pointer_set_create ();
- polymorphic_call_target_hash.create (23);
+ polymorphic_call_target_hash
+ = new polymorphic_call_target_hash_type (23);
if (!node_removal_hook_holder)
{
node_removal_hook_holder =
key.type = type;
key.otr_token = otr_token;
key.context = context;
- slot = polymorphic_call_target_hash.find_slot (&key, INSERT);
+ slot = polymorphic_call_target_hash->find_slot (&key, INSERT);
if (cache_token)
*cache_token = (void *)*slot;
if (*slot)
|| fcode == BUILT_IN_TRAP))
return true;
- if (!odr_hash.is_created ())
+ if (!odr_hash)
return true;
targets = possible_polymorphic_call_targets (otr_type, otr_token, ctx, &final);
for (i = 0; i < targets.length (); i++)
{
struct cgraph_node *n;
- if (!odr_hash.is_created ())
+ if (!odr_hash)
return;
free_polymorphic_call_targets_hash ();
timevar_push (TV_IPA_INHERITANCE);
HASHTABLE is the on-side hash kept to avoid duplicates. */
static void
-account_time_size (hash_table <histogram_hash> hashtable,
+account_time_size (hash_table<histogram_hash> *hashtable,
vec<histogram_entry *> &histogram,
gcov_type count, int time, int size)
{
histogram_entry key = {count, 0, 0};
- histogram_entry **val = hashtable.find_slot (&key, INSERT);
+ histogram_entry **val = hashtable->find_slot (&key, INSERT);
if (!*val)
{
{
struct cgraph_node *node;
gimple_stmt_iterator gsi;
- hash_table <histogram_hash> hashtable;
basic_block bb;
- hashtable.create (10);
+ hash_table<histogram_hash> hashtable (10);
histogram_pool = create_alloc_pool ("IPA histogram", sizeof (struct histogram_entry),
10);
time += estimate_num_insns (stmt, &eni_time_weights);
size += estimate_num_insns (stmt, &eni_size_weights);
}
- account_time_size (hashtable, histogram, bb->count, time, size);
+ account_time_size (&hashtable, histogram, bb->count, time, size);
}
- hashtable.dispose ();
histogram.qsort (cmp_counts);
}
struct lto_file_decl_data ** file_data_vec
= lto_get_file_decl_data ();
struct lto_file_decl_data * file_data;
- hash_table <histogram_hash> hashtable;
int j = 0;
- hashtable.create (10);
+ hash_table<histogram_hash> hashtable (10);
histogram_pool = create_alloc_pool ("IPA histogram", sizeof (struct histogram_entry),
10);
gcov_type count = streamer_read_gcov_count (ib);
int time = streamer_read_uhwi (ib);
int size = streamer_read_uhwi (ib);
- account_time_size (hashtable, histogram,
+ account_time_size (&hashtable, histogram,
count, time, size);
}
lto_destroy_simple_input_block (file_data,
ib, data, len);
}
}
- hashtable.dispose ();
histogram.qsort (cmp_counts);
}
}
/* Hash table of unique allocno hard registers. */
-static hash_table <allocno_hard_regs_hasher> allocno_hard_regs_htab;
+static hash_table<allocno_hard_regs_hasher> *allocno_hard_regs_htab;
/* Return allocno hard registers in the hash table equal to HV. */
static allocno_hard_regs_t
find_hard_regs (allocno_hard_regs_t hv)
{
- return allocno_hard_regs_htab.find (hv);
+ return allocno_hard_regs_htab->find (hv);
}
/* Insert allocno hard registers HV in the hash table (if it is not
static allocno_hard_regs_t
insert_hard_regs (allocno_hard_regs_t hv)
{
- allocno_hard_regs **slot = allocno_hard_regs_htab.find_slot (hv, INSERT);
+ allocno_hard_regs **slot = allocno_hard_regs_htab->find_slot (hv, INSERT);
if (*slot == NULL)
*slot = hv;
init_allocno_hard_regs (void)
{
allocno_hard_regs_vec.create (200);
- allocno_hard_regs_htab.create (200);
+ allocno_hard_regs_htab
+ = new hash_table<allocno_hard_regs_hasher> (200);
}
/* Add (or update info about) allocno hard registers with SET and
allocno_hard_regs_vec.iterate (i, &hv);
i++)
ira_free (hv);
- allocno_hard_regs_htab.dispose ();
+ delete allocno_hard_regs_htab;
+ allocno_hard_regs_htab = NULL;
allocno_hard_regs_vec.release ();
}
}
/* Hash table of unique cost classes. */
-static hash_table <cost_classes_hasher> cost_classes_htab;
+static hash_table<cost_classes_hasher> *cost_classes_htab;
/* Map allocno class -> cost classes for pseudo of given allocno
class. */
sizeof (cost_classes_t) * N_REG_CLASSES);
memset (cost_classes_mode_cache, 0,
sizeof (cost_classes_t) * MAX_MACHINE_MODE);
- cost_classes_htab.create (200);
+ cost_classes_htab = new hash_table<cost_classes_hasher> (200);
}
/* Create new cost classes from cost classes FROM and set up members
}
classes.classes[classes.num++] = cl;
}
- slot = cost_classes_htab.find_slot (&classes, INSERT);
+ slot = cost_classes_htab->find_slot (&classes, INSERT);
if (*slot == NULL)
{
classes_ptr = setup_cost_classes (&classes);
continue;
classes.classes[classes.num++] = cl;
}
- slot = cost_classes_htab.find_slot (&classes, INSERT);
+ slot = cost_classes_htab->find_slot (&classes, INSERT);
if (*slot == NULL)
{
classes_ptr = setup_cost_classes (&classes);
finish_regno_cost_classes (void)
{
ira_free (regno_cost_classes);
- cost_classes_htab.dispose ();
+ delete cost_classes_htab;
+ cost_classes_htab = NULL;
}
\f
+2014-06-24 Trevor Saunders <tsaunders@mozilla.com>
+
+ * jcf-io.c: Adjust.
+
2014-06-11 Jan Hubicka <hubicka@ucw.cz>
* java/class.c (build_utf8_ref): Update handling for section names
during class lookup. (There is no need to cache the values
associated with names that were found; they are saved in
IDENTIFIER_CLASS_VALUE.) */
-static hash_table <charstar_hash> memoized_class_lookups;
+static hash_table<charstar_hash> *memoized_class_lookups;
/* Returns a freshly malloc'd string with the fully qualified pathname
of the .class file for the class CLASSNAME. CLASSNAME must be
hashval_t hash;
/* Create the hash table, if it does not already exist. */
- if (!memoized_class_lookups.is_created ())
- memoized_class_lookups.create (37);
+ if (!memoized_class_lookups)
+ memoized_class_lookups = new hash_table<charstar_hash> (37);
/* Loop for this class in the hashtable. If it is present, we've
already looked for this class and failed to find it. */
hash = charstar_hash::hash (classname);
- if (memoized_class_lookups.find_with_hash (classname, hash))
+ if (memoized_class_lookups->find_with_hash (classname, hash))
return NULL;
/* Allocate and zero out the buffer, since we don't explicitly put a
/* Remember that this class could not be found so that we do not
have to look again. */
- *memoized_class_lookups.find_slot_with_hash (classname, hash, INSERT)
+ *memoized_class_lookups->find_slot_with_hash (classname, hash, INSERT)
= classname;
return NULL;
entry2->inv->insn, entry2->expr);
}
-typedef hash_table <invariant_expr_hasher> invariant_htab_type;
+typedef hash_table<invariant_expr_hasher> invariant_htab_type;
/* Checks whether invariant with value EXPR in machine mode MODE is
recorded in EQ. If this is the case, return the invariant. Otherwise
insert INV to the table for this expression and return INV. */
static struct invariant *
-find_or_insert_inv (invariant_htab_type eq, rtx expr, enum machine_mode mode,
+find_or_insert_inv (invariant_htab_type *eq, rtx expr, enum machine_mode mode,
struct invariant *inv)
{
hashval_t hash = hash_invariant_expr_1 (inv->insn, expr);
pentry.expr = expr;
pentry.inv = inv;
pentry.mode = mode;
- slot = eq.find_slot_with_hash (&pentry, hash, INSERT);
+ slot = eq->find_slot_with_hash (&pentry, hash, INSERT);
entry = *slot;
if (entry)
hash table of the invariants. */
static void
-find_identical_invariants (invariant_htab_type eq, struct invariant *inv)
+find_identical_invariants (invariant_htab_type *eq, struct invariant *inv)
{
unsigned depno;
bitmap_iterator bi;
{
unsigned i;
struct invariant *inv;
- invariant_htab_type eq;
- eq.create (invariants.length ());
+ invariant_htab_type eq (invariants.length ());
FOR_EACH_VEC_ELT (invariants, i, inv)
- find_identical_invariants (eq, inv);
-
- eq.dispose ();
+ find_identical_invariants (&eq, inv);
}
/* Determines the basic blocks inside LOOP that are always executed and
/* Bivs of the current loop. */
-static hash_table <biv_entry_hasher> bivs;
+static hash_table<biv_entry_hasher> *bivs;
static bool iv_analyze_op (rtx, rtx, struct rtx_iv *);
}
}
- bivs.empty ();
+ bivs->empty ();
}
if (clean_slate)
{
df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
- bivs.create (10);
+ bivs = new hash_table<biv_entry_hasher> (10);
clean_slate = false;
}
else
static bool
analyzed_for_bivness_p (rtx def, struct rtx_iv *iv)
{
- struct biv_entry *biv = bivs.find_with_hash (def, REGNO (def));
+ struct biv_entry *biv = bivs->find_with_hash (def, REGNO (def));
if (!biv)
return false;
record_biv (rtx def, struct rtx_iv *iv)
{
struct biv_entry *biv = XNEW (struct biv_entry);
- biv_entry **slot = bivs.find_slot_with_hash (def, REGNO (def), INSERT);
+ biv_entry **slot = bivs->find_slot_with_hash (def, REGNO (def), INSERT);
biv->regno = REGNO (def);
biv->iv = *iv;
clear_iv_info ();
clean_slate = true;
df_finish_pass (true);
- bivs.dispose ();
+ delete bivs;
+ bivs = NULL;
free (iv_ref_table);
iv_ref_table = NULL;
iv_ref_table_size = 0;
struct opt_info
{
- hash_table <iv_split_hasher> insns_to_split; /* A hashtable of insns to
+ hash_table<iv_split_hasher> *insns_to_split; /* A hashtable of insns to
split. */
struct iv_to_split *iv_to_split_head; /* The first iv to split. */
struct iv_to_split **iv_to_split_tail; /* Pointer to the tail of the list. */
- hash_table <var_expand_hasher> insns_with_var_to_expand; /* A hashtable of
+ hash_table<var_expand_hasher> *insns_with_var_to_expand; /* A hashtable of
insns with accumulators to expand. */
struct var_to_expand *var_to_expand_head; /* The first var to expand. */
struct var_to_expand **var_to_expand_tail; /* Pointer to the tail of the list. */
if (flag_split_ivs_in_unroller)
{
- opt_info->insns_to_split.create (5 * loop->num_nodes);
+ opt_info->insns_to_split
+ = new hash_table<iv_split_hasher> (5 * loop->num_nodes);
opt_info->iv_to_split_head = NULL;
opt_info->iv_to_split_tail = &opt_info->iv_to_split_head;
}
if (flag_variable_expansion_in_unroller
&& can_apply)
{
- opt_info->insns_with_var_to_expand.create (5 * loop->num_nodes);
+ opt_info->insns_with_var_to_expand
+ = new hash_table<var_expand_hasher> (5 * loop->num_nodes);
opt_info->var_to_expand_head = NULL;
opt_info->var_to_expand_tail = &opt_info->var_to_expand_head;
}
if (!INSN_P (insn))
continue;
- if (opt_info->insns_to_split.is_created ())
+ if (opt_info->insns_to_split)
ivts = analyze_iv_to_split_insn (insn);
if (ivts)
{
- slot1 = opt_info->insns_to_split.find_slot (ivts, INSERT);
+ slot1 = opt_info->insns_to_split->find_slot (ivts, INSERT);
gcc_assert (*slot1 == NULL);
*slot1 = ivts;
*opt_info->iv_to_split_tail = ivts;
continue;
}
- if (opt_info->insns_with_var_to_expand.is_created ())
+ if (opt_info->insns_with_var_to_expand)
ves = analyze_insn_to_expand_var (loop, insn);
if (ves)
{
- slot2 = opt_info->insns_with_var_to_expand.find_slot (ves, INSERT);
+ slot2 = opt_info->insns_with_var_to_expand->find_slot (ves, INSERT);
gcc_assert (*slot2 == NULL);
*slot2 = ves;
*opt_info->var_to_expand_tail = ves;
gcc_assert (!unrolling || rewrite_original_loop);
/* Allocate the basic variables (i0). */
- if (opt_info->insns_to_split.is_created ())
+ if (opt_info->insns_to_split)
for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
allocate_basic_variable (ivts);
ve_templ.insn = orig_insn;
/* Apply splitting iv optimization. */
- if (opt_info->insns_to_split.is_created ())
+ if (opt_info->insns_to_split)
{
maybe_strip_eq_note_for_split_iv (opt_info, insn);
- ivts = opt_info->insns_to_split.find (&ivts_templ);
+ ivts = opt_info->insns_to_split->find (&ivts_templ);
if (ivts)
{
}
}
/* Apply variable expansion optimization. */
- if (unrolling && opt_info->insns_with_var_to_expand.is_created ())
+ if (unrolling && opt_info->insns_with_var_to_expand)
{
ves = (struct var_to_expand *)
- opt_info->insns_with_var_to_expand.find (&ve_templ);
+ opt_info->insns_with_var_to_expand->find (&ve_templ);
if (ves)
{
gcc_assert (GET_CODE (PATTERN (insn))
/* Initialize the variable expansions in the loop preheader
and take care of combining them at the loop exit. */
- if (opt_info->insns_with_var_to_expand.is_created ())
+ if (opt_info->insns_with_var_to_expand)
{
for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
insert_var_expansion_initialization (ves, opt_info->loop_preheader);
continue;
ivts_templ.insn = orig_insn;
- if (opt_info->insns_to_split.is_created ())
+ if (opt_info->insns_to_split)
{
maybe_strip_eq_note_for_split_iv (opt_info, orig_insn);
ivts = (struct iv_to_split *)
- opt_info->insns_to_split.find (&ivts_templ);
+ opt_info->insns_to_split->find (&ivts_templ);
if (ivts)
{
if (!delta)
static void
free_opt_info (struct opt_info *opt_info)
{
- if (opt_info->insns_to_split.is_created ())
- opt_info->insns_to_split.dispose ();
- if (opt_info->insns_with_var_to_expand.is_created ())
+ delete opt_info->insns_to_split;
+ opt_info->insns_to_split = NULL;
+ if (opt_info->insns_with_var_to_expand)
{
struct var_to_expand *ves;
for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
ves->var_expansions.release ();
- opt_info->insns_with_var_to_expand.dispose ();
+ delete opt_info->insns_with_var_to_expand;
+ opt_info->insns_with_var_to_expand = NULL;
}
free (opt_info);
}
}
/* The table to hold the file names. */
-static hash_table <freeing_string_slot_hasher> file_name_hash_table;
+static hash_table<freeing_string_slot_hasher> *file_name_hash_table;
/* Check that tag ACTUAL has one of the given values. NUM_TAGS is the
s_slot.s = string;
s_slot.len = len;
- slot = file_name_hash_table.find_slot (&s_slot, INSERT);
+ slot = file_name_hash_table->find_slot (&s_slot, INSERT);
if (*slot == NULL)
{
char *saved_string;
lto_reader_init (void)
{
lto_streamer_init ();
- file_name_hash_table.create (37);
+ file_name_hash_table
+ = new hash_table<freeing_string_slot_hasher> (37);
}
clear_line_info (ob);
- ob->string_hash_table.create (37);
+ ob->string_hash_table = new hash_table<string_slot_hasher> (37);
gcc_obstack_init (&ob->obstack);
return ob;
{
enum lto_section_type section_type = ob->section_type;
- ob->string_hash_table.dispose ();
+ delete ob->string_hash_table;
+ ob->string_hash_table = NULL;
free (ob->main_stream);
free (ob->string_stream);
return (e1->key == e2->key);
}
-static hash_table <tree_hash_entry> tree_htab;
+static hash_table<tree_hash_entry> *tree_htab;
#endif
/* Initialization common to the LTO reader and writer. */
streamer_check_handled_ts_structures ();
#ifdef LTO_STREAMER_DEBUG
- tree_htab.create (31);
+ tree_htab = new hash_table<tree_hash_entry> (31);
#endif
}
ent.key = t;
ent.value = orig_t;
- slot = tree_htab.find_slot (&ent, INSERT);
+ slot = tree_htab->find_slot (&ent, INSERT);
gcc_assert (!*slot);
*slot = XNEW (struct tree_hash_entry);
**slot = ent;
struct tree_hash_entry **slot;
ent.key = t;
- slot = tree_htab.find_slot (&ent, NO_INSERT);
+ slot = tree_htab->find_slot (&ent, NO_INSERT);
return (slot ? (*slot)->value : 0);
}
struct tree_hash_entry **slot;
ent.key = t;
- slot = tree_htab.find_slot (&ent, NO_INSERT);
+ slot = tree_htab->find_slot (&ent, NO_INSERT);
gcc_assert (slot);
free (*slot);
- tree_htab.clear_slot (slot);
+ tree_htab->clear_slot (slot);
}
#endif
/* The hash table that contains the set of strings we have seen so
far and the indexes assigned to them. */
- hash_table <string_slot_hasher> string_hash_table;
+ hash_table<string_slot_hasher> *string_hash_table;
/* The current cgraph_node that we are currently serializing. Null
if we are serializing something else. */
+2014-06-24 Trevor Saunders <tsaunders@mozilla.com>
+
+ * lto.c: Adjust.
+
2014-06-20 Jan Hubicka <hubicka@ucw.cz>
* lto-symtab.c (lto_varpool_replace_node): Report TLS model conflicts.
return true;
}
-static hash_table <tree_scc_hasher> tree_scc_hash;
+static hash_table<tree_scc_hasher> *tree_scc_hash;
static struct obstack tree_scc_hash_obstack;
static unsigned long num_merged_types;
/* Look for the list of candidate SCCs to compare against. */
tree_scc **slot;
- slot = tree_scc_hash.find_slot_with_hash (scc, scc_hash, INSERT);
+ slot = tree_scc_hash->find_slot_with_hash (scc, scc_hash, INSERT);
if (*slot)
{
/* Try unifying against each candidate. */
gimple_canonical_types = htab_create_ggc (16381, gimple_canonical_type_hash,
gimple_canonical_type_eq, 0);
gcc_obstack_init (&tree_scc_hash_obstack);
- tree_scc_hash.create (4096);
+ tree_scc_hash = new hash_table<tree_scc_hasher> (4096);
/* Register the common node types with the canonical type machinery so
we properly share alias-sets across languages and TUs. Do not
print_lto_report_1 ();
/* Free gimple type merging datastructures. */
- tree_scc_hash.dispose ();
+ delete tree_scc_hash;
+ tree_scc_hash = NULL;
obstack_free (&tree_scc_hash_obstack, NULL);
htab_delete (gimple_canonical_types);
gimple_canonical_types = NULL;
fprintf (stderr, "[%s] read %lu SCCs of average size %f\n",
pfx, num_sccs_read, total_scc_size / (double)num_sccs_read);
fprintf (stderr, "[%s] %lu tree bodies read in total\n", pfx, total_scc_size);
- if (flag_wpa && tree_scc_hash.is_created ())
+ if (flag_wpa && tree_scc_hash)
{
fprintf (stderr, "[%s] tree SCC table: size %ld, %ld elements, "
"collision ratio: %f\n", pfx,
- (long) tree_scc_hash.size (),
- (long) tree_scc_hash.elements (),
- tree_scc_hash.collisions ());
+ (long) tree_scc_hash->size (),
+ (long) tree_scc_hash->elements (),
+ tree_scc_hash->collisions ());
hash_table<tree_scc_hasher>::iterator hiter;
tree_scc *scc, *max_scc = NULL;
unsigned max_length = 0;
- FOR_EACH_HASH_TABLE_ELEMENT (tree_scc_hash, scc, x, hiter)
+ FOR_EACH_HASH_TABLE_ELEMENT (*tree_scc_hash, scc, x, hiter)
{
unsigned length = 0;
tree_scc *s = scc;
+2014-06-24 Trevor Saunders <tsaunders@mozilla.com>
+
+ * objc-act.c: Adjust.
+
2014-05-17 Trevor Saunders <tsaunders@mozilla.com>
* objc-act.c (objc_build_string_object): Adjust.
{
/* First, build the hashtable by putting all the instance
variables of superclasses in it. */
- hash_table <decl_name_hash> htab;
- htab.create (37);
+ hash_table<decl_name_hash> htab (37);
tree interface;
for (interface = lookup_interface (CLASS_SUPER_NAME
(objc_interface_context));
}
}
}
- htab.dispose ();
return true;
}
}
return !strcmp (s1->unique_name, s2->unique_name);
}
-static hash_table <pass_registry_hasher> name_to_pass_map;
+static hash_table<pass_registry_hasher> *name_to_pass_map;
/* Register PASS with NAME. */
struct pass_registry **slot;
struct pass_registry pr;
- if (!name_to_pass_map.is_created ())
- name_to_pass_map.create (256);
+ if (!name_to_pass_map)
+ name_to_pass_map = new hash_table<pass_registry_hasher> (256);
pr.unique_name = name;
- slot = name_to_pass_map.find_slot (&pr, INSERT);
+ slot = name_to_pass_map->find_slot (&pr, INSERT);
if (!*slot)
{
struct pass_registry *new_pr;
return;
pass_tab.safe_grow_cleared (g->get_passes ()->passes_by_id_size + 1);
- name_to_pass_map.traverse <void *, passes_pass_traverse> (NULL);
+ name_to_pass_map->traverse <void *, passes_pass_traverse> (NULL);
}
static bool override_gate_status (opt_pass *, tree, bool);
struct pass_registry **slot, pr;
pr.unique_name = name;
- slot = name_to_pass_map.find_slot (&pr, NO_INSERT);
+ slot = name_to_pass_map->find_slot (&pr, NO_INSERT);
if (!slot || !*slot)
return NULL;
/* A hash table to map event names to the position of the names in the
plugin_event_name table. */
-static hash_table <event_hasher> event_tab;
+static hash_table<event_hasher> *event_tab;
/* Keep track of the limit of allocated events and space ready for
allocating events. */
{
const char ***slot;
- if (!event_tab.is_created ())
+ if (!event_tab)
{
int i;
- event_tab.create (150);
+ event_tab = new hash_table<event_hasher> (150);
for (i = 0; i < event_last; i++)
{
- slot = event_tab.find_slot (&plugin_event_name[i], INSERT);
+ slot = event_tab->find_slot (&plugin_event_name[i], INSERT);
gcc_assert (*slot == HTAB_EMPTY_ENTRY);
*slot = &plugin_event_name[i];
}
}
- slot = event_tab.find_slot (&name, insert);
+ slot = event_tab->find_slot (&name, insert);
if (slot == NULL)
return -1;
if (*slot != HTAB_EMPTY_ENTRY)
plugin_callbacks, event_horizon);
}
/* All the pointers in the hash table will need to be updated. */
- event_tab.dispose ();
+ delete event_tab;
+ event_tab = NULL;
}
else
*slot = &plugin_event_name[event_last];
}
/* The table itself. */
-static hash_table <expr_hasher> expr_table;
+static hash_table<expr_hasher> *expr_table;
\f
static struct obstack expr_obstack;
make the hash table too small, but unnecessarily making it too large
also doesn't help. The i/4 is a gcse.c relic, and seems like a
reasonable choice. */
- expr_table.create (MAX (i / 4, 13));
+ expr_table = new hash_table<expr_hasher> (MAX (i / 4, 13));
/* We allocate everything on obstacks because we often can roll back
the whole obstack to some point. Freeing obstacks is very fast. */
{
free (uid_cuid);
- expr_table.dispose ();
+ delete expr_table;
+ expr_table = NULL;
obstack_free (&expr_obstack, NULL);
obstack_free (&occr_obstack, NULL);
cur_expr->hash = hash;
cur_expr->avail_occr = NULL;
- slot = expr_table.find_slot_with_hash (cur_expr, hash, INSERT);
+ slot = expr_table->find_slot_with_hash (cur_expr, hash, INSERT);
if (! (*slot))
/* The expression isn't found, so insert it. */
tmp_expr->hash = hash;
tmp_expr->avail_occr = NULL;
- slot = expr_table.find_slot_with_hash (tmp_expr, hash, INSERT);
+ slot = expr_table->find_slot_with_hash (tmp_expr, hash, INSERT);
obstack_free (&expr_obstack, tmp_expr);
if (!slot)
{
fprintf (file, "\n\nexpression hash table\n");
fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
- (long) expr_table.size (),
- (long) expr_table.elements (),
- expr_table.collisions ());
- if (expr_table.elements () > 0)
+ (long) expr_table->size (),
+ (long) expr_table->elements (),
+ expr_table->collisions ());
+ if (expr_table->elements () > 0)
{
fprintf (file, "\n\ntable entries:\n");
- expr_table.traverse <FILE *, dump_expr_hash_table_entry> (file);
+ expr_table->traverse <FILE *, dump_expr_hash_table_entry> (file);
}
fprintf (file, "\n");
}
static void
delete_redundant_insns (void)
{
- expr_table.traverse <void *, delete_redundant_insns_1> (NULL);
+ expr_table->traverse <void *, delete_redundant_insns_1> (NULL);
if (dump_file)
fprintf (dump_file, "\n");
}
if (dump_file)
dump_hash_table (dump_file);
- if (expr_table.elements () > 0)
+ if (expr_table->elements () > 0)
{
eliminate_partially_redundant_loads ();
delete_redundant_insns ();
return (elt1->old_name == elt2->old_name);
}
-typedef hash_table <rename_map_hasher> rename_map_type;
+typedef hash_table<rename_map_hasher> rename_map_type;
\f
/* Print to stderr all the elements of RENAME_MAP. */
DEBUG_FUNCTION void
-debug_rename_map (rename_map_type rename_map)
+debug_rename_map (rename_map_type *rename_map)
{
- rename_map.traverse <void *, debug_rename_map_1> (NULL);
+ rename_map->traverse <void *, debug_rename_map_1> (NULL);
}
/* Computes a hash function for database element ELT. */
/* Returns the expression associated to OLD_NAME in RENAME_MAP. */
static tree
-get_rename (rename_map_type rename_map, tree old_name)
+get_rename (rename_map_type *rename_map, tree old_name)
{
struct rename_map_elt_s tmp;
rename_map_elt_s **slot;
gcc_assert (TREE_CODE (old_name) == SSA_NAME);
tmp.old_name = old_name;
- slot = rename_map.find_slot (&tmp, NO_INSERT);
+ slot = rename_map->find_slot (&tmp, NO_INSERT);
if (slot && *slot)
return (*slot)->expr;
/* Register in RENAME_MAP the rename tuple (OLD_NAME, EXPR). */
static void
-set_rename (rename_map_type rename_map, tree old_name, tree expr)
+set_rename (rename_map_type *rename_map, tree old_name, tree expr)
{
struct rename_map_elt_s tmp;
rename_map_elt_s **slot;
return;
tmp.old_name = old_name;
- slot = rename_map.find_slot (&tmp, INSERT);
+ slot = rename_map->find_slot (&tmp, INSERT);
if (!slot)
return;
is set when the code generation cannot continue. */
static bool
-rename_uses (gimple copy, rename_map_type rename_map,
+rename_uses (gimple copy, rename_map_type *rename_map,
gimple_stmt_iterator *gsi_tgt,
sese region, loop_p loop, vec<tree> iv_map,
bool *gloog_error)
static void
graphite_copy_stmts_from_block (basic_block bb, basic_block new_bb,
- rename_map_type rename_map,
+ rename_map_type *rename_map,
vec<tree> iv_map, sese region,
bool *gloog_error)
{
bool *gloog_error)
{
basic_block new_bb = split_edge (next_e);
- rename_map_type rename_map;
- rename_map.create (10);
+ rename_map_type rename_map (10);
next_e = single_succ_edge (new_bb);
- graphite_copy_stmts_from_block (bb, new_bb, rename_map, iv_map, region,
+ graphite_copy_stmts_from_block (bb, new_bb, &rename_map, iv_map, region,
gloog_error);
remove_phi_nodes (new_bb);
- rename_map.dispose ();
return next_e;
}
free (v);
}
-typedef hash_table <stats_counter_hasher> stats_counter_table_type;
+typedef hash_table<stats_counter_hasher> stats_counter_table_type;
/* Array of statistic hashes, indexed by pass id. */
-static stats_counter_table_type *statistics_hashes;
+static stats_counter_table_type **statistics_hashes;
static unsigned nr_statistics_hashes;
/* Return the current hashtable to be used for recording or printing
statistics. */
-static stats_counter_table_type
+static stats_counter_table_type *
curr_statistics_hash (void)
{
unsigned idx;
idx = current_pass->static_pass_number;
if (idx < nr_statistics_hashes
- && statistics_hashes[idx].is_created ())
+ && statistics_hashes[idx])
return statistics_hashes[idx];
if (idx >= nr_statistics_hashes)
{
- statistics_hashes = XRESIZEVEC (stats_counter_table_type,
+ statistics_hashes = XRESIZEVEC (stats_counter_table_type *,
statistics_hashes, idx+1);
memset (statistics_hashes + nr_statistics_hashes, 0,
(idx + 1 - nr_statistics_hashes)
- * sizeof (stats_counter_table_type));
+ * sizeof (stats_counter_table_type *));
nr_statistics_hashes = idx + 1;
}
- statistics_hashes[idx].create (15);
+ statistics_hashes[idx] = new stats_counter_table_type (15);
return statistics_hashes[idx];
}
fprintf (dump_file, "Pass statistics:\n");
fprintf (dump_file, "----------------\n");
curr_statistics_hash ()
- .traverse_noresize <void *, statistics_fini_pass_1> (NULL);
+ ->traverse_noresize <void *, statistics_fini_pass_1> (NULL);
fprintf (dump_file, "\n");
}
if (statistics_dump_file
&& !(statistics_dump_flags & TDF_STATS
|| statistics_dump_flags & TDF_DETAILS))
curr_statistics_hash ()
- .traverse_noresize <void *, statistics_fini_pass_2> (NULL);
+ ->traverse_noresize <void *, statistics_fini_pass_2> (NULL);
curr_statistics_hash ()
- .traverse_noresize <void *, statistics_fini_pass_3> (NULL);
+ ->traverse_noresize <void *, statistics_fini_pass_3> (NULL);
}
/* Helper for printing summary information. */
{
unsigned i;
for (i = 0; i < nr_statistics_hashes; ++i)
- if (statistics_hashes[i].is_created ()
+ if (statistics_hashes[i]
&& passes->get_pass_for_id (i) != NULL)
statistics_hashes[i]
- .traverse_noresize <opt_pass *, statistics_fini_1>
+ ->traverse_noresize <opt_pass *, statistics_fini_1>
(passes->get_pass_for_id (i));
}
and HISTOGRAM_P. */
static statistics_counter_t *
-lookup_or_add_counter (stats_counter_table_type hash, const char *id, int val,
+lookup_or_add_counter (stats_counter_table_type *hash, const char *id, int val,
bool histogram_p)
{
statistics_counter_t **counter;
statistics_counter_t c;
c.id = id;
c.val = val;
- counter = hash.find_slot (&c, INSERT);
+ counter = hash->find_slot (&c, INSERT);
if (!*counter)
{
*counter = XNEW (struct statistics_counter_s);
}
/* Hashtable for the load/store memory refs. */
-static hash_table <st_expr_hasher> store_motion_mems_table;
+static hash_table<st_expr_hasher> *store_motion_mems_table;
/* This will search the st_expr list for a matching expression. If it
doesn't find one, we create one and initialize it. */
NULL, /*have_reg_qty=*/false);
e.pattern = x;
- slot = store_motion_mems_table.find_slot_with_hash (&e, hash, INSERT);
+ slot = store_motion_mems_table->find_slot_with_hash (&e, hash, INSERT);
if (*slot)
return *slot;
static void
free_store_motion_mems (void)
{
- if (store_motion_mems_table.is_created ())
- store_motion_mems_table.dispose ();
+ delete store_motion_mems_table;
+ store_motion_mems_table = NULL;
while (store_motion_mems)
{
unsigned int max_gcse_regno = max_reg_num ();
store_motion_mems = NULL;
- store_motion_mems_table.create (13);
+ store_motion_mems_table = new hash_table<st_expr_hasher> (13);
last_set_in = XCNEWVEC (int, max_gcse_regno);
already_set = XNEWVEC (int, max_gcse_regno);
if (! ptr->avail_stores)
{
*prev_next_ptr_ptr = ptr->next;
- store_motion_mems_table.remove_elt_with_hash (ptr, ptr->hash_index);
+ store_motion_mems_table->remove_elt_with_hash (ptr, ptr->hash_index);
free_st_expr_entry (ptr);
}
else
num_stores = compute_store_table ();
if (num_stores == 0)
{
- store_motion_mems_table.dispose ();
+ delete store_motion_mems_table;
+ store_motion_mems_table = NULL;
end_alias_analysis ();
return 0;
}
/* The actual log. */
-static hash_table <log_entry_hasher> tm_log;
+static hash_table<log_entry_hasher> *tm_log;
/* Addresses to log with a save/restore sequence. These should be in
dominator order. */
/* Map for an SSA_NAME originally pointing to a non aliased new piece
of memory (malloc, alloc, etc). */
-static hash_table <tm_mem_map_hasher> tm_new_mem_hash;
+static hash_table<tm_mem_map_hasher> *tm_new_mem_hash;
/* Initialize logging data structures. */
static void
tm_log_init (void)
{
- tm_log.create (10);
- tm_new_mem_hash.create (5);
+ tm_log = new hash_table<log_entry_hasher> (10);
+ tm_new_mem_hash = new hash_table<tm_mem_map_hasher> (5);
tm_log_save_addresses.create (5);
}
static void
tm_log_delete (void)
{
- tm_log.dispose ();
- tm_new_mem_hash.dispose ();
+ delete tm_log;
+ tm_log = NULL;
+ delete tm_new_mem_hash;
+ tm_new_mem_hash = NULL;
tm_log_save_addresses.release ();
}
struct tm_log_entry l, *lp;
l.addr = addr;
- slot = tm_log.find_slot (&l, INSERT);
+ slot = tm_log->find_slot (&l, INSERT);
if (!*slot)
{
tree type = TREE_TYPE (addr);
static void
tm_log_emit (void)
{
- hash_table <log_entry_hasher>::iterator hi;
+ hash_table<log_entry_hasher>::iterator hi;
struct tm_log_entry *lp;
- FOR_EACH_HASH_TABLE_ELEMENT (tm_log, lp, tm_log_entry_t, hi)
+ FOR_EACH_HASH_TABLE_ELEMENT (*tm_log, lp, tm_log_entry_t, hi)
{
size_t i;
gimple stmt;
for (i = 0; i < tm_log_save_addresses.length (); ++i)
{
l.addr = tm_log_save_addresses[i];
- lp = *(tm_log.find_slot (&l, NO_INSERT));
+ lp = *(tm_log->find_slot (&l, NO_INSERT));
gcc_assert (lp->save_var != NULL);
/* We only care about variables in the current transaction. */
for (i = tm_log_save_addresses.length () - 1; i >= 0; i--)
{
l.addr = tm_log_save_addresses[i];
- lp = *(tm_log.find_slot (&l, NO_INSERT));
+ lp = *(tm_log->find_slot (&l, NO_INSERT));
gcc_assert (lp->save_var != NULL);
/* We only care about variables in the current transaction. */
/* Look in cache first. */
elt.val = x;
- slot = tm_new_mem_hash.find_slot (&elt, INSERT);
+ slot = tm_new_mem_hash->find_slot (&elt, INSERT);
elt_p = *slot;
if (elt_p)
return elt_p->local_new_memory;
/* Unique counter for TM loads and stores. Loads and stores of the
same address get the same ID. */
static unsigned int tm_memopt_value_id;
-static hash_table <tm_memop_hasher> tm_memopt_value_numbers;
+static hash_table<tm_memop_hasher> *tm_memopt_value_numbers;
#define STORE_AVAIL_IN(BB) \
((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
tmpmem.addr = gimple_call_arg (stmt, 0);
- slot = tm_memopt_value_numbers.find_slot (&tmpmem, op);
+ slot = tm_memopt_value_numbers->find_slot (&tmpmem, op);
if (*slot)
mem = *slot;
else if (op == INSERT)
fprintf (dump_file, "TM memopt: %s: [", set_name);
EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
{
- hash_table <tm_memop_hasher>::iterator hi;
+ hash_table<tm_memop_hasher>::iterator hi;
struct tm_memop *mem = NULL;
/* Yeah, yeah, yeah. Whatever. This is just for debugging. */
- FOR_EACH_HASH_TABLE_ELEMENT (tm_memopt_value_numbers, mem, tm_memop_t, hi)
+ FOR_EACH_HASH_TABLE_ELEMENT (*tm_memopt_value_numbers, mem, tm_memop_t, hi)
if (mem->value_id == i)
break;
gcc_assert (mem->value_id == i);
vec<basic_block> bbs;
tm_memopt_value_id = 0;
- tm_memopt_value_numbers.create (10);
+ tm_memopt_value_numbers = new hash_table<tm_memop_hasher> (10);
for (region = all_tm_regions; region; region = region->next)
{
tm_memopt_free_sets (bbs);
bbs.release ();
bitmap_obstack_release (&tm_memopt_obstack);
- tm_memopt_value_numbers.empty ();
+ tm_memopt_value_numbers->empty ();
}
- tm_memopt_value_numbers.dispose ();
+ delete tm_memopt_value_numbers;
+ tm_memopt_value_numbers = NULL;
return 0;
}
}
/* Static variables. */
-static hash_table <tree_upper_hasher> TB_up_ht;
+static hash_table<tree_upper_hasher> *TB_up_ht;
static vec<tree, va_gc> *TB_history_stack;
static int TB_verbose = 1;
/* Store in a hashtable information about previous and upper statements. */
{
- TB_up_ht.create (1023);
+ TB_up_ht = new hash_table<tree_upper_hasher> (1023);
TB_update_up (head);
}
}
ret:;
- TB_up_ht.dispose ();
+ delete TB_up_ht;
+ TB_up_ht = NULL;
return;
}
if (node == NULL_TREE)
return NULL_TREE;
- res = TB_up_ht.find (node);
+ res = TB_up_ht->find (node);
return res;
}
for (i = 0; i < n; i++)
{
tree op = TREE_OPERAND (node, i);
- slot = TB_up_ht.find_slot (op, INSERT);
+ slot = TB_up_ht->find_slot (op, INSERT);
*slot = node;
}
}
return LOCATION_LINE (a->locus) == LOCATION_LINE (b->locus);
}
-static hash_table <locus_discrim_hasher> discriminator_per_locus;
+static hash_table<locus_discrim_hasher> *discriminator_per_locus;
/* Basic blocks and flowgraphs. */
static void make_blocks (gimple_seq);
group_case_labels ();
/* Create the edges of the flowgraph. */
- discriminator_per_locus.create (13);
+ discriminator_per_locus = new hash_table<locus_discrim_hasher> (13);
make_edges ();
assign_discriminators ();
cleanup_dead_labels ();
- discriminator_per_locus.dispose ();
+ delete discriminator_per_locus;
+ discriminator_per_locus = NULL;
}
item.locus = locus;
item.discriminator = 0;
- slot = discriminator_per_locus.find_slot_with_hash (
+ slot = discriminator_per_locus->find_slot_with_hash (
&item, LOCATION_LINE (locus), INSERT);
gcc_assert (slot);
if (*slot == HTAB_EMPTY_ENTRY)
/* For each complex variable, a pair of variables for the components exists in
the hashtable. */
-static int_tree_htab_type complex_variable_components;
+static int_tree_htab_type *complex_variable_components;
/* For each complex SSA_NAME, a pair of ssa names for the components. */
static vec<tree> complex_ssa_name_components;
{
struct int_tree_map *h, in;
in.uid = uid;
- h = complex_variable_components.find_with_hash (&in, uid);
+ h = complex_variable_components->find_with_hash (&in, uid);
return h ? h->to : NULL;
}
h = XNEW (struct int_tree_map);
h->uid = uid;
h->to = to;
- loc = complex_variable_components.find_slot_with_hash (h, uid, INSERT);
+ loc = complex_variable_components->find_slot_with_hash (h, uid, INSERT);
*loc = h;
}
init_parameter_lattice_values ();
ssa_propagate (complex_visit_stmt, complex_visit_phi);
- complex_variable_components.create (10);
+ complex_variable_components = new int_tree_htab_type (10);
complex_ssa_name_components.create (2 * num_ssa_names);
complex_ssa_name_components.safe_grow_cleared (2 * num_ssa_names);
gsi_commit_edge_inserts ();
- complex_variable_components.dispose ();
+ delete complex_variable_components;
+ complex_variable_components = NULL;
complex_ssa_name_components.release ();
complex_lattice_values.release ();
return 0;
}
/* Note that this table is *not* marked GTY. It is short-lived. */
-static hash_table <finally_tree_hasher> finally_tree;
+static hash_table<finally_tree_hasher> *finally_tree;
static void
record_in_finally_tree (treemple child, gimple parent)
n->child = child;
n->parent = parent;
- slot = finally_tree.find_slot (n, INSERT);
+ slot = finally_tree->find_slot (n, INSERT);
gcc_assert (!*slot);
*slot = n;
}
do
{
n.child = start;
- p = finally_tree.find (&n);
+ p = finally_tree->find (&n);
if (!p)
return true;
start.g = p->parent;
if (bodyp == NULL)
return 0;
- finally_tree.create (31);
+ finally_tree = new hash_table<finally_tree_hasher> (31);
eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
memset (&null_state, 0, sizeof (null_state));
didn't change its value, and we don't have to re-set the function. */
gcc_assert (bodyp == gimple_body (current_function_decl));
- finally_tree.dispose ();
+ delete finally_tree;
+ finally_tree = NULL;
BITMAP_FREE (eh_region_may_contain_throw_map);
eh_seq = NULL;
/* Each entry in VAR_INFOS contains an element of type STRUCT
VAR_INFO_D. */
-static hash_table <var_info_hasher> var_infos;
+static hash_table<var_info_hasher> *var_infos;
/* Information stored for SSA names. */
struct var_info_d vi;
var_info_d **slot;
vi.var = decl;
- slot = var_infos.find_slot_with_hash (&vi, DECL_UID (decl), INSERT);
+ slot = var_infos->find_slot_with_hash (&vi, DECL_UID (decl), INSERT);
if (*slot == NULL)
{
var_info_p v = XCNEW (struct var_info_d);
static void
insert_phi_nodes (bitmap_head *dfs)
{
- hash_table <var_info_hasher>::iterator hi;
+ hash_table<var_info_hasher>::iterator hi;
unsigned i;
var_info_p info;
timevar_push (TV_TREE_INSERT_PHI_NODES);
- auto_vec<var_info_p> vars (var_infos.elements ());
- FOR_EACH_HASH_TABLE_ELEMENT (var_infos, info, var_info_p, hi)
+ auto_vec<var_info_p> vars (var_infos->elements ());
+ FOR_EACH_HASH_TABLE_ELEMENT (*var_infos, info, var_info_p, hi)
if (info->info.need_phi_state != NEED_PHI_STATE_NO)
vars.quick_push (info);
/* Dump statistics for the hash table HTAB. */
static void
-htab_statistics (FILE *file, hash_table <var_info_hasher> htab)
+htab_statistics (FILE *file, const hash_table<var_info_hasher> &htab)
{
fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
(long) htab.size (),
void
dump_tree_ssa_stats (FILE *file)
{
- if (var_infos.is_created ())
+ if (var_infos)
{
fprintf (file, "\nHash table statistics:\n");
fprintf (file, " var_infos: ");
- htab_statistics (file, var_infos);
+ htab_statistics (file, *var_infos);
fprintf (file, "\n");
}
}
dump_var_infos (FILE *file)
{
fprintf (file, "\n\nDefinition and live-in blocks:\n\n");
- if (var_infos.is_created ())
- var_infos.traverse <FILE *, debug_var_infos_r> (file);
+ if (var_infos)
+ var_infos->traverse <FILE *, debug_var_infos_r> (file);
}
if (dump_file && (dump_flags & TDF_STATS))
{
dump_dfa_stats (dump_file);
- if (var_infos.is_created ())
+ if (var_infos)
dump_tree_ssa_stats (dump_file);
}
cfun->gimple_df->in_ssa_p = false;
/* Allocate memory for the DEF_BLOCKS hash table. */
- gcc_assert (!var_infos.is_created ());
- var_infos.create (vec_safe_length (cfun->local_decls));
+ gcc_assert (!var_infos);
+ var_infos = new hash_table<var_info_hasher>
+ (vec_safe_length (cfun->local_decls));
bitmap_obstack_initialize (&update_ssa_obstack);
}
static void
fini_ssa_renamer (void)
{
- if (var_infos.is_created ())
- var_infos.dispose ();
+ delete var_infos;
+ var_infos = NULL;
bitmap_obstack_release (&update_ssa_obstack);
{
/* If we rename bare symbols initialize the mapping to
auxiliar info we need to keep track of. */
- var_infos.create (47);
+ var_infos = new hash_table<var_info_hasher> (47);
/* If we have to rename some symbols from scratch, we need to
start the process at the root of the CFG. FIXME, it should
return a->reduc_version;
}
-typedef hash_table <reduction_hasher> reduction_info_table_type;
+typedef hash_table<reduction_hasher> reduction_info_table_type;
static struct reduction_info *
-reduction_phi (reduction_info_table_type reduction_list, gimple phi)
+reduction_phi (reduction_info_table_type *reduction_list, gimple phi)
{
struct reduction_info tmpred, *red;
- if (reduction_list.elements () == 0 || phi == NULL)
+ if (reduction_list->elements () == 0 || phi == NULL)
return NULL;
tmpred.reduc_phi = phi;
tmpred.reduc_version = gimple_uid (phi);
- red = reduction_list.find (&tmpred);
+ red = reduction_list->find (&tmpred);
return red;
}
return (hashval_t) a->version;
}
-typedef hash_table <name_to_copy_hasher> name_to_copy_table_type;
+typedef hash_table<name_to_copy_hasher> name_to_copy_table_type;
/* A transformation matrix, which is a self-contained ROWSIZE x COLSIZE
matrix. Rather than use floats, we simply keep a single DENOMINATOR that
static tree
take_address_of (tree obj, tree type, edge entry,
- int_tree_htab_type decl_address, gimple_stmt_iterator *gsi)
+ int_tree_htab_type *decl_address, gimple_stmt_iterator *gsi)
{
int uid;
int_tree_map **dslot;
on it. */
uid = DECL_UID (TREE_OPERAND (TREE_OPERAND (*var_p, 0), 0));
ielt.uid = uid;
- dslot = decl_address.find_slot_with_hash (&ielt, uid, INSERT);
+ dslot = decl_address->find_slot_with_hash (&ielt, uid, INSERT);
if (!*dslot)
{
if (gsi == NULL)
{
struct walk_stmt_info info;
edge entry;
- int_tree_htab_type decl_address;
+ int_tree_htab_type *decl_address;
gimple_stmt_iterator *gsi;
bool changed;
bool reset;
static void
eliminate_local_variables_stmt (edge entry, gimple_stmt_iterator *gsi,
- int_tree_htab_type decl_address)
+ int_tree_htab_type *decl_address)
{
struct elv_data dta;
gimple stmt = gsi_stmt (*gsi);
unsigned i;
gimple_stmt_iterator gsi;
bool has_debug_stmt = false;
- int_tree_htab_type decl_address;
- decl_address.create (10);
+ int_tree_htab_type decl_address (10);
basic_block entry_bb = entry->src;
basic_block exit_bb = exit->dest;
has_debug_stmt = true;
}
else
- eliminate_local_variables_stmt (entry, &gsi, decl_address);
+ eliminate_local_variables_stmt (entry, &gsi, &decl_address);
if (has_debug_stmt)
FOR_EACH_VEC_ELT (body, i, bb)
if (bb != entry_bb && bb != exit_bb)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
if (gimple_debug_bind_p (gsi_stmt (gsi)))
- eliminate_local_variables_stmt (entry, &gsi, decl_address);
-
- decl_address.dispose ();
+ eliminate_local_variables_stmt (entry, &gsi, &decl_address);
}
/* Returns true if expression EXPR is not defined between ENTRY and
duplicated, storing the copies in DECL_COPIES. */
static tree
-separate_decls_in_region_name (tree name, name_to_copy_table_type name_copies,
- int_tree_htab_type decl_copies, bool copy_name_p)
+separate_decls_in_region_name (tree name, name_to_copy_table_type *name_copies,
+ int_tree_htab_type *decl_copies,
+ bool copy_name_p)
{
tree copy, var, var_copy;
unsigned idx, uid, nuid;
idx = SSA_NAME_VERSION (name);
elt.version = idx;
- slot = name_copies.find_slot_with_hash (&elt, idx,
- copy_name_p ? INSERT : NO_INSERT);
+ slot = name_copies->find_slot_with_hash (&elt, idx,
+ copy_name_p ? INSERT : NO_INSERT);
if (slot && *slot)
return (*slot)->new_name;
uid = DECL_UID (var);
ielt.uid = uid;
- dslot = decl_copies.find_slot_with_hash (&ielt, uid, INSERT);
+ dslot = decl_copies->find_slot_with_hash (&ielt, uid, INSERT);
if (!*dslot)
{
var_copy = create_tmp_var (TREE_TYPE (var), get_name (var));
it again. */
nuid = DECL_UID (var_copy);
ielt.uid = nuid;
- dslot = decl_copies.find_slot_with_hash (&ielt, nuid, INSERT);
+ dslot = decl_copies->find_slot_with_hash (&ielt, nuid, INSERT);
gcc_assert (!*dslot);
nielt = XNEW (struct int_tree_map);
nielt->uid = nuid;
static void
separate_decls_in_region_stmt (edge entry, edge exit, gimple stmt,
- name_to_copy_table_type name_copies,
- int_tree_htab_type decl_copies)
+ name_to_copy_table_type *name_copies,
+ int_tree_htab_type *decl_copies)
{
use_operand_p use;
def_operand_p def;
static bool
separate_decls_in_region_debug (gimple stmt,
- name_to_copy_table_type name_copies,
- int_tree_htab_type decl_copies)
+ name_to_copy_table_type *name_copies,
+ int_tree_htab_type *decl_copies)
{
use_operand_p use;
ssa_op_iter oi;
return true;
gcc_assert (DECL_P (var) && SSA_VAR_P (var));
ielt.uid = DECL_UID (var);
- dslot = decl_copies.find_slot_with_hash (&ielt, ielt.uid, NO_INSERT);
+ dslot = decl_copies->find_slot_with_hash (&ielt, ielt.uid, NO_INSERT);
if (!dslot)
return true;
if (gimple_debug_bind_p (stmt))
continue;
elt.version = SSA_NAME_VERSION (name);
- slot = name_copies.find_slot_with_hash (&elt, elt.version, NO_INSERT);
+ slot = name_copies->find_slot_with_hash (&elt, elt.version, NO_INSERT);
if (!slot)
{
gimple_debug_bind_reset_value (stmt);
shared data is stored in and loaded from. */
static void
create_call_for_reduction (struct loop *loop,
- reduction_info_table_type reduction_list,
+ reduction_info_table_type *reduction_list,
struct clsn_data *ld_st_data)
{
- reduction_list.traverse <struct loop *, create_phi_for_local_result> (loop);
+ reduction_list->traverse <struct loop *, create_phi_for_local_result> (loop);
/* Find the fallthru edge from GIMPLE_OMP_CONTINUE. */
ld_st_data->load_bb = FALLTHRU_EDGE (loop->latch)->dest;
reduction_list
- .traverse <struct clsn_data *, create_call_for_reduction_1> (ld_st_data);
+ ->traverse <struct clsn_data *, create_call_for_reduction_1> (ld_st_data);
}
/* Callback for htab_traverse. Loads the final reduction value at the
REDUCTION_LIST describes the list of reductions that the
loads should be generated for. */
static void
-create_final_loads_for_reduction (reduction_info_table_type reduction_list,
+create_final_loads_for_reduction (reduction_info_table_type *reduction_list,
struct clsn_data *ld_st_data)
{
gimple_stmt_iterator gsi;
gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
reduction_list
- .traverse <struct clsn_data *, create_loads_for_reductions> (ld_st_data);
+ ->traverse <struct clsn_data *, create_loads_for_reductions> (ld_st_data);
}
static void
separate_decls_in_region (edge entry, edge exit,
- reduction_info_table_type reduction_list,
+ reduction_info_table_type *reduction_list,
tree *arg_struct, tree *new_arg_struct,
struct clsn_data *ld_st_data)
{
basic_block bb1 = split_edge (entry);
basic_block bb0 = single_pred (bb1);
- name_to_copy_table_type name_copies;
- name_copies.create (10);
- int_tree_htab_type decl_copies;
- decl_copies.create (10);
+ name_to_copy_table_type name_copies (10);
+ int_tree_htab_type decl_copies (10);
unsigned i;
tree type, type_name, nvar;
gimple_stmt_iterator gsi;
{
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
separate_decls_in_region_stmt (entry, exit, gsi_stmt (gsi),
- name_copies, decl_copies);
+ &name_copies, &decl_copies);
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
has_debug_stmt = true;
else
separate_decls_in_region_stmt (entry, exit, stmt,
- name_copies, decl_copies);
+ &name_copies, &decl_copies);
}
}
}
if (is_gimple_debug (stmt))
{
- if (separate_decls_in_region_debug (stmt, name_copies,
- decl_copies))
+ if (separate_decls_in_region_debug (stmt, &name_copies,
+ &decl_copies))
{
gsi_remove (&gsi, true);
continue;
}
}
- if (name_copies.elements () == 0 && reduction_list.elements () == 0)
+ if (name_copies.elements () == 0 && reduction_list->elements () == 0)
{
/* It may happen that there is nothing to copy (if there are only
loop carried and external variables in the loop). */
TYPE_NAME (type) = type_name;
name_copies.traverse <tree, add_field_for_name> (type);
- if (reduction_list.is_created () && reduction_list.elements () > 0)
+ if (reduction_list && reduction_list->elements () > 0)
{
/* Create the fields for reductions. */
- reduction_list.traverse <tree, add_field_for_reduction> (type);
+ reduction_list->traverse <tree, add_field_for_reduction> (type);
}
layout_type (type);
/* Load the calculation from memory (after the join of the threads). */
- if (reduction_list.is_created () && reduction_list.elements () > 0)
+ if (reduction_list && reduction_list->elements () > 0)
{
reduction_list
- .traverse <struct clsn_data *, create_stores_for_reduction>
- (ld_st_data);
+ ->traverse <struct clsn_data *, create_stores_for_reduction>
+ (ld_st_data);
clsn_data.load = make_ssa_name (nvar, NULL);
clsn_data.load_bb = exit->dest;
clsn_data.store = ld_st_data->store;
create_final_loads_for_reduction (reduction_list, &clsn_data);
}
}
-
- decl_copies.dispose ();
- name_copies.dispose ();
}
/* Bitmap containing uids of functions created by parallelization. We cannot
static void
transform_to_exit_first_loop (struct loop *loop,
- reduction_info_table_type reduction_list,
+ reduction_info_table_type *reduction_list,
tree nit)
{
basic_block *bbs, *nbbs, ex_bb, orig_header;
PHI_RESULT of this phi is the resulting value of the reduction
variable when exiting the loop. */
- if (reduction_list.elements () > 0)
+ if (reduction_list->elements () > 0)
{
struct reduction_info *red;
REDUCTION_LIST describes the reductions existent in the LOOP. */
static void
-gen_parallel_loop (struct loop *loop, reduction_info_table_type reduction_list,
+gen_parallel_loop (struct loop *loop,
+ reduction_info_table_type *reduction_list,
unsigned n_threads, struct tree_niter_desc *niter)
{
tree many_iterations_cond, type, nit;
transform_to_exit_first_loop (loop, reduction_list, nit);
/* Generate initializations for reductions. */
- if (reduction_list.elements () > 0)
- reduction_list.traverse <struct loop *, initialize_reductions> (loop);
+ if (reduction_list->elements () > 0)
+ reduction_list->traverse <struct loop *, initialize_reductions> (loop);
/* Eliminate the references to local variables from the loop. */
gcc_assert (single_exit (loop));
loc = gimple_location (cond_stmt);
parallel_head = create_parallel_loop (loop, create_loop_fn (loc), arg_struct,
new_arg_struct, n_threads, loc);
- if (reduction_list.elements () > 0)
+ if (reduction_list->elements () > 0)
create_call_for_reduction (loop, reduction_list, &clsn_data);
scev_reset ();
and PHI, insert it to the REDUCTION_LIST. */
static void
-build_new_reduction (reduction_info_table_type reduction_list,
+build_new_reduction (reduction_info_table_type *reduction_list,
gimple reduc_stmt, gimple phi)
{
reduction_info **slot;
new_reduction->reduc_phi = phi;
new_reduction->reduc_version = SSA_NAME_VERSION (gimple_phi_result (phi));
new_reduction->reduction_code = gimple_assign_rhs_code (reduc_stmt);
- slot = reduction_list.find_slot (new_reduction, INSERT);
+ slot = reduction_list->find_slot (new_reduction, INSERT);
*slot = new_reduction;
}
/* Detect all reductions in the LOOP, insert them into REDUCTION_LIST. */
static void
-gather_scalar_reductions (loop_p loop, reduction_info_table_type reduction_list)
+gather_scalar_reductions (loop_p loop, reduction_info_table_type *reduction_list)
{
gimple_stmt_iterator gsi;
loop_vec_info simple_loop_info;
/* As gimple_uid is used by the vectorizer in between vect_analyze_loop_form
and destroy_loop_vec_info, we can set gimple_uid of reduc_phi stmts
only now. */
- reduction_list.traverse <void *, set_reduc_phi_uids> (NULL);
+ reduction_list->traverse <void *, set_reduc_phi_uids> (NULL);
}
/* Try to initialize NITER for code generation part. */
static bool
try_create_reduction_list (loop_p loop,
- reduction_info_table_type reduction_list)
+ reduction_info_table_type *reduction_list)
{
edge exit = single_dom_exit (loop);
gimple_stmt_iterator gsi;
fprintf (dump_file,
" checking if it a part of reduction pattern: \n");
}
- if (reduction_list.elements () == 0)
+ if (reduction_list->elements () == 0)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
bool changed = false;
struct loop *loop;
struct tree_niter_desc niter_desc;
- reduction_info_table_type reduction_list;
struct obstack parloop_obstack;
HOST_WIDE_INT estimated;
source_location loop_loc;
return false;
gcc_obstack_init (&parloop_obstack);
- reduction_list.create (10);
+ reduction_info_table_type reduction_list (10);
init_stmt_vec_info_vec ();
FOR_EACH_LOOP (loop, 0)
if (!try_get_loop_niter (loop, &niter_desc))
continue;
- if (!try_create_reduction_list (loop, reduction_list))
+ if (!try_create_reduction_list (loop, &reduction_list))
continue;
if (!flag_loop_parallelize_all
fprintf (dump_file, "\nloop at %s:%d: ",
LOCATION_FILE (loop_loc), LOCATION_LINE (loop_loc));
}
- gen_parallel_loop (loop, reduction_list,
+ gen_parallel_loop (loop, &reduction_list,
n_threads, &niter_desc);
}
free_stmt_vec_info_vec ();
- reduction_list.dispose ();
obstack_free (&parloop_obstack, NULL);
/* Parallelization will cause new function calls to be inserted through
/* Set of candidates. */
static bitmap candidate_bitmap;
-static hash_table <uid_decl_hasher> candidates;
+static hash_table<uid_decl_hasher> *candidates;
/* For a candidate UID return the candidates decl. */
{
tree_node t;
t.decl_minimal.uid = uid;
- return candidates.find_with_hash (&t, static_cast <hashval_t> (uid));
+ return candidates->find_with_hash (&t, static_cast <hashval_t> (uid));
}
/* Bitmap of candidates which we should try to entirely scalarize away and
sra_initialize (void)
{
candidate_bitmap = BITMAP_ALLOC (NULL);
- candidates.create (vec_safe_length (cfun->local_decls) / 2);
+ candidates = new hash_table<uid_decl_hasher>
+ (vec_safe_length (cfun->local_decls) / 2);
should_scalarize_away_bitmap = BITMAP_ALLOC (NULL);
cannot_scalarize_away_bitmap = BITMAP_ALLOC (NULL);
gcc_obstack_init (&name_obstack);
sra_deinitialize (void)
{
BITMAP_FREE (candidate_bitmap);
- candidates.dispose ();
+ delete candidates;
+ candidates = NULL;
BITMAP_FREE (should_scalarize_away_bitmap);
BITMAP_FREE (cannot_scalarize_away_bitmap);
free_alloc_pool (access_pool);
disqualify_candidate (tree decl, const char *reason)
{
if (bitmap_clear_bit (candidate_bitmap, DECL_UID (decl)))
- candidates.clear_slot (candidates.find_slot_with_hash (decl,
- DECL_UID (decl),
- NO_INSERT));
+ candidates->remove_elt_with_hash (decl, DECL_UID (decl));
if (dump_file && (dump_flags & TDF_DETAILS))
{
}
bitmap_set_bit (candidate_bitmap, DECL_UID (var));
- slot = candidates.find_slot_with_hash (var, DECL_UID (var), INSERT);
+ slot = candidates->find_slot_with_hash (var, DECL_UID (var), INSERT);
*slot = var;
if (dump_file && (dump_flags & TDF_DETAILS))
continue;
bitmap_set_bit (candidate_bitmap, DECL_UID (parm));
- slot = candidates.find_slot_with_hash (parm, DECL_UID (parm), INSERT);
+ slot = candidates->find_slot_with_hash (parm, DECL_UID (parm), INSERT);
*slot = parm;
ret = true;
return val;
}
-typedef hash_table <pointer_hash <gimple_statement_base> > gimple_htab;
+typedef hash_table<pointer_hash<gimple_statement_base> > gimple_htab;
/* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
static void
insert_clobber_before_stack_restore (tree saved_val, tree var,
- gimple_htab *visited)
+ gimple_htab **visited)
{
gimple stmt, clobber_stmt;
tree clobber;
}
else if (gimple_code (stmt) == GIMPLE_PHI)
{
- if (!visited->is_created ())
- visited->create (10);
+ if (!*visited)
+ *visited = new gimple_htab (10);
- slot = visited->find_slot (stmt, INSERT);
+ slot = (*visited)->find_slot (stmt, INSERT);
if (*slot != NULL)
continue;
{
gimple stmt;
tree saved_val;
- gimple_htab visited;
+ gimple_htab *visited = NULL;
for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i))
{
break;
}
- if (visited.is_created ())
- visited.dispose ();
+ delete visited;
}
/* Detects a __builtin_alloca_with_align with constant size argument. Declares
&& p1->second_element == p2->second_element);
}
-typedef hash_table <coalesce_pair_hasher> coalesce_table_type;
+typedef hash_table<coalesce_pair_hasher> coalesce_table_type;
typedef coalesce_table_type::iterator coalesce_iterator_type;
typedef struct coalesce_list_d
{
- coalesce_table_type list; /* Hash table. */
+ coalesce_table_type *list; /* Hash table. */
coalesce_pair_p *sorted; /* List when sorted. */
int num_sorted; /* Number in the sorted list. */
cost_one_pair_p cost_one_list;/* Single use coalesces with cost 1. */
size = 40;
list = (coalesce_list_p) xmalloc (sizeof (struct coalesce_list_d));
- list->list.create (size);
+ list->list = new coalesce_table_type (size);
list->sorted = NULL;
list->num_sorted = 0;
list->cost_one_list = NULL;
delete_coalesce_list (coalesce_list_p cl)
{
gcc_assert (cl->cost_one_list == NULL);
- cl->list.dispose ();
+ delete cl->list;
+ cl->list = NULL;
free (cl->sorted);
gcc_assert (cl->num_sorted == 0);
free (cl);
}
hash = coalesce_pair_hasher::hash (&p);
- slot = cl->list.find_slot_with_hash (&p, hash, create ? INSERT : NO_INSERT);
+ slot = cl->list->find_slot_with_hash (&p, hash, create ? INSERT : NO_INSERT);
if (!slot)
return NULL;
static inline int
num_coalesce_pairs (coalesce_list_p cl)
{
- return cl->list.elements ();
+ return cl->list->elements ();
}
/* Iterate over CL using ITER, returning values in PAIR. */
#define FOR_EACH_PARTITION_PAIR(PAIR, ITER, CL) \
- FOR_EACH_HASH_TABLE_ELEMENT ((CL)->list, (PAIR), coalesce_pair_p, (ITER))
+ FOR_EACH_HASH_TABLE_ELEMENT (*(CL)->list, (PAIR), coalesce_pair_p, (ITER))
/* Prepare CL for removal of preferred pairs. When finished they are sorted
from the same SSA_NAME_VAR so debug info remains undisturbed. */
if (!optimize)
{
- hash_table <ssa_name_var_hash> ssa_name_hash;
+ hash_table<ssa_name_var_hash> ssa_name_hash (10);
- ssa_name_hash.create (10);
for (i = 1; i < num_ssa_names; i++)
{
tree a = ssa_name (i);
}
}
}
- ssa_name_hash.dispose ();
}
if (dump_file && (dump_flags & TDF_DETAILS))
dump_var_map (dump_file, map);
global redundancy elimination). Similarly as we pass through conditionals
we record the conditional itself as having either a true or false value
in this table. */
-static hash_table <expr_elt_hasher> avail_exprs;
+static hash_table<expr_elt_hasher> *avail_exprs;
/* Stack of dest,src pairs that need to be restored during finalization.
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
-static void htab_statistics (FILE *, hash_table <expr_elt_hasher>);
+static void htab_statistics (FILE *,
+ const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
memset (&opt_stats, 0, sizeof (opt_stats));
/* Create our hash tables. */
- avail_exprs.create (1024);
+ avail_exprs = new hash_table<expr_elt_hasher> (1024);
avail_exprs_stack.create (20);
const_and_copies_stack.create (20);
need_eh_cleanup = BITMAP_ALLOC (NULL);
loop_optimizer_finalize ();
/* Delete our main hashtable. */
- avail_exprs.dispose ();
+ delete avail_exprs;
+ avail_exprs = NULL;
/* Free asserted bitmaps and stacks. */
BITMAP_FREE (need_eh_cleanup);
print_expr_hash_elt (dump_file, victim);
}
- slot = avail_exprs.find_slot_with_hash (victim, victim->hash, NO_INSERT);
+ slot = avail_exprs->find_slot (victim, NO_INSERT);
gcc_assert (slot && *slot == victim);
- avail_exprs.clear_slot (slot);
+ avail_exprs->clear_slot (slot);
}
}
fprintf (file, "\nHash table statistics:\n");
fprintf (file, " avail_exprs: ");
- htab_statistics (file, avail_exprs);
+ htab_statistics (file, *avail_exprs);
}
/* Dump statistics for the hash table HTAB. */
static void
-htab_statistics (FILE *file, hash_table <expr_elt_hasher> htab)
+htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
(long) htab.size (),
initialize_hash_element_from_expr (&p->cond, p->value, element);
- slot = avail_exprs.find_slot_with_hash (element, element->hash, INSERT);
+ slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
if (*slot == NULL)
{
*slot = element;
return NULL_TREE;
/* Finally try to find the expression in the main expression hash table. */
- slot = avail_exprs.find_slot_with_hash (&element, element.hash,
- (insert ? INSERT : NO_INSERT));
+ slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
if (slot == NULL)
{
free_expr_hash_elt_contents (&element);
{
int x, num_part;
tree var;
- hash_table <tree_int_map_hasher> tree_to_index;
struct tree_int_map *m, *mapstorage;
num_part = num_var_partitions (map);
- tree_to_index.create (num_part);
+ hash_table<tree_int_map_hasher> tree_to_index (num_part);
/* We can have at most num_part entries in the hash tables, so it's
enough to allocate so many map elements once, saving some malloc
calls. */
map->num_basevars = m - mapstorage;
free (mapstorage);
- tree_to_index. dispose ();
}
static struct
{
/* The hash table of memory references accessed in loops. */
- hash_table <mem_ref_hasher> refs;
+ hash_table<mem_ref_hasher> *refs;
/* The list of memory references. */
vec<mem_ref_p> refs_list;
gcc_assert (!store);
hash = iterative_hash_expr (*mem, 0);
- ref = memory_accesses.refs.find_with_hash (*mem, hash);
+ ref = memory_accesses.refs->find_with_hash (*mem, hash);
gcc_assert (ref != NULL);
return ref;
else
{
hash = iterative_hash_expr (*mem, 0);
- slot = memory_accesses.refs.find_slot_with_hash (*mem, hash, INSERT);
+ slot = memory_accesses.refs->find_slot_with_hash (*mem, hash, INSERT);
if (*slot)
{
ref = (mem_ref_p) *slot;
alloc_aux_for_edges (0);
- memory_accesses.refs.create (100);
+ memory_accesses.refs = new hash_table<mem_ref_hasher> (100);
memory_accesses.refs_list.create (100);
/* Allocate a special, unanalyzable mem-ref with ID zero. */
memory_accesses.refs_list.quick_push
bitmap_obstack_release (&lim_bitmap_obstack);
pointer_map_destroy (lim_aux_data_map);
- memory_accesses.refs.dispose ();
+ delete memory_accesses.refs;
+ memory_accesses.refs = NULL;
FOR_EACH_VEC_ELT (memory_accesses.refs_list, i, ref)
memref_free (ref);
/* The hashtable of loop invariant expressions created
by ivopt. */
- hash_table <iv_inv_expr_hasher> inv_expr_tab;
+ hash_table<iv_inv_expr_hasher> *inv_expr_tab;
/* Loop invariant expression id. */
int inv_expr_id;
data->niters = NULL;
data->iv_uses.create (20);
data->iv_candidates.create (20);
- data->inv_expr_tab.create (10);
+ data->inv_expr_tab = new hash_table<iv_inv_expr_hasher> (10);
data->inv_expr_id = 0;
decl_rtl_to_reset.create (20);
}
ent.expr = expr;
ent.hash = iterative_hash_expr (expr, 0);
- slot = data->inv_expr_tab.find_slot (&ent, INSERT);
+ slot = data->inv_expr_tab->find_slot (&ent, INSERT);
if (*slot)
return (*slot)->id;
decl_rtl_to_reset.truncate (0);
- data->inv_expr_tab.empty ();
+ data->inv_expr_tab->empty ();
data->inv_expr_id = 0;
}
decl_rtl_to_reset.release ();
data->iv_uses.release ();
data->iv_candidates.release ();
- data->inv_expr_tab.dispose ();
+ delete data->inv_expr_tab;
+ data->inv_expr_tab = NULL;
}
/* Returns true if the loop body BODY includes any function calls. */
&& n1->size == n2->size;
}
-/* The hash table for remembering what we've seen. */
-static hash_table <ssa_names_hasher> seen_ssa_names;
+class nontrapping_dom_walker : public dom_walker
+{
+public:
+ nontrapping_dom_walker (cdi_direction direction, pointer_set_t *ps)
+ : dom_walker (direction), m_nontrapping (ps), m_seen_ssa_names (128) {}
+
+ virtual void before_dom_children (basic_block);
+ virtual void after_dom_children (basic_block);
+
+private:
+
+ /* We see the expression EXP in basic block BB. If it's an interesting
+ expression (an MEM_REF through an SSA_NAME) possibly insert the
+ expression into the set NONTRAP or the hash table of seen expressions.
+ STORE is true if this expression is on the LHS, otherwise it's on
+ the RHS. */
+ void add_or_mark_expr (basic_block, tree, bool);
+
+ pointer_set_t *m_nontrapping;
+
+ /* The hash table for remembering what we've seen. */
+ hash_table<ssa_names_hasher> m_seen_ssa_names;
+};
+
+/* Called by walk_dominator_tree, when entering the block BB. */
+void
+nontrapping_dom_walker::before_dom_children (basic_block bb)
+{
+ edge e;
+ edge_iterator ei;
+ gimple_stmt_iterator gsi;
+
+ /* If we haven't seen all our predecessors, clear the hash-table. */
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ if ((((size_t)e->src->aux) & 2) == 0)
+ {
+ nt_call_phase++;
+ break;
+ }
+
+ /* Mark this BB as being on the path to dominator root and as visited. */
+ bb->aux = (void*)(1 | 2);
+
+ /* And walk the statements in order. */
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+
+ if (is_gimple_call (stmt) && !nonfreeing_call_p (stmt))
+ nt_call_phase++;
+ else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
+ {
+ add_or_mark_expr (bb, gimple_assign_lhs (stmt), true);
+ add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), false);
+ }
+ }
+}
+
+/* Called by walk_dominator_tree, when basic block BB is exited. */
+void
+nontrapping_dom_walker::after_dom_children (basic_block bb)
+{
+ /* This BB isn't on the path to dominator root anymore. */
+ bb->aux = (void*)2;
+}
/* We see the expression EXP in basic block BB. If it's an interesting
expression (an MEM_REF through an SSA_NAME) possibly insert the
expression into the set NONTRAP or the hash table of seen expressions.
STORE is true if this expression is on the LHS, otherwise it's on
the RHS. */
-static void
-add_or_mark_expr (basic_block bb, tree exp,
- struct pointer_set_t *nontrap, bool store)
+void
+nontrapping_dom_walker::add_or_mark_expr (basic_block bb, tree exp, bool store)
{
HOST_WIDE_INT size;
map.offset = tree_to_shwi (TREE_OPERAND (exp, 1));
map.size = size;
- slot = seen_ssa_names.find_slot (&map, INSERT);
+ slot = m_seen_ssa_names.find_slot (&map, INSERT);
n2bb = *slot;
if (n2bb && n2bb->phase >= nt_call_phase)
found_bb = n2bb->bb;
then we can't trap. */
if (found_bb && (((size_t)found_bb->aux) & 1) == 1)
{
- pointer_set_insert (nontrap, exp);
+ pointer_set_insert (m_nontrapping, exp);
}
else
{
}
}
-class nontrapping_dom_walker : public dom_walker
-{
-public:
- nontrapping_dom_walker (cdi_direction direction, pointer_set_t *ps)
- : dom_walker (direction), m_nontrapping (ps) {}
-
- virtual void before_dom_children (basic_block);
- virtual void after_dom_children (basic_block);
-
-private:
- pointer_set_t *m_nontrapping;
-};
-
-/* Called by walk_dominator_tree, when entering the block BB. */
-void
-nontrapping_dom_walker::before_dom_children (basic_block bb)
-{
- edge e;
- edge_iterator ei;
- gimple_stmt_iterator gsi;
-
- /* If we haven't seen all our predecessors, clear the hash-table. */
- FOR_EACH_EDGE (e, ei, bb->preds)
- if ((((size_t)e->src->aux) & 2) == 0)
- {
- nt_call_phase++;
- break;
- }
-
- /* Mark this BB as being on the path to dominator root and as visited. */
- bb->aux = (void*)(1 | 2);
-
- /* And walk the statements in order. */
- for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
- {
- gimple stmt = gsi_stmt (gsi);
-
- if (is_gimple_call (stmt) && !nonfreeing_call_p (stmt))
- nt_call_phase++;
- else if (gimple_assign_single_p (stmt) && !gimple_has_volatile_ops (stmt))
- {
- add_or_mark_expr (bb, gimple_assign_lhs (stmt), m_nontrapping, true);
- add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), m_nontrapping, false);
- }
- }
-}
-
-/* Called by walk_dominator_tree, when basic block BB is exited. */
-void
-nontrapping_dom_walker::after_dom_children (basic_block bb)
-{
- /* This BB isn't on the path to dominator root anymore. */
- bb->aux = (void*)2;
-}
-
/* This is the entry point of gathering non trapping memory accesses.
It will do a dominator walk over the whole function, and it will
make use of the bb->aux pointers. It returns a set of trees
{
nt_call_phase = 0;
pointer_set_t *nontrap = pointer_set_create ();
- seen_ssa_names.create (128);
/* We're going to do a dominator walk, so ensure that we have
dominance information. */
calculate_dominance_info (CDI_DOMINATORS);
nontrapping_dom_walker (CDI_DOMINATORS, nontrap)
.walk (cfun->cfg->x_entry_block_ptr);
- seen_ssa_names.dispose ();
-
clear_aux_for_blocks ();
return nontrap;
}
/* Mapping from expression to id number we can use in bitmap sets. */
static vec<pre_expr> expressions;
-static hash_table <pre_expr_d> expression_to_id;
+static hash_table<pre_expr_d> *expression_to_id;
static vec<unsigned> name_to_id;
/* Allocate an expression id for EXPR. */
}
else
{
- slot = expression_to_id.find_slot (expr, INSERT);
+ slot = expression_to_id->find_slot (expr, INSERT);
gcc_assert (!*slot);
*slot = expr;
}
}
else
{
- slot = expression_to_id.find_slot (expr, NO_INSERT);
+ slot = expression_to_id->find_slot (expr, NO_INSERT);
if (!slot)
return 0;
return ((pre_expr)*slot)->id;
/* The phi_translate_table caches phi translations for a given
expression and predecessor. */
-static hash_table <expr_pred_trans_d> phi_translate_table;
+static hash_table<expr_pred_trans_d> *phi_translate_table;
/* Add the tuple mapping from {expression E, basic block PRED} to
the phi translation table and return whether it pre-existed. */
tem.e = e;
tem.pred = pred;
tem.hashcode = hash;
- slot = phi_translate_table.find_slot_with_hash (&tem, hash, INSERT);
+ slot = phi_translate_table->find_slot_with_hash (&tem, hash, INSERT);
if (*slot)
{
*entry = *slot;
else
/* Remove failed translations again, they cause insert
iteration to not pick up new opportunities reliably. */
- phi_translate_table.remove_elt_with_hash (slot, slot->hashcode);
+ phi_translate_table->remove_elt_with_hash (slot, slot->hashcode);
}
return phitrans;
calculate_dominance_info (CDI_DOMINATORS);
bitmap_obstack_initialize (&grand_bitmap_obstack);
- phi_translate_table.create (5110);
- expression_to_id.create (num_ssa_names * 3);
+ phi_translate_table = new hash_table<expr_pred_trans_d> (5110);
+ expression_to_id = new hash_table<pre_expr_d> (num_ssa_names * 3);
bitmap_set_pool = create_alloc_pool ("Bitmap sets",
sizeof (struct bitmap_set), 30);
pre_expr_pool = create_alloc_pool ("pre_expr nodes",
bitmap_obstack_release (&grand_bitmap_obstack);
free_alloc_pool (bitmap_set_pool);
free_alloc_pool (pre_expr_pool);
- phi_translate_table.dispose ();
- expression_to_id.dispose ();
+ delete phi_translate_table;
+ phi_translate_table = NULL;
+ delete expression_to_id;
+ expression_to_id = NULL;
name_to_id.release ();
free_aux_for_blocks ();
unsigned nr_candidates, nr_candidates2;
sbitmap_iterator sbi0;
vec<operand_entry_t> *subops;
- hash_table <oecount_hasher> ctable;
bool changed = false;
int next_oecount_id = 0;
/* Build linearized sub-operand lists and the counting table. */
cvec.create (0);
- ctable.create (15);
+
+ hash_table<oecount_hasher> ctable (15);
+
/* ??? Macro arguments cannot have multi-argument template types in
them. This typedef is needed to workaround that limitation. */
typedef vec<operand_entry_t> vec_operand_entry_t_heap;
}
}
}
- ctable.dispose ();
/* Sort the counting table. */
cvec.qsort (oecount_cmp);
return vn_nary_op_eq (vno1, vno2);
}
-typedef hash_table <vn_nary_op_hasher> vn_nary_op_table_type;
+typedef hash_table<vn_nary_op_hasher> vn_nary_op_table_type;
typedef vn_nary_op_table_type::iterator vn_nary_op_iterator_type;
phi->phiargs.release ();
}
-typedef hash_table <vn_phi_hasher> vn_phi_table_type;
+typedef hash_table<vn_phi_hasher> vn_phi_table_type;
typedef vn_phi_table_type::iterator vn_phi_iterator_type;
free_reference (v);
}
-typedef hash_table <vn_reference_hasher> vn_reference_table_type;
+typedef hash_table<vn_reference_hasher> vn_reference_table_type;
typedef vn_reference_table_type::iterator vn_reference_iterator_type;
typedef struct vn_tables_s
{
- vn_nary_op_table_type nary;
- vn_phi_table_type phis;
- vn_reference_table_type references;
+ vn_nary_op_table_type *nary;
+ vn_phi_table_type *phis;
+ vn_reference_table_type *references;
struct obstack nary_obstack;
alloc_pool phis_pool;
alloc_pool references_pool;
return vn_constant_eq_with_type (vc1->constant, vc2->constant);
}
-static hash_table <vn_constant_hasher> constant_to_value_id;
+static hash_table<vn_constant_hasher> *constant_to_value_id;
static bitmap constant_value_ids;
vc.hashcode = vn_hash_constant_with_type (constant);
vc.constant = constant;
- slot = constant_to_value_id.find_slot_with_hash (&vc, vc.hashcode, NO_INSERT);
+ slot = constant_to_value_id->find_slot (&vc, NO_INSERT);
if (slot)
return (*slot)->value_id;
return 0;
vc.hashcode = vn_hash_constant_with_type (constant);
vc.constant = constant;
- slot = constant_to_value_id.find_slot_with_hash (&vc, vc.hashcode, INSERT);
+ slot = constant_to_value_id->find_slot (&vc, INSERT);
if (*slot)
return (*slot)->value_id;
hashval_t hash;
hash = vr->hashcode;
- slot = current_info->references.find_slot_with_hash (vr, hash, NO_INSERT);
+ slot = current_info->references->find_slot_with_hash (vr, hash, NO_INSERT);
if (!slot && current_info == optimistic_info)
- slot = valid_info->references.find_slot_with_hash (vr, hash, NO_INSERT);
+ slot = valid_info->references->find_slot_with_hash (vr, hash, NO_INSERT);
if (slot)
{
if (vnresult)
vr->hashcode = vr->hashcode + SSA_NAME_VERSION (vr->vuse);
hash = vr->hashcode;
- slot = current_info->references.find_slot_with_hash (vr, hash, NO_INSERT);
+ slot = current_info->references->find_slot_with_hash (vr, hash, NO_INSERT);
if (!slot && current_info == optimistic_info)
- slot = valid_info->references.find_slot_with_hash (vr, hash, NO_INSERT);
+ slot = valid_info->references->find_slot_with_hash (vr, hash, NO_INSERT);
if (slot)
return *slot;
vr1->result = TREE_CODE (result) == SSA_NAME ? SSA_VAL (result) : result;
vr1->result_vdef = vdef;
- slot = current_info->references.find_slot_with_hash (vr1, vr1->hashcode,
- INSERT);
+ slot = current_info->references->find_slot_with_hash (vr1, vr1->hashcode,
+ INSERT);
/* Because we lookup stores using vuses, and value number failures
using the vdefs (see visit_reference_op_store for how and why),
result = SSA_VAL (result);
vr1->result = result;
- slot = current_info->references.find_slot_with_hash (vr1, vr1->hashcode,
- INSERT);
+ slot = current_info->references->find_slot_with_hash (vr1, vr1->hashcode,
+ INSERT);
/* At this point we should have all the things inserted that we have
seen before, and we should never try inserting something that
*vnresult = NULL;
vno->hashcode = vn_nary_op_compute_hash (vno);
- slot = current_info->nary.find_slot_with_hash (vno, vno->hashcode, NO_INSERT);
+ slot = current_info->nary->find_slot_with_hash (vno, vno->hashcode,
+ NO_INSERT);
if (!slot && current_info == optimistic_info)
- slot = valid_info->nary.find_slot_with_hash (vno, vno->hashcode, NO_INSERT);
+ slot = valid_info->nary->find_slot_with_hash (vno, vno->hashcode,
+ NO_INSERT);
if (!slot)
return NULL_TREE;
if (vnresult)
VNO->HASHCODE first. */
static vn_nary_op_t
-vn_nary_op_insert_into (vn_nary_op_t vno, vn_nary_op_table_type table,
+vn_nary_op_insert_into (vn_nary_op_t vno, vn_nary_op_table_type *table,
bool compute_hash)
{
vn_nary_op_s **slot;
if (compute_hash)
vno->hashcode = vn_nary_op_compute_hash (vno);
- slot = table.find_slot_with_hash (vno, vno->hashcode, INSERT);
+ slot = table->find_slot_with_hash (vno, vno->hashcode, INSERT);
gcc_assert (!*slot);
*slot = vno;
vp1.phiargs = shared_lookup_phiargs;
vp1.block = gimple_bb (phi);
vp1.hashcode = vn_phi_compute_hash (&vp1);
- slot = current_info->phis.find_slot_with_hash (&vp1, vp1.hashcode, NO_INSERT);
+ slot = current_info->phis->find_slot_with_hash (&vp1, vp1.hashcode,
+ NO_INSERT);
if (!slot && current_info == optimistic_info)
- slot = valid_info->phis.find_slot_with_hash (&vp1, vp1.hashcode, NO_INSERT);
+ slot = valid_info->phis->find_slot_with_hash (&vp1, vp1.hashcode,
+ NO_INSERT);
if (!slot)
return NULL_TREE;
return (*slot)->result;
vp1->result = result;
vp1->hashcode = vn_phi_compute_hash (vp1);
- slot = current_info->phis.find_slot_with_hash (vp1, vp1->hashcode, INSERT);
+ slot = current_info->phis->find_slot_with_hash (vp1, vp1->hashcode, INSERT);
/* Because we iterate over phi operations more than once, it's
possible the slot might already exist here, hence no assert.*/
vr2->hashcode = vr1.hashcode;
vr2->result = lhs;
vr2->result_vdef = vdef;
- slot = current_info->references.find_slot_with_hash (vr2, vr2->hashcode,
- INSERT);
+ slot = current_info->references->find_slot_with_hash (vr2, vr2->hashcode,
+ INSERT);
if (*slot)
free_reference (*slot);
*slot = vr2;
vn_phi_s **slot;
memcpy (phi, ophi, sizeof (*phi));
ophi->phiargs.create (0);
- slot = info->phis.find_slot_with_hash (phi, phi->hashcode, INSERT);
+ slot = info->phis->find_slot_with_hash (phi, phi->hashcode, INSERT);
gcc_assert (!*slot);
*slot = phi;
}
ref = (vn_reference_t) pool_alloc (info->references_pool);
memcpy (ref, oref, sizeof (*ref));
oref->operands.create (0);
- slot = info->references.find_slot_with_hash (ref, ref->hashcode, INSERT);
+ slot = info->references->find_slot_with_hash (ref, ref->hashcode, INSERT);
if (*slot)
free_reference (*slot);
*slot = ref;
/* As we are value-numbering optimistically we have to
clear the expression tables and the simplified expressions
in each iteration until we converge. */
- optimistic_info->nary.empty ();
- optimistic_info->phis.empty ();
- optimistic_info->references.empty ();
+ optimistic_info->nary->empty ();
+ optimistic_info->phis->empty ();
+ optimistic_info->references->empty ();
obstack_free (&optimistic_info->nary_obstack, NULL);
gcc_obstack_init (&optimistic_info->nary_obstack);
empty_alloc_pool (optimistic_info->phis_pool);
/* Finally, copy the contents of the no longer used optimistic
table to the valid table. */
- FOR_EACH_HASH_TABLE_ELEMENT (optimistic_info->nary, nary, vn_nary_op_t, hin)
+ FOR_EACH_HASH_TABLE_ELEMENT (*optimistic_info->nary, nary, vn_nary_op_t, hin)
copy_nary (nary, valid_info);
- FOR_EACH_HASH_TABLE_ELEMENT (optimistic_info->phis, phi, vn_phi_t, hip)
+ FOR_EACH_HASH_TABLE_ELEMENT (*optimistic_info->phis, phi, vn_phi_t, hip)
copy_phi (phi, valid_info);
- FOR_EACH_HASH_TABLE_ELEMENT (optimistic_info->references,
+ FOR_EACH_HASH_TABLE_ELEMENT (*optimistic_info->references,
ref, vn_reference_t, hir)
copy_reference (ref, valid_info);
static void
allocate_vn_table (vn_tables_t table)
{
- table->phis.create (23);
- table->nary.create (23);
- table->references.create (23);
+ table->phis = new vn_phi_table_type (23);
+ table->nary = new vn_nary_op_table_type (23);
+ table->references = new vn_reference_table_type (23);
gcc_obstack_init (&table->nary_obstack);
table->phis_pool = create_alloc_pool ("VN phis",
static void
free_vn_table (vn_tables_t table)
{
- table->phis.dispose ();
- table->nary.dispose ();
- table->references.dispose ();
+ delete table->phis;
+ table->phis = NULL;
+ delete table->nary;
+ table->nary = NULL;
+ delete table->references;
+ table->references = NULL;
obstack_free (&table->nary_obstack, NULL);
free_alloc_pool (table->phis_pool);
free_alloc_pool (table->references_pool);
calculate_dominance_info (CDI_DOMINATORS);
sccstack.create (0);
- constant_to_value_id.create (23);
+ constant_to_value_id = new hash_table<vn_constant_hasher> (23);
constant_value_ids = BITMAP_ALLOC (NULL);
{
size_t i;
- constant_to_value_id.dispose ();
+ delete constant_to_value_id;
+ constant_to_value_id = NULL;
BITMAP_FREE (constant_value_ids);
shared_lookup_phiargs.release ();
shared_lookup_references.release ();
/* Now set the value ids of the things we had put in the hash
table. */
- FOR_EACH_HASH_TABLE_ELEMENT (valid_info->nary, vno, vn_nary_op_t, hin)
+ FOR_EACH_HASH_TABLE_ELEMENT (*valid_info->nary, vno, vn_nary_op_t, hin)
set_value_id_for_result (vno->result, &vno->value_id);
- FOR_EACH_HASH_TABLE_ELEMENT (valid_info->phis, vp, vn_phi_t, hip)
+ FOR_EACH_HASH_TABLE_ELEMENT (*valid_info->phis, vp, vn_phi_t, hip)
set_value_id_for_result (vp->result, &vp->value_id);
- FOR_EACH_HASH_TABLE_ELEMENT (valid_info->references, vr, vn_reference_t, hir)
+ FOR_EACH_HASH_TABLE_ELEMENT (*valid_info->references, vr, vn_reference_t,
+ hir)
set_value_id_for_result (vr->result, &vr->value_id);
}
/* Hash table for mapping decls to a chained list of offset -> idx
mappings. */
-static hash_table <stridxlist_hasher> decl_to_stridxlist_htab;
+static hash_table<stridxlist_hasher> *decl_to_stridxlist_htab;
/* Obstack for struct stridxlist and struct decl_stridxlist_map. */
static struct obstack stridx_obstack;
struct stridxlist *list;
tree base;
- if (!decl_to_stridxlist_htab.is_created ())
+ if (!decl_to_stridxlist_htab)
return 0;
base = get_addr_base_and_unit_offset (exp, &off);
return 0;
ent.base.from = base;
- e = decl_to_stridxlist_htab.find_with_hash (&ent, DECL_UID (base));
+ e = decl_to_stridxlist_htab->find_with_hash (&ent, DECL_UID (base));
if (e == NULL)
return 0;
if (base == NULL_TREE || !DECL_P (base))
return NULL;
- if (!decl_to_stridxlist_htab.is_created ())
+ if (!decl_to_stridxlist_htab)
{
- decl_to_stridxlist_htab.create (64);
+ decl_to_stridxlist_htab = new hash_table<stridxlist_hasher> (64);
gcc_obstack_init (&stridx_obstack);
}
ent.base.from = base;
- slot = decl_to_stridxlist_htab.find_slot_with_hash (&ent, DECL_UID (base),
- INSERT);
+ slot = decl_to_stridxlist_htab->find_slot_with_hash (&ent, DECL_UID (base),
+ INSERT);
if (*slot)
{
int i;
ssa_ver_to_stridx.release ();
free_alloc_pool (strinfo_pool);
- if (decl_to_stridxlist_htab.is_created ())
+ if (decl_to_stridxlist_htab)
{
obstack_free (&stridx_obstack, NULL);
- decl_to_stridxlist_htab.dispose ();
+ delete decl_to_stridxlist_htab;
+ decl_to_stridxlist_htab = NULL;
}
laststmt.stmt = NULL;
laststmt.len = NULL_TREE;
/* A hashtable for mapping a bitmap of labels->pointer equivalence
classes. */
-static hash_table <equiv_class_hasher> pointer_equiv_class_table;
+static hash_table<equiv_class_hasher> *pointer_equiv_class_table;
/* A hashtable for mapping a bitmap of labels->location equivalence
classes. */
-static hash_table <equiv_class_hasher> location_equiv_class_table;
+static hash_table<equiv_class_hasher> *location_equiv_class_table;
/* Lookup a equivalence class in TABLE by the bitmap of LABELS with
hash HAS it contains. Sets *REF_LABELS to the bitmap LABELS
is equivalent to. */
static equiv_class_label *
-equiv_class_lookup_or_add (hash_table <equiv_class_hasher> table, bitmap labels)
+equiv_class_lookup_or_add (hash_table<equiv_class_hasher> *table,
+ bitmap labels)
{
equiv_class_label **slot;
equiv_class_label ecl;
ecl.labels = labels;
ecl.hashcode = bitmap_hash (labels);
- slot = table.find_slot_with_hash (&ecl, ecl.hashcode, INSERT);
+ slot = table->find_slot (&ecl, INSERT);
if (!*slot)
{
*slot = XNEW (struct equiv_class_label);
struct scc_info *si = init_scc_info (size);
bitmap_obstack_initialize (&iteration_obstack);
- pointer_equiv_class_table.create (511);
- location_equiv_class_table.create (511);
+ pointer_equiv_class_table = new hash_table<equiv_class_hasher> (511);
+ location_equiv_class_table
+ = new hash_table<equiv_class_hasher> (511);
pointer_equiv_class = 1;
location_equiv_class = 1;
free (graph->points_to);
free (graph->eq_rep);
sbitmap_free (graph->direct_nodes);
- pointer_equiv_class_table.dispose ();
- location_equiv_class_table.dispose ();
+ delete pointer_equiv_class_table;
+ pointer_equiv_class_table = NULL;
+ delete location_equiv_class_table;
+ location_equiv_class_table = NULL;
bitmap_obstack_release (&iteration_obstack);
}
/* Shared_bitmap hashtable. */
-static hash_table <shared_bitmap_hasher> shared_bitmap_table;
+static hash_table<shared_bitmap_hasher> *shared_bitmap_table;
/* Lookup a bitmap in the shared bitmap hashtable, and return an already
existing instance if there is one, NULL otherwise. */
sbi.pt_vars = pt_vars;
sbi.hashcode = bitmap_hash (pt_vars);
- slot = shared_bitmap_table.find_slot_with_hash (&sbi, sbi.hashcode,
- NO_INSERT);
+ slot = shared_bitmap_table->find_slot (&sbi, NO_INSERT);
if (!slot)
return NULL;
else
sbi->pt_vars = pt_vars;
sbi->hashcode = bitmap_hash (pt_vars);
- slot = shared_bitmap_table.find_slot_with_hash (sbi, sbi->hashcode, INSERT);
+ slot = shared_bitmap_table->find_slot (sbi, INSERT);
gcc_assert (!*slot);
*slot = sbi;
}
call_stmt_vars = pointer_map_create ();
memset (&stats, 0, sizeof (stats));
- shared_bitmap_table.create (511);
+ shared_bitmap_table = new hash_table<shared_bitmap_hasher> (511);
init_base_vars ();
gcc_obstack_init (&fake_var_decl_obstack);
{
unsigned int i;
- shared_bitmap_table.dispose ();
+ delete shared_bitmap_table;
+ shared_bitmap_table = NULL;
if (dump_file && (dump_flags & TDF_STATS))
fprintf (dump_file, "Points to sets created:%d\n",
stats.points_to_sets_created);
same->succ_flags.truncate (0);
}
-static hash_table <same_succ_def> same_succ_htab;
+static hash_table<same_succ_def> *same_succ_htab;
/* Array that is used to store the edge flags for a successor. */
DEBUG_FUNCTION void
debug_same_succ ( void)
{
- same_succ_htab.traverse <FILE *, ssa_same_succ_print_traverse> (stderr);
+ same_succ_htab->traverse <FILE *, ssa_same_succ_print_traverse> (stderr);
}
same->hashval = same_succ_hash (same);
- slot = same_succ_htab.find_slot_with_hash (same, same->hashval, INSERT);
+ slot = same_succ_htab->find_slot_with_hash (same, same->hashval, INSERT);
if (*slot == NULL)
{
*slot = same;
init_worklist (void)
{
alloc_aux_for_blocks (sizeof (struct aux_bb_info));
- same_succ_htab.create (n_basic_blocks_for_fn (cfun));
+ same_succ_htab = new hash_table<same_succ_def> (n_basic_blocks_for_fn (cfun));
same_succ_edge_flags = XCNEWVEC (int, last_basic_block_for_fn (cfun));
deleted_bbs = BITMAP_ALLOC (NULL);
deleted_bb_preds = BITMAP_ALLOC (NULL);
delete_worklist (void)
{
free_aux_for_blocks ();
- same_succ_htab.dispose ();
+ delete same_succ_htab;
+ same_succ_htab = NULL;
XDELETEVEC (same_succ_edge_flags);
same_succ_edge_flags = NULL;
BITMAP_FREE (deleted_bbs);
same_succ same = BB_SAME_SUCC (bb);
BB_SAME_SUCC (bb) = NULL;
if (bitmap_single_bit_set_p (same->bbs))
- same_succ_htab.remove_elt_with_hash (same, same->hashval);
+ same_succ_htab->remove_elt_with_hash (same, same->hashval);
else
bitmap_clear_bit (same->bbs, bb->index);
}
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "htab collision / search: %f\n",
- same_succ_htab.collisions ());
+ same_succ_htab->collisions ());
if (nr_bbs_removed_total > 0)
{
/* Main data structure to hold information for duplicates of BB. */
-static hash_table <redirection_data> redirection_data;
+static hash_table<redirection_data> *redirection_data;
/* Given an outgoing edge E lookup and return its entry in our hash table.
elt->dup_blocks[1] = NULL;
elt->incoming_edges = NULL;
- slot = redirection_data.find_slot (elt, insert);
+ slot = redirection_data->find_slot (elt, insert);
/* This will only happen if INSERT is false and the entry is not
in the hash table. */
use a hash table. For normal code there should be no noticeable
difference. However, if we have a block with a large number of
incoming and outgoing edges such linear searches can get expensive. */
- redirection_data.create (EDGE_COUNT (bb->succs));
+ redirection_data
+ = new hash_table<struct redirection_data> (EDGE_COUNT (bb->succs));
/* If we thread the latch of the loop to its exit, the loop ceases to
exist. Make sure we do not restrict ourselves in order to preserve
local_info.template_block = NULL;
local_info.bb = bb;
local_info.jumps_threaded = false;
- redirection_data.traverse <ssa_local_info_t *, ssa_create_duplicates>
+ redirection_data->traverse <ssa_local_info_t *, ssa_create_duplicates>
(&local_info);
/* The template does not have an outgoing edge. Create that outgoing
We do this after creating all the duplicates to avoid creating
unnecessary edges. */
- redirection_data.traverse <ssa_local_info_t *, ssa_fixup_template_block>
+ redirection_data->traverse <ssa_local_info_t *, ssa_fixup_template_block>
(&local_info);
/* The hash table traversals above created the duplicate blocks (and the
statements within the duplicate blocks). This loop creates PHI nodes for
the duplicated blocks and redirects the incoming edges into BB to reach
the duplicates of BB. */
- redirection_data.traverse <ssa_local_info_t *, ssa_redirect_edges>
+ redirection_data->traverse <ssa_local_info_t *, ssa_redirect_edges>
(&local_info);
/* Done with this block. Clear REDIRECTION_DATA. */
- redirection_data.dispose ();
+ delete redirection_data;
+ redirection_data = NULL;
if (noloop_only
&& bb == bb->loop_father->header)
/* Global hash table implementing a mapping from invariant values
to a list of SSA_NAMEs which have the same value. We might be
able to reuse tree-vn for this code. */
-static hash_table <val_ssa_equiv_hasher> val_ssa_equiv;
+static hash_table<val_ssa_equiv_hasher> *val_ssa_equiv;
static void uncprop_into_successor_phis (basic_block);
an_equiv_elt.value = value;
an_equiv_elt.equivalences.create (0);
- slot = val_ssa_equiv.find_slot (&an_equiv_elt, NO_INSERT);
+ slot = val_ssa_equiv->find_slot (&an_equiv_elt, NO_INSERT);
an_equiv_elt_p = *slot;
an_equiv_elt_p->equivalences.pop ();
an_equiv_elt_p->value = value;
an_equiv_elt_p->equivalences.create (0);
- slot = val_ssa_equiv.find_slot (an_equiv_elt_p, INSERT);
+ slot = val_ssa_equiv->find_slot (an_equiv_elt_p, INSERT);
if (*slot == NULL)
*slot = an_equiv_elt_p;
/* Lookup this argument's value in the hash table. */
an_equiv_elt.value = arg;
an_equiv_elt.equivalences.create (0);
- slot = val_ssa_equiv.find_slot (&an_equiv_elt, NO_INSERT);
+ slot = val_ssa_equiv->find_slot (&an_equiv_elt, NO_INSERT);
if (slot)
{
associate_equivalences_with_edges ();
/* Create our global data structures. */
- val_ssa_equiv.create (1024);
+ val_ssa_equiv = new hash_table<val_ssa_equiv_hasher> (1024);
/* We're going to do a dominator walk, so ensure that we have
dominance information. */
/* we just need to empty elements out of the hash table, and cleanup the
AUX field on the edges. */
- val_ssa_equiv.dispose ();
+ delete val_ssa_equiv;
+ val_ssa_equiv = NULL;
FOR_EACH_BB_FN (bb, fun)
{
edge e;
bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
elem.npeel = npeel;
- slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo).find (&elem);
+ slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find (&elem);
if (slot)
slot->count++;
else
slot->npeel = npeel;
slot->dr = dr;
slot->count = 1;
- new_slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo).find_slot (slot, INSERT);
+ new_slot
+ = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find_slot (slot, INSERT);
*new_slot = slot;
}
res.inside_cost = INT_MAX;
res.outside_cost = INT_MAX;
LOOP_VINFO_PEELING_HTAB (loop_vinfo)
- .traverse <_vect_peel_extended_info *,
- vect_peeling_hash_get_lowest_cost> (&res);
+ ->traverse <_vect_peel_extended_info *,
+ vect_peeling_hash_get_lowest_cost> (&res);
}
else
{
res.peel_info.count = 0;
LOOP_VINFO_PEELING_HTAB (loop_vinfo)
- .traverse <_vect_peel_extended_info *,
- vect_peeling_hash_get_most_frequent> (&res);
+ ->traverse <_vect_peel_extended_info *,
+ vect_peeling_hash_get_most_frequent> (&res);
}
*npeel = res.peel_info.npeel;
size_zero_node) < 0;
/* Save info about DR in the hash table. */
- if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo).is_created ())
- LOOP_VINFO_PEELING_HTAB (loop_vinfo).create (1);
+ if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
+ LOOP_VINFO_PEELING_HTAB (loop_vinfo)
+ = new hash_table<peel_info_hasher> (1);
vectype = STMT_VINFO_VECTYPE (stmt_info);
nelements = TYPE_VECTOR_SUBPARTS (vectype);
LOOP_VINFO_REDUCTIONS (loop_vinfo).release ();
LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).release ();
- if (LOOP_VINFO_PEELING_HTAB (loop_vinfo).is_created ())
- LOOP_VINFO_PEELING_HTAB (loop_vinfo).dispose ();
+ delete LOOP_VINFO_PEELING_HTAB (loop_vinfo);
+ LOOP_VINFO_PEELING_HTAB (loop_vinfo) = NULL;
destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
into their corresponding constants. */
static void
-adjust_simduid_builtins (hash_table <simduid_to_vf> &htab)
+adjust_simduid_builtins (hash_table<simduid_to_vf> **htab)
{
basic_block bb;
gcc_assert (TREE_CODE (arg) == SSA_NAME);
simduid_to_vf *p = NULL, data;
data.simduid = DECL_UID (SSA_NAME_VAR (arg));
- if (htab.is_created ())
- p = htab.find (&data);
+ if (*htab)
+ p = (*htab)->find (&data);
if (p)
vf = p->vf;
switch (ifn)
struct note_simd_array_uses_struct
{
- hash_table <simd_array_to_simduid> *htab;
+ hash_table<simd_array_to_simduid> **htab;
unsigned int simduid;
};
&& DECL_CONTEXT (*tp) == current_function_decl)
{
simd_array_to_simduid data;
- if (!ns->htab->is_created ())
- ns->htab->create (15);
+ if (!*ns->htab)
+ *ns->htab = new hash_table<simd_array_to_simduid> (15);
data.decl = *tp;
data.simduid = ns->simduid;
- simd_array_to_simduid **slot = ns->htab->find_slot (&data, INSERT);
+ simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
if (*slot == NULL)
{
simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
simduid. */
static void
-note_simd_array_uses (hash_table <simd_array_to_simduid> *htab)
+note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
{
basic_block bb;
gimple_stmt_iterator gsi;
unsigned int num_vectorized_loops = 0;
unsigned int vect_loops_num;
struct loop *loop;
- hash_table <simduid_to_vf> simduid_to_vf_htab;
- hash_table <simd_array_to_simduid> simd_array_to_simduid_htab;
+ hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
+ hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
bool any_ifcvt_loops = false;
unsigned ret = 0;
if (vect_loops_num <= 1)
{
if (cfun->has_simduid_loops)
- adjust_simduid_builtins (simduid_to_vf_htab);
+ adjust_simduid_builtins (&simduid_to_vf_htab);
return 0;
}
if (loop->simduid)
{
simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
- if (!simduid_to_vf_htab.is_created ())
- simduid_to_vf_htab.create (15);
+ if (!simduid_to_vf_htab)
+ simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
- *simduid_to_vf_htab.find_slot (simduid_to_vf_data, INSERT)
+ *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
= simduid_to_vf_data;
}
/* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE} builtins. */
if (cfun->has_simduid_loops)
- adjust_simduid_builtins (simduid_to_vf_htab);
+ adjust_simduid_builtins (&simduid_to_vf_htab);
/* Shrink any "omp array simd" temporary arrays to the
actual vectorization factors. */
- if (simd_array_to_simduid_htab.is_created ())
+ if (simd_array_to_simduid_htab)
{
- for (hash_table <simd_array_to_simduid>::iterator iter
- = simd_array_to_simduid_htab.begin ();
- iter != simd_array_to_simduid_htab.end (); ++iter)
+ for (hash_table<simd_array_to_simduid>::iterator iter
+ = simd_array_to_simduid_htab->begin ();
+ iter != simd_array_to_simduid_htab->end (); ++iter)
if ((*iter).simduid != -1U)
{
tree decl = (*iter).decl;
int vf = 1;
- if (simduid_to_vf_htab.is_created ())
+ if (simduid_to_vf_htab)
{
simduid_to_vf *p = NULL, data;
data.simduid = (*iter).simduid;
- p = simduid_to_vf_htab.find (&data);
+ p = simduid_to_vf_htab->find (&data);
if (p)
vf = p->vf;
}
relayout_decl (decl);
}
- simd_array_to_simduid_htab.dispose ();
+ delete simd_array_to_simduid_htab;
}
- if (simduid_to_vf_htab.is_created ())
- simduid_to_vf_htab.dispose ();
+ delete simduid_to_vf_htab;
+ simduid_to_vf_htab = NULL;
if (num_vectorized_loops > 0)
{
vec<gimple> reduction_chains;
/* Hash table used to choose the best peeling option. */
- hash_table <peel_info_hasher> peeling_htab;
+ hash_table<peel_info_hasher> *peeling_htab;
/* Cost data used by the target cost model. */
void *target_cost_data;
dead_debug_global_init (struct dead_debug_global *debug, bitmap used)
{
debug->used = used;
+ debug->htab = NULL;
if (used)
bitmap_clear (used);
}
dead_debug_global_entry temp_entry;
temp_entry.reg = reg;
- dead_debug_global_entry *entry = global->htab.find (&temp_entry);
+ dead_debug_global_entry *entry = global->htab->find (&temp_entry);
gcc_checking_assert (entry && entry->reg == temp_entry.reg);
return entry;
temp_entry.reg = reg;
temp_entry.dtemp = dtemp;
- if (!global->htab.is_created ())
- global->htab.create (31);
+ if (!global->htab)
+ global->htab = new hash_table<dead_debug_hash_descr> (31);
- dead_debug_global_entry **slot = global->htab.find_slot (&temp_entry, INSERT);
+ dead_debug_global_entry **slot = global->htab->find_slot (&temp_entry,
+ INSERT);
gcc_checking_assert (!*slot);
*slot = XNEW (dead_debug_global_entry);
**slot = temp_entry;
if (global->used != used)
BITMAP_FREE (global->used);
- if (global->htab.is_created ())
- global->htab.dispose ();
+ delete global->htab;
+ global->htab = NULL;
}
/* Add USE to DEBUG, or substitute it right away if it's a pseudo in
struct dead_debug_global
{
/* This hash table that maps pseudos to debug temps. */
- hash_table <dead_debug_hash_descr> htab;
+ hash_table<dead_debug_hash_descr> *htab;
/* For each entry in htab, the bit corresponding to its REGNO will
be set. */
bitmap used;
variable_htab_free (var);
}
-typedef hash_table <variable_hasher> variable_table_type;
+typedef hash_table<variable_hasher> variable_table_type;
typedef variable_table_type::iterator variable_iterator_type;
/* Structure for passing some other parameters to function
enum emit_note_where where;
/* The variables and values active at this point. */
- variable_table_type vars;
+ variable_table_type *vars;
} emit_note_data;
/* Structure holding a refcounted hash table. If refcount > 1,
int refcount;
/* Actual hash table. */
- variable_table_type htab;
+ variable_table_type *htab;
} *shared_hash;
/* Structure holding the IN or OUT set for a basic block. */
static alloc_pool loc_exp_dep_pool;
/* Changed variables, notes will be emitted for them. */
-static variable_table_type changed_variables;
+static variable_table_type *changed_variables;
/* Shall notes be emitted? */
static bool emit_notes;
/* Values whose dynamic location lists have gone empty, but whose
cselib location lists are still usable. Use this to hold the
current location, the backlinks, etc, during emit_notes. */
-static variable_table_type dropped_values;
+static variable_table_type *dropped_values;
/* Empty shared hashtable. */
static shared_hash empty_shared_hash;
static variable_def **unshare_variable (dataflow_set *set, variable_def **slot,
variable var, enum var_init_status);
-static void vars_copy (variable_table_type, variable_table_type);
+static void vars_copy (variable_table_type *, variable_table_type *);
static tree var_debug_decl (tree);
static void var_reg_set (dataflow_set *, rtx, enum var_init_status, rtx);
static void var_reg_delete_and_set (dataflow_set *, rtx, bool,
static void dataflow_set_copy (dataflow_set *, dataflow_set *);
static int variable_union_info_cmp_pos (const void *, const void *);
static void dataflow_set_union (dataflow_set *, dataflow_set *);
-static location_chain find_loc_in_1pdv (rtx, variable, variable_table_type);
+static location_chain find_loc_in_1pdv (rtx, variable, variable_table_type *);
static bool canon_value_cmp (rtx, rtx);
static int loc_cmp (rtx, rtx);
static bool variable_part_different_p (variable_part *, variable_part *);
static void dump_attrs_list (attrs);
static void dump_var (variable);
-static void dump_vars (variable_table_type);
+static void dump_vars (variable_table_type *);
static void dump_dataflow_set (dataflow_set *);
static void dump_dataflow_sets (void);
/* Return the hash table for VARS. */
-static inline variable_table_type
+static inline variable_table_type *
shared_hash_htab (shared_hash vars)
{
return vars->htab;
shared_hash new_vars = (shared_hash) pool_alloc (shared_hash_pool);
gcc_assert (vars->refcount > 1);
new_vars->refcount = 1;
- new_vars->htab.create (vars->htab.elements () + 3);
+ new_vars->htab = new variable_table_type (vars->htab->elements () + 3);
vars_copy (new_vars->htab, vars->htab);
vars->refcount--;
return new_vars;
gcc_checking_assert (vars->refcount > 0);
if (--vars->refcount == 0)
{
- vars->htab.dispose ();
+ delete vars->htab;
pool_free (shared_hash_pool, vars);
}
}
{
if (shared_hash_shared (*pvars))
*pvars = shared_hash_unshare (*pvars);
- return shared_hash_htab (*pvars).find_slot_with_hash (dv, dvhash, ins);
+ return shared_hash_htab (*pvars)->find_slot_with_hash (dv, dvhash, ins);
}
static inline variable_def **
static inline variable_def **
shared_hash_find_slot_1 (shared_hash vars, decl_or_value dv, hashval_t dvhash)
{
- return shared_hash_htab (vars).find_slot_with_hash (dv, dvhash,
- shared_hash_shared (vars)
- ? NO_INSERT : INSERT);
+ return shared_hash_htab (vars)->find_slot_with_hash (dv, dvhash,
+ shared_hash_shared (vars)
+ ? NO_INSERT : INSERT);
}
static inline variable_def **
shared_hash_find_slot_noinsert_1 (shared_hash vars, decl_or_value dv,
hashval_t dvhash)
{
- return shared_hash_htab (vars).find_slot_with_hash (dv, dvhash, NO_INSERT);
+ return shared_hash_htab (vars)->find_slot_with_hash (dv, dvhash, NO_INSERT);
}
static inline variable_def **
static inline variable
shared_hash_find_1 (shared_hash vars, decl_or_value dv, hashval_t dvhash)
{
- return shared_hash_htab (vars).find_with_hash (dv, dvhash);
+ return shared_hash_htab (vars)->find_with_hash (dv, dvhash);
}
static inline variable
if (var->in_changed_variables)
{
variable_def **cslot
- = changed_variables.find_slot_with_hash (var->dv,
- dv_htab_hash (var->dv), NO_INSERT);
+ = changed_variables->find_slot_with_hash (var->dv,
+ dv_htab_hash (var->dv),
+ NO_INSERT);
gcc_assert (*cslot == (void *) var);
var->in_changed_variables = false;
variable_htab_free (var);
/* Copy all variables from hash table SRC to hash table DST. */
static void
-vars_copy (variable_table_type dst, variable_table_type src)
+vars_copy (variable_table_type *dst, variable_table_type *src)
{
variable_iterator_type hi;
variable var;
- FOR_EACH_HASH_TABLE_ELEMENT (src, var, variable, hi)
+ FOR_EACH_HASH_TABLE_ELEMENT (*src, var, variable, hi)
{
variable_def **dstp;
var->refcount++;
- dstp = dst.find_slot_with_hash (var->dv, dv_htab_hash (var->dv), INSERT);
+ dstp = dst->find_slot_with_hash (var->dv, dv_htab_hash (var->dv),
+ INSERT);
*dstp = var;
}
}
set->traversed_vars = set->vars;
shared_hash_htab (set->vars)
- .traverse <overlapping_mems*, drop_overlapping_mem_locs> (&coms);
+ ->traverse <overlapping_mems*, drop_overlapping_mem_locs> (&coms);
set->traversed_vars = NULL;
}
variable_iterator_type hi;
variable var;
- FOR_EACH_HASH_TABLE_ELEMENT (shared_hash_htab (src->vars),
+ FOR_EACH_HASH_TABLE_ELEMENT (*shared_hash_htab (src->vars),
var, variable, hi)
variable_union (var, dst);
}
be in star-canonical form. */
static location_chain
-find_loc_in_1pdv (rtx loc, variable var, variable_table_type vars)
+find_loc_in_1pdv (rtx loc, variable var, variable_table_type *vars)
{
location_chain node;
enum rtx_code loc_code;
gcc_checking_assert (!node->next);
dv = dv_from_value (node->loc);
- rvar = vars.find_with_hash (dv, dv_htab_hash (dv));
+ rvar = vars->find_with_hash (dv, dv_htab_hash (dv));
return find_loc_in_1pdv (loc, rvar, vars);
}
variable_iterator_type hi;
variable var;
- src1_elems = shared_hash_htab (src1->vars).elements ();
- src2_elems = shared_hash_htab (src2->vars).elements ();
+ src1_elems = shared_hash_htab (src1->vars)->elements ();
+ src2_elems = shared_hash_htab (src2->vars)->elements ();
dataflow_set_init (dst);
dst->stack_adjust = cur.stack_adjust;
shared_hash_destroy (dst->vars);
dst->vars = (shared_hash) pool_alloc (shared_hash_pool);
dst->vars->refcount = 1;
- dst->vars->htab.create (MAX (src1_elems, src2_elems));
+ dst->vars->htab = new variable_table_type (MAX (src1_elems, src2_elems));
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
attrs_list_mpdv_union (&dst->regs[i], src1->regs[i], src2->regs[i]);
dsm.cur = src1;
dsm.src_onepart_cnt = 0;
- FOR_EACH_HASH_TABLE_ELEMENT (shared_hash_htab (dsm.src->vars),
+ FOR_EACH_HASH_TABLE_ELEMENT (*shared_hash_htab (dsm.src->vars),
var, variable, hi)
variable_merge_over_src (var, &dsm);
- FOR_EACH_HASH_TABLE_ELEMENT (shared_hash_htab (dsm.cur->vars),
+ FOR_EACH_HASH_TABLE_ELEMENT (*shared_hash_htab (dsm.cur->vars),
var, variable, hi)
variable_merge_over_cur (var, &dsm);
dfpm.permp = permp;
shared_hash_htab (set->vars)
- .traverse <dfset_post_merge*, variable_post_merge_new_vals> (&dfpm);
+ ->traverse <dfset_post_merge*, variable_post_merge_new_vals> (&dfpm);
if (*permp)
shared_hash_htab ((*permp)->vars)
- .traverse <dfset_post_merge*, variable_post_merge_perm_vals> (&dfpm);
+ ->traverse <dfset_post_merge*, variable_post_merge_perm_vals> (&dfpm);
shared_hash_htab (set->vars)
- .traverse <dataflow_set *, canonicalize_values_star> (set);
+ ->traverse <dataflow_set *, canonicalize_values_star> (set);
shared_hash_htab (set->vars)
- .traverse <dataflow_set *, canonicalize_vars_star> (set);
+ ->traverse <dataflow_set *, canonicalize_vars_star> (set);
}
/* Return a node whose loc is a MEM that refers to EXPR in the
any values recursively mentioned in the location lists. */
static location_chain
-find_mem_expr_in_1pdv (tree expr, rtx val, variable_table_type vars)
+find_mem_expr_in_1pdv (tree expr, rtx val, variable_table_type *vars)
{
location_chain node;
decl_or_value dv;
&& !VALUE_RECURSED_INTO (val));
dv = dv_from_value (val);
- var = vars.find_with_hash (dv, dv_htab_hash (dv));
+ var = vars->find_with_hash (dv, dv_htab_hash (dv));
if (!var)
return NULL;
{
set->traversed_vars = set->vars;
shared_hash_htab (set->vars)
- .traverse <dataflow_set *, dataflow_set_preserve_mem_locs> (set);
+ ->traverse <dataflow_set *, dataflow_set_preserve_mem_locs> (set);
set->traversed_vars = set->vars;
shared_hash_htab (set->vars)
- .traverse <dataflow_set *, dataflow_set_remove_mem_locs> (set);
+ ->traverse <dataflow_set *, dataflow_set_remove_mem_locs> (set);
set->traversed_vars = NULL;
}
}
if (old_set->vars == new_set->vars)
return false;
- if (shared_hash_htab (old_set->vars).elements ()
- != shared_hash_htab (new_set->vars).elements ())
+ if (shared_hash_htab (old_set->vars)->elements ()
+ != shared_hash_htab (new_set->vars)->elements ())
return true;
- FOR_EACH_HASH_TABLE_ELEMENT (shared_hash_htab (old_set->vars),
+ FOR_EACH_HASH_TABLE_ELEMENT (*shared_hash_htab (old_set->vars),
var1, variable, hi)
{
- variable_table_type htab = shared_hash_htab (new_set->vars);
- variable var2 = htab.find_with_hash (var1->dv, dv_htab_hash (var1->dv));
+ variable_table_type *htab = shared_hash_htab (new_set->vars);
+ variable var2 = htab->find_with_hash (var1->dv, dv_htab_hash (var1->dv));
if (!var2)
{
if (dump_file && (dump_flags & TDF_DETAILS))
dataflow_set_equiv_regs (out);
shared_hash_htab (out->vars)
- .traverse <dataflow_set *, canonicalize_values_mark> (out);
+ ->traverse <dataflow_set *, canonicalize_values_mark> (out);
shared_hash_htab (out->vars)
- .traverse <dataflow_set *, canonicalize_values_star> (out);
+ ->traverse <dataflow_set *, canonicalize_values_star> (out);
#if ENABLE_CHECKING
shared_hash_htab (out->vars)
- .traverse <dataflow_set *, canonicalize_loc_order_check> (out);
+ ->traverse <dataflow_set *, canonicalize_loc_order_check> (out);
#endif
}
changed = dataflow_set_different (&old_out, out);
if (VTI (bb)->in.vars)
{
htabsz
- -= shared_hash_htab (VTI (bb)->in.vars).size ()
- + shared_hash_htab (VTI (bb)->out.vars).size ();
- oldinsz = shared_hash_htab (VTI (bb)->in.vars).elements ();
- oldoutsz = shared_hash_htab (VTI (bb)->out.vars).elements ();
+ -= shared_hash_htab (VTI (bb)->in.vars)->size ()
+ + shared_hash_htab (VTI (bb)->out.vars)->size ();
+ oldinsz = shared_hash_htab (VTI (bb)->in.vars)->elements ();
+ oldoutsz
+ = shared_hash_htab (VTI (bb)->out.vars)->elements ();
}
else
oldinsz = oldoutsz = 0;
/* Merge and merge_adjust should keep entries in
canonical order. */
shared_hash_htab (in->vars)
- .traverse <dataflow_set *,
- canonicalize_loc_order_check> (in);
+ ->traverse <dataflow_set *,
+ canonicalize_loc_order_check> (in);
#endif
if (dst_can_be_shared)
{
}
changed = compute_bb_dataflow (bb);
- htabsz += shared_hash_htab (VTI (bb)->in.vars).size ()
- + shared_hash_htab (VTI (bb)->out.vars).size ();
+ htabsz += shared_hash_htab (VTI (bb)->in.vars)->size ()
+ + shared_hash_htab (VTI (bb)->out.vars)->size ();
if (htabmax && htabsz > htabmax)
{
fprintf (dump_file,
"BB %i: in %i (was %i), out %i (was %i), rem %i + %i, tsz %i\n",
bb->index,
- (int)shared_hash_htab (VTI (bb)->in.vars).size (),
+ (int)shared_hash_htab (VTI (bb)->in.vars)->size (),
oldinsz,
- (int)shared_hash_htab (VTI (bb)->out.vars).size (),
+ (int)shared_hash_htab (VTI (bb)->out.vars)->size (),
oldoutsz,
(int)worklist->nodes, (int)pending->nodes, htabsz);
/* Print the information about variables from hash table VARS to dump file. */
static void
-dump_vars (variable_table_type vars)
+dump_vars (variable_table_type *vars)
{
- if (vars.elements () > 0)
+ if (vars->elements () > 0)
{
fprintf (dump_file, "Variables:\n");
- vars.traverse <void *, dump_var_tracking_slot> (NULL);
+ vars->traverse <void *, dump_var_tracking_slot> (NULL);
}
}
variable empty_var;
onepart_enum_t onepart;
- slot = dropped_values.find_slot_with_hash (dv, dv_htab_hash (dv), insert);
+ slot = dropped_values->find_slot_with_hash (dv, dv_htab_hash (dv), insert);
if (!slot)
return NULL;
/* Remember this decl or VALUE has been added to changed_variables. */
set_dv_changed (var->dv, true);
- slot = changed_variables.find_slot_with_hash (var->dv, hash, INSERT);
+ slot = changed_variables->find_slot_with_hash (var->dv, hash, INSERT);
if (*slot)
{
if (onepart == ONEPART_VALUE || onepart == ONEPART_DEXPR)
{
- dslot = dropped_values.find_slot_with_hash (var->dv,
- dv_htab_hash (var->dv),
- INSERT);
+ dslot = dropped_values->find_slot_with_hash (var->dv,
+ dv_htab_hash (var->dv),
+ INSERT);
empty_var = *dslot;
if (empty_var)
if (shared_hash_shared (set->vars))
slot = shared_hash_find_slot_unshare (&set->vars, var->dv,
NO_INSERT);
- shared_hash_htab (set->vars).clear_slot (slot);
+ shared_hash_htab (set->vars)->clear_slot (slot);
}
}
}
struct expand_loc_callback_data
{
/* The variables and values active at this point. */
- variable_table_type vars;
+ variable_table_type *vars;
/* Stack of values and debug_exprs under expansion, and their
children. */
back-links in VARS. */
static void
-loc_exp_insert_dep (variable var, rtx x, variable_table_type vars)
+loc_exp_insert_dep (variable var, rtx x, variable_table_type *vars)
{
decl_or_value dv;
variable xvar;
/* ??? Build a vector of variables parallel to EXPANDING, to avoid
an additional look up? */
- xvar = vars.find_with_hash (dv, dv_htab_hash (dv));
+ xvar = vars->find_with_hash (dv, dv_htab_hash (dv));
if (!xvar)
{
static bool
loc_exp_dep_set (variable var, rtx result, rtx *value, int count,
- variable_table_type vars)
+ variable_table_type *vars)
{
bool pending_recursion = false;
attempt to compute a current location. */
static void
-notify_dependents_of_resolved_value (variable ivar, variable_table_type vars)
+notify_dependents_of_resolved_value (variable ivar, variable_table_type *vars)
{
loc_exp_dep *led, *next;
continue;
}
- var = vars.find_with_hash (dv, dv_htab_hash (dv));
+ var = vars->find_with_hash (dv, dv_htab_hash (dv));
if (!var)
var = variable_from_dropped (dv, NO_INSERT);
return NULL;
}
- var = elcd->vars.find_with_hash (dv, dv_htab_hash (dv));
+ var = elcd->vars->find_with_hash (dv, dv_htab_hash (dv));
if (!var)
{
equivalences in VARS, updating their CUR_LOCs in the process. */
static rtx
-vt_expand_loc (rtx loc, variable_table_type vars)
+vt_expand_loc (rtx loc, variable_table_type *vars)
{
struct expand_loc_callback_data data;
rtx result;
in VARS, updating their CUR_LOCs in the process. */
static rtx
-vt_expand_1pvar (variable var, variable_table_type vars)
+vt_expand_1pvar (variable var, variable_table_type *vars)
{
struct expand_loc_callback_data data;
rtx loc;
variable var = *varp;
rtx insn = data->insn;
enum emit_note_where where = data->where;
- variable_table_type vars = data->vars;
+ variable_table_type *vars = data->vars;
rtx note, note_vl;
int i, j, n_var_parts;
bool complete;
set_dv_changed (var->dv, false);
gcc_assert (var->in_changed_variables);
var->in_changed_variables = false;
- changed_variables.clear_slot (varp);
+ changed_variables->clear_slot (varp);
/* Continue traversing the hash table. */
return 1;
variable_def **slot;
variable var;
- slot = changed_variables.find_slot_with_hash (dv, dv_htab_hash (dv),
+ slot = changed_variables->find_slot_with_hash (dv, dv_htab_hash (dv),
NO_INSERT);
var = *slot;
var->in_changed_variables = false;
- changed_variables.clear_slot (slot);
+ changed_variables->clear_slot (slot);
}
/* If VAL (a value or debug_expr) has backlinks to variables actively
have dependencies of their own to notify. */
static void
-notify_dependents_of_changed_value (rtx val, variable_table_type htab,
+notify_dependents_of_changed_value (rtx val, variable_table_type *htab,
vec<rtx, va_heap> *changed_values_stack)
{
variable_def **slot;
loc_exp_dep *led;
decl_or_value dv = dv_from_rtx (val);
- slot = changed_variables.find_slot_with_hash (dv, dv_htab_hash (dv),
+ slot = changed_variables->find_slot_with_hash (dv, dv_htab_hash (dv),
NO_INSERT);
if (!slot)
- slot = htab.find_slot_with_hash (dv, dv_htab_hash (dv), NO_INSERT);
+ slot = htab->find_slot_with_hash (dv, dv_htab_hash (dv), NO_INSERT);
if (!slot)
- slot = dropped_values.find_slot_with_hash (dv, dv_htab_hash (dv),
- NO_INSERT);
+ slot = dropped_values->find_slot_with_hash (dv, dv_htab_hash (dv),
+ NO_INSERT);
var = *slot;
while ((led = VAR_LOC_DEP_LST (var)))
break;
case ONEPART_VDECL:
- ivar = htab.find_with_hash (ldv, dv_htab_hash (ldv));
+ ivar = htab->find_with_hash (ldv, dv_htab_hash (ldv));
gcc_checking_assert (!VAR_LOC_DEP_LST (ivar));
variable_was_changed (ivar, NULL);
break;
case NOT_ONEPART:
pool_free (loc_exp_dep_pool, led);
- ivar = htab.find_with_hash (ldv, dv_htab_hash (ldv));
+ ivar = htab->find_with_hash (ldv, dv_htab_hash (ldv));
if (ivar)
{
int i = ivar->n_var_parts;
CHANGED_VARIABLES. */
static void
-process_changed_values (variable_table_type htab)
+process_changed_values (variable_table_type *htab)
{
int i, n;
rtx val;
/* Move values from changed_variables to changed_values_stack. */
changed_variables
- .traverse <vec<rtx, va_heap>*, var_track_values_to_stack>
+ ->traverse <vec<rtx, va_heap>*, var_track_values_to_stack>
(&changed_values_stack);
/* Back-propagate change notifications in values while popping
shared_hash vars)
{
emit_note_data data;
- variable_table_type htab = shared_hash_htab (vars);
+ variable_table_type *htab = shared_hash_htab (vars);
- if (!changed_variables.elements ())
+ if (!changed_variables->elements ())
return;
if (MAY_HAVE_DEBUG_INSNS)
data.vars = htab;
changed_variables
- .traverse <emit_note_data*, emit_note_insn_var_location> (&data);
+ ->traverse <emit_note_data*, emit_note_insn_var_location> (&data);
}
/* Add variable *SLOT to the chain CHANGED_VARIABLES if it differs from the
same variable in hash table DATA or is not there at all. */
int
-emit_notes_for_differences_1 (variable_def **slot, variable_table_type new_vars)
+emit_notes_for_differences_1 (variable_def **slot, variable_table_type *new_vars)
{
variable old_var, new_var;
old_var = *slot;
- new_var = new_vars.find_with_hash (old_var->dv, dv_htab_hash (old_var->dv));
+ new_var = new_vars->find_with_hash (old_var->dv, dv_htab_hash (old_var->dv));
if (!new_var)
{
table DATA. */
int
-emit_notes_for_differences_2 (variable_def **slot, variable_table_type old_vars)
+emit_notes_for_differences_2 (variable_def **slot, variable_table_type *old_vars)
{
variable old_var, new_var;
new_var = *slot;
- old_var = old_vars.find_with_hash (new_var->dv, dv_htab_hash (new_var->dv));
+ old_var = old_vars->find_with_hash (new_var->dv, dv_htab_hash (new_var->dv));
if (!old_var)
{
int i;
dataflow_set *new_set)
{
shared_hash_htab (old_set->vars)
- .traverse <variable_table_type, emit_notes_for_differences_1>
+ ->traverse <variable_table_type *, emit_notes_for_differences_1>
(shared_hash_htab (new_set->vars));
shared_hash_htab (new_set->vars)
- .traverse <variable_table_type, emit_notes_for_differences_2>
+ ->traverse <variable_table_type *, emit_notes_for_differences_2>
(shared_hash_htab (old_set->vars));
emit_notes_for_changes (insn, EMIT_NOTE_BEFORE_INSN, new_set->vars);
}
basic_block bb;
dataflow_set cur;
- gcc_assert (!changed_variables.elements ());
+ gcc_assert (!changed_variables->elements ());
/* Free memory occupied by the out hash tables, as they aren't used
anymore. */
if (MAY_HAVE_DEBUG_INSNS)
{
- dropped_values.create (cselib_get_next_uid () * 2);
+ dropped_values = new variable_table_type (cselib_get_next_uid () * 2);
loc_exp_dep_pool = create_alloc_pool ("loc_exp_dep pool",
sizeof (loc_exp_dep), 64);
}
}
#ifdef ENABLE_CHECKING
shared_hash_htab (cur.vars)
- .traverse <variable_table_type, emit_notes_for_differences_1>
+ ->traverse <variable_table_type *, emit_notes_for_differences_1>
(shared_hash_htab (empty_shared_hash));
#endif
dataflow_set_destroy (&cur);
if (MAY_HAVE_DEBUG_INSNS)
- dropped_values.dispose ();
+ delete dropped_values;
+ dropped_values = NULL;
emit_notes = false;
}
sizeof (struct shared_hash_def), 256);
empty_shared_hash = (shared_hash) pool_alloc (shared_hash_pool);
empty_shared_hash->refcount = 1;
- empty_shared_hash->htab.create (1);
- changed_variables.create (10);
+ empty_shared_hash->htab = new variable_table_type (1);
+ changed_variables = new variable_table_type (10);
/* Init the IN and OUT sets. */
FOR_ALL_BB_FN (bb, cfun)
}
}
free_aux_for_blocks ();
- empty_shared_hash->htab.dispose ();
- changed_variables.dispose ();
+ delete empty_shared_hash->htab;
+ empty_shared_hash->htab = NULL;
+ delete changed_variables;
+ changed_variables = NULL;
free_alloc_pool (attrs_pool);
free_alloc_pool (var_pool);
free_alloc_pool (loc_chain_pool);
struct vtable_registration key;
struct vtable_registration **slot;
- gcc_assert (node && node->registered.is_created ());
+ gcc_assert (node && node->registered);
key.vtable_decl = vtable_decl;
- slot = (struct vtable_registration **) node->registered.find_slot (&key,
- NO_INSERT);
+ slot = node->registered->find_slot (&key, NO_INSERT);
if (slot && (*slot))
{
struct vtable_registration **slot;
bool inserted_something = false;
- if (!node || !node->registered.is_created ())
+ if (!node || !node->registered)
return false;
key.vtable_decl = vtable_decl;
- slot = (struct vtable_registration **) node->registered.find_slot (&key,
- INSERT);
+ slot = node->registered->find_slot (&key, INSERT);
if (! *slot)
{
to find the nodes for various tasks (see comments in vtable-verify.h
for more details. */
-typedef hash_table <vtbl_map_hasher> vtbl_map_table_type;
+typedef hash_table<vtbl_map_hasher> vtbl_map_table_type;
typedef vtbl_map_table_type::iterator vtbl_map_iterator_type;
/* Vtable map variable nodes stored in a hash table. */
-static vtbl_map_table_type vtbl_map_hash;
+static vtbl_map_table_type *vtbl_map_hash;
/* Vtable map variable nodes stored in a vector. */
vec<struct vtbl_map_node *> vtbl_map_nodes_vec;
tree class_name;
unsigned int type_quals;
- if (!vtbl_map_hash.is_created ())
+ if (!vtbl_map_hash)
return NULL;
gcc_assert (TREE_CODE (class_type) == RECORD_TYPE);
class_name = DECL_ASSEMBLER_NAME (class_type_decl);
key.class_name = class_name;
- slot = (struct vtbl_map_node **) vtbl_map_hash.find_slot (&key,
- NO_INSERT);
+ slot = (struct vtbl_map_node **) vtbl_map_hash->find_slot (&key, NO_INSERT);
if (!slot)
return NULL;
return *slot;
tree class_type_decl;
unsigned int type_quals;
- if (!vtbl_map_hash.is_created ())
- vtbl_map_hash.create (10);
+ if (!vtbl_map_hash)
+ vtbl_map_hash = new vtbl_map_table_type (10);
/* Find the TYPE_DECL for the class. */
class_type_decl = TYPE_NAME (base_class_type);
gcc_assert (HAS_DECL_ASSEMBLER_NAME_P (class_type_decl));
key.class_name = DECL_ASSEMBLER_NAME (class_type_decl);
- slot = (struct vtbl_map_node **) vtbl_map_hash.find_slot (&key,
- INSERT);
+ slot = (struct vtbl_map_node **) vtbl_map_hash->find_slot (&key, INSERT);
if (*slot)
return *slot;
(node->class_info->parents).create (4);
(node->class_info->children).create (4);
- node->registered.create (16);
+ node->registered = new register_table_type (16);
node->is_used = false;
static inline bool equal (const value_type *, const compare_type *);
};
-typedef hash_table <registration_hasher> register_table_type;
+typedef hash_table<registration_hasher> register_table_type;
typedef register_table_type::iterator registration_iterator_type;
/* This struct is used to represent the class hierarchy information
variable. */
struct vtbl_map_node *next, *prev; /* Pointers for the linked list
structure. */
- register_table_type registered; /* Hashtable of vtable pointers for which
+ register_table_type *registered; /* Hashtable of vtable pointers for which
we have generated a _VLTRegisterPair
call with this vtable map variable. */
bool is_used; /* Boolean indicating if we used this vtable map