return m != NULL;
}
-// Set the range for NAME to R in the global cache.
+// Clear the range for NAME in the ssa cache.
void
ssa_cache::clear_range (tree name)
{
unsigned v = SSA_NAME_VERSION (name);
if (v >= m_tab.length ())
- m_tab.safe_grow_cleared (num_ssa_names + 1);
+ return;
m_tab[v] = NULL;
}
if (!gimple_range_ssa_p (ssa_name (x)))
continue;
Value_Range r (TREE_TYPE (ssa_name (x)));
- if (get_range (r, ssa_name (x)) && !r.varying_p ())
+  // Invoke dump_range_query which is a protected virtual version of
+  // get_range.  This avoids performance impacts on general queries,
+  // but allows sharing of the dump routine.
+ if (dump_range_query (r, ssa_name (x)) && !r.varying_p ())
{
if (print_header)
{
fputc ('\n', f);
}
+// Virtual protected get_range query used only for dumping.
+
+bool
+ssa_cache::dump_range_query (vrange &r, tree name) const
+{
+ return get_range (r, name);
+}
+
+// Virtual protected get_range query used only for dumping.
+
+bool
+ssa_lazy_cache::dump_range_query (vrange &r, tree name) const
+{
+ return get_range (r, name);
+}
+
+
+// Set the range of NAME to R in the lazy cache.  Return FALSE if NAME did
+// not already have a range (i.e. this is the first range recorded for it).
+
+bool
+ssa_lazy_cache::set_range (tree name, const vrange &r)
+{
+ unsigned v = SSA_NAME_VERSION (name);
+ if (!bitmap_set_bit (active_p, v))
+ {
+ // There is already an entry, simply set it.
+ gcc_checking_assert (v < m_tab.length ());
+ return ssa_cache::set_range (name, r);
+ }
+ if (v >= m_tab.length ())
+ m_tab.safe_grow (num_ssa_names + 1);
+ m_tab[v] = m_range_allocator->clone (r);
+ return false;
+}
+
// --------------------------------------------------------------------------
void clear_range (tree name);
void clear ();
void dump (FILE *f = stderr);
-private:
+protected:
+ virtual bool dump_range_query (vrange &r, tree name) const;
vec<vrange *> m_tab;
vrange_allocator *m_range_allocator;
};
+// This is the same as global cache, except it maintains an active bitmap
+// rather than depending on a zero'd out vector of pointers. This is better
+// for sparsely/lightly used caches.
+// It could be made a fully derived class, but at this point there doesn't seem
+// to be a need to take the performance hit for it.
+
+class ssa_lazy_cache : protected ssa_cache
+{
+public:
+ inline ssa_lazy_cache () { active_p = BITMAP_ALLOC (NULL); }
+ inline ~ssa_lazy_cache () { BITMAP_FREE (active_p); }
+ bool set_range (tree name, const vrange &r);
+ inline bool get_range (vrange &r, tree name) const;
+ inline void clear_range (tree name)
+ { bitmap_clear_bit (active_p, SSA_NAME_VERSION (name)); } ;
+ inline void clear () { bitmap_clear (active_p); }
+ inline void dump (FILE *f = stderr) { ssa_cache::dump (f); }
+protected:
+ virtual bool dump_range_query (vrange &r, tree name) const;
+ bitmap active_p;
+};
+
+// Return TRUE if NAME has a range, and return it in R.
+
+bool
+ssa_lazy_cache::get_range (vrange &r, tree name) const
+{
+ if (!bitmap_bit_p (active_p, SSA_NAME_VERSION (name)))
+ return false;
+ return ssa_cache::get_range (r, name);
+}
+
// This class provides all the caches a global ranger may need, and makes
// them available for gori-computes to query so outgoing edges can be
// properly calculated.
const vec<basic_block> &path,
const bitmap_head *dependencies,
bool resolve)
- : m_cache (new ssa_cache),
- m_has_cache_entry (BITMAP_ALLOC (NULL)),
+ : m_cache (),
m_ranger (ranger),
m_resolve (resolve)
{
}
path_range_query::path_range_query (gimple_ranger &ranger, bool resolve)
- : m_cache (new ssa_cache),
- m_has_cache_entry (BITMAP_ALLOC (NULL)),
+ : m_cache (),
m_ranger (ranger),
m_resolve (resolve)
{
path_range_query::~path_range_query ()
{
delete m_oracle;
- BITMAP_FREE (m_has_cache_entry);
- delete m_cache;
}
// Return TRUE if NAME is an exit dependency for the path.
&& bitmap_bit_p (m_exit_dependencies, SSA_NAME_VERSION (name)));
}
-// Mark cache entry for NAME as unused.
-
-void
-path_range_query::clear_cache (tree name)
-{
- unsigned v = SSA_NAME_VERSION (name);
- bitmap_clear_bit (m_has_cache_entry, v);
-}
-
// If NAME has a cache entry, return it in R, and return TRUE.
inline bool
if (!gimple_range_ssa_p (name))
return get_global_range_query ()->range_of_expr (r, name);
- unsigned v = SSA_NAME_VERSION (name);
- if (bitmap_bit_p (m_has_cache_entry, v))
- return m_cache->get_range (r, name);
-
- return false;
-}
-
-// Set the cache entry for NAME to R.
-
-void
-path_range_query::set_cache (const vrange &r, tree name)
-{
- unsigned v = SSA_NAME_VERSION (name);
- bitmap_set_bit (m_has_cache_entry, v);
- m_cache->set_range (name, r);
+ return m_cache.get_range (r, name);
}
void
fprintf (dump_file, "\n");
}
- m_cache->dump (dump_file);
+ m_cache.dump (dump_file);
}
void
if (m_resolve && defined_outside_path (name))
{
range_on_path_entry (r, name);
- set_cache (r, name);
+ m_cache.set_range (name, r);
return true;
}
r.intersect (glob);
}
- set_cache (r, name);
+ m_cache.set_range (name, r);
return true;
}
m_path = path.copy ();
m_pos = m_path.length () - 1;
m_undefined_path = false;
- bitmap_clear (m_has_cache_entry);
+ m_cache.clear ();
compute_ranges (dependencies);
}
if (m_resolve && m_ranger.range_of_expr (r, name, phi))
return;
- // Try to fold the phi exclusively with global or cached values.
+ // Try to fold the phi exclusively with global values.
// This will get things like PHI <5(99), 6(88)>. We do this by
// calling range_of_expr with no context.
unsigned nargs = gimple_phi_num_args (phi);
for (size_t i = 0; i < nargs; ++i)
{
tree arg = gimple_phi_arg_def (phi, i);
- if (range_of_expr (arg_range, arg, /*stmt=*/NULL))
+ if (m_ranger.range_of_expr (arg_range, arg, /*stmt=*/NULL))
r.union_ (arg_range);
else
{
void
path_range_query::compute_ranges_in_phis (basic_block bb)
{
- auto_bitmap phi_set;
-
// PHIs must be resolved simultaneously on entry to the block
// because any dependencies must be satisfied with values on entry.
// Thus, we calculate all PHIs first, and then update the cache at
Value_Range r (TREE_TYPE (name));
if (range_defined_in_block (r, name, bb))
- {
- unsigned v = SSA_NAME_VERSION (name);
- set_cache (r, name);
- bitmap_set_bit (phi_set, v);
- // Pretend we don't have a cache entry for this name until
- // we're done with all PHIs.
- bitmap_clear_bit (m_has_cache_entry, v);
- }
+ m_cache.set_range (name, r);
}
- bitmap_ior_into (m_has_cache_entry, phi_set);
}
// Return TRUE if relations may be invalidated after crossing edge E.
{
tree name = ssa_name (i);
if (ssa_defined_in_bb (name, bb))
- clear_cache (name);
+ m_cache.clear_range (name);
}
// Solve dependencies defined in this block, starting with the PHIs...
if (gimple_code (SSA_NAME_DEF_STMT (name)) != GIMPLE_PHI
&& range_defined_in_block (r, name, bb))
- set_cache (r, name);
+ m_cache.set_range (name, r);
}
if (at_exit ())
if (get_cache (cached_range, name))
r.intersect (cached_range);
- set_cache (r, name);
+ m_cache.set_range (name, r);
if (DEBUG_SOLVER)
{
fprintf (dump_file, "outgoing_edge_range_p for ");
r.set_varying (TREE_TYPE (name));
if (m_ranger.m_cache.m_exit.maybe_adjust_range (r, name, bb))
- set_cache (r, name);
+ m_cache.set_range (name, r);
}
}