auto_vec <const binding_key *> binding_keys;
for (auto iter : map)
{
- const binding_key *key = iter.first;
- const svalue *bound_sval = iter.second;
+ const binding_key *key = iter.m_key;
+ const svalue *bound_sval = iter.m_sval;
if (const concrete_binding *concrete_key
= key->dyn_cast_concrete_binding ())
{
for (auto cluster_iter : *state.m_region_model->get_store ())
for (auto binding_iter : *cluster_iter.second)
{
- const svalue *svalue = binding_iter.second;
+ const svalue *svalue = binding_iter.m_sval;
if (const region *reg = svalue->maybe_get_region ())
if (svalue->get_type () && !reg->get_type ())
{
concrete_bindings_t conc_bindings;
for (auto iter : cluster)
{
- const binding_key *key = iter.first;
- const svalue *svalue = iter.second;
+ const binding_key *key = iter.m_key;
+ const svalue *svalue = iter.m_sval;
if (auto conc_key = key->dyn_cast_concrete_binding ())
conc_bindings[conc_key->get_bit_range ()] = svalue;
if (const region *reg = svalue->maybe_get_region ())
case SK_COMPOUND:
{
const compound_svalue *compound_sval = (const compound_svalue *)sval;
- binding_map typeless_map;
+ binding_map typeless_map (*mgr.get_store_manager ());
for (auto iter : compound_sval->get_map ())
{
- const binding_key *key = iter.first;
- const svalue *bound_sval = iter.second;
+ const binding_key *key = iter.m_key;
+ const svalue *bound_sval = iter.m_sval;
typeless_map.put (key, strip_types (bound_sval, mgr));
}
return mgr.get_or_create_compound_svalue (NULL_TREE, typeless_map);
= as_a <const compound_svalue *> (summary_sval);
region_model_manager *mgr = get_manager ();
store_manager *store_mgr = mgr->get_store_manager ();
- binding_map caller_map;
+ binding_map caller_map (*store_mgr);
auto_vec <const binding_key *> summary_keys;
for (auto kv : *compound_summary_sval)
- summary_keys.safe_push (kv.first);
+ summary_keys.safe_push (kv.m_key);
summary_keys.qsort (binding_key::cmp_ptrs);
for (auto key : summary_keys)
{
for (auto inner_kv : *inner_compound_sval)
{
// These should already be mapped to the caller.
- const binding_key *inner_key = inner_kv.first;
- const svalue *inner_sval = inner_kv.second;
+ const binding_key *inner_key = inner_kv.m_key;
+ const svalue *inner_sval = inner_kv.m_sval;
gcc_assert (inner_key->concrete_p ());
const concrete_binding *concrete_key
= as_a <const concrete_binding *> (inner_key);
#include "analyzer/common.h"
#include "cfg.h"
-#include "basic-block.h"
-#include "gimple.h"
#include "gimple-pretty-print.h"
#include "gimple-iterator.h"
#include "inlining-iterator.h"
#include <zlib.h>
#include "cfg.h"
-#include "basic-block.h"
#include "gcc-rich-location.h"
-#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-pretty-print.h"
#include "cgraph.h"
if (const compound_svalue *compound_sval
= sval->dyn_cast_compound_svalue ())
for (auto iter : *compound_sval)
- if (iter.second->get_kind () == SK_UNKNOWN)
+ if (iter.m_sval->get_kind () == SK_UNKNOWN)
return true;
return false;
}
const svalue *ptr_sval = cd.get_arg_svalue (0);
const region *reg
= model->deref_rvalue (ptr_sval, cd.get_arg_tree (0), ctxt);
- model->get_store ()->mark_as_escaped (reg);
+ store_manager *store_mgr = model->get_manager ()->get_store_manager ();
+ model->get_store ()->mark_as_escaped (*store_mgr, reg);
enum memory_space mem_space = reg->get_memory_space ();
switch (mem_space)
{
= sval->dyn_cast_compound_svalue ())
for (auto iter : *compound_sval)
{
- const svalue *inner_sval = iter.second;
+ const svalue *inner_sval = iter.m_sval;
if (inner_sval->can_have_associated_state_p ())
impl_set_state (inner_sval, state, origin, ext_state);
}
for (auto iter : *cluster)
{
- const svalue *sval = iter.second;
+ const svalue *sval = iter.m_sval;
if (!can_purge_p (ext_state, sval))
return false;
}
|| !base_reg->tracked_p ())
continue;
- binding_cluster *cluster = m_store.get_or_create_cluster (base_reg);
+ binding_cluster *cluster
+ = m_store.get_or_create_cluster (*m_mgr->get_store_manager (),
+ base_reg);
cluster->on_asm (stmt, m_mgr->get_store_manager (),
conjured_purge (this, ctxt));
}
if (const compound_svalue *compound_sval
= sval->dyn_cast_compound_svalue ())
{
- for (compound_svalue::iterator_t iter = compound_sval->begin ();
+ for (auto iter = compound_sval->begin ();
iter != compound_sval->end (); ++iter)
{
- const svalue *iter_sval = (*iter).second;
+ const svalue *iter_sval = (*iter).m_sval;
handle_sval (iter_sval);
}
}
if (const compound_svalue *compound_sval
= sval->dyn_cast_compound_svalue ())
{
- for (compound_svalue::iterator_t iter = compound_sval->begin ();
+ for (auto iter = compound_sval->begin ();
iter != compound_sval->end (); ++iter)
{
- const svalue *iter_sval = (*iter).second;
+ const svalue *iter_sval = (*iter).m_sval;
handle_sval (iter_sval);
}
}
iter != m_mutable_base_regs.end (); ++iter)
{
const region *base_reg = *iter;
- m_store->mark_as_escaped (base_reg);
+ store_manager *store_mgr = m_model->get_manager ()->get_store_manager ();
+ m_store->mark_as_escaped (*store_mgr, base_reg);
/* If we have a function that's escaped, potentially add
it to the worklist. */
return;
for (auto iter : *cluster)
{
- const binding_key *key = iter.first;
- const svalue *sval = iter.second;
+ const binding_key *key = iter.m_key;
+ const svalue *sval = iter.m_sval;
if (const concrete_binding *concrete_key
= key->dyn_cast_concrete_binding ())
logger->end_log_line ();
}
- binding_map result;
+ binding_map result (*store_mgr);
while (1)
{
svalue_set maybe_live_values;
m_store.mark_region_as_unknown (m_mgr->get_store_manager(), reg,
uncertainty, &maybe_live_values);
- m_store.on_maybe_live_values (maybe_live_values);
+ m_store.on_maybe_live_values (*m_mgr->get_store_manager (),
+ maybe_live_values);
}
/* Determine what is known about the condition "LHS_SVAL OP RHS_SVAL" within
const svalue *init_ptr_sval
= m_mgr->get_or_create_initial_value (param_reg);
const region *pointee_reg = m_mgr->get_symbolic_region (init_ptr_sval);
- m_store.mark_as_escaped (pointee_reg);
+ store_manager *store_mgr = m_mgr->get_store_manager ();
+ m_store.mark_as_escaped (*store_mgr, pointee_reg);
if (nonnull)
{
const svalue *null_ptr_sval
}
}
-/* Implementation of BindingVisitor.
- Update the bound svalues for regions below REG to use poisoned
- values instead. */
-
-struct bad_pointer_finder
-{
- bad_pointer_finder (const region *reg, enum poison_kind pkind,
- region_model_manager *mgr)
- : m_reg (reg), m_pkind (pkind), m_mgr (mgr), m_count (0)
- {}
-
- void on_binding (const binding_key *, const svalue *&sval)
- {
- if (const region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
- {
- const region *ptr_dst = ptr_sval->get_pointee ();
- /* Poison ptrs to descendents of REG, but not to REG itself,
- otherwise double-free detection doesn't work (since sm-state
- for "free" is stored on the original ptr svalue). */
- if (ptr_dst->descendent_of_p (m_reg)
- && ptr_dst != m_reg)
- {
- sval = m_mgr->get_or_create_poisoned_svalue (m_pkind,
- sval->get_type ());
- ++m_count;
- }
- }
- }
-
- const region *m_reg;
- enum poison_kind m_pkind;
- region_model_manager *const m_mgr;
- int m_count;
-};
-
/* Find any pointers to REG or its descendents; convert them to
- poisoned values of kind PKIND.
- Return the number of pointers that were poisoned. */
+ poisoned values of kind PKIND. */
-int
+void
region_model::poison_any_pointers_to_descendents (const region *reg,
- enum poison_kind pkind)
+ enum poison_kind pkind)
{
- bad_pointer_finder bv (reg, pkind, m_mgr);
- m_store.for_each_binding (bv);
- return bv.m_count;
+ for (const auto &cluster_iter : m_store)
+ {
+ binding_cluster *cluster = cluster_iter.second;
+ for (auto iter = cluster->begin ();
+ iter != cluster->end ();
+ ++iter)
+ {
+ auto bp = *iter;
+ const svalue *sval = bp.m_sval;
+ if (const region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
+ {
+ const region *ptr_dst = ptr_sval->get_pointee ();
+ /* Poison ptrs to descendents of REG, but not to REG itself,
+ otherwise double-free detection doesn't work (since sm-state
+ for "free" is stored on the original ptr svalue). */
+ if (ptr_dst->descendent_of_p (reg)
+ && ptr_dst != reg)
+ {
+ const svalue *new_sval
+ = m_mgr->get_or_create_poisoned_svalue (pkind,
+ sval->get_type ());
+ cluster->get_map ().overwrite (iter, new_sval);
+ }
+ }
+ }
+ }
}
/* Attempt to merge THIS with OTHER_MODEL, writing the result
/* Find keys for uninit svals. */
for (auto iter : *compound_sval)
{
- const svalue *sval = iter.second;
+ const svalue *sval = iter.m_sval;
if (const poisoned_svalue *psval
= sval->dyn_cast_poisoned_svalue ())
if (psval->get_poison_kind () == poison_kind::uninit)
{
- const binding_key *key = iter.first;
+ const binding_key *key = iter.m_key;
const concrete_binding *ckey
= key->dyn_cast_concrete_binding ();
gcc_assert (ckey);
auto_vec<const concrete_binding *> uninit_keys;
for (auto iter : *compound_sval)
{
- const svalue *sval = iter.second;
+ const svalue *sval = iter.m_sval;
if (const poisoned_svalue *psval
= sval->dyn_cast_poisoned_svalue ())
if (psval->get_poison_kind () == poison_kind::uninit)
{
- const binding_key *key = iter.first;
+ const binding_key *key = iter.m_key;
const concrete_binding *ckey
= key->dyn_cast_concrete_binding ();
gcc_assert (ckey);
for (auto iter : *compound_sval)
{
- const svalue *sval = iter.second;
+ const svalue *sval = iter.m_sval;
if (const poisoned_svalue *psval
= sval->dyn_cast_poisoned_svalue ())
if (psval->get_poison_kind () == poison_kind::uninit)
region_model_manager mgr;
region_model model (&mgr);
- model.set_value (c_x, int_17, nullptr);
+  /* Set the fields in the order y, then x, so that binding order
+     differs from their order in memory.  */
model.set_value (c_y, int_m3, nullptr);
+ model.set_value (c_x, int_17, nullptr);
/* Verify get_offset for "c.x". */
{
ASSERT_EQ (offset.get_base_region (), model.get_lvalue (c, nullptr));
ASSERT_EQ (offset.get_bit_offset (), INT_TYPE_SIZE);
}
+
+ /* Check iteration order of binding_cluster (and thus of binding_map). */
+ {
+ std::vector<binding_map::binding_pair> vec;
+ auto cluster
+ = model.get_store ()->get_cluster (model.get_lvalue (c, nullptr));
+ for (auto iter : *cluster)
+ vec.push_back (iter);
+ ASSERT_EQ (vec.size (), 2);
+    /* We should get them back in ascending order in memory (x then y).  */
+ /* x */
+ ASSERT_EQ (vec[0].m_key->dyn_cast_concrete_binding ()->get_bit_range (),
+ bit_range (0, INT_TYPE_SIZE));
+    ASSERT_TRUE (tree_int_cst_equal (vec[0].m_sval->maybe_get_constant (),
+ int_17));
+ /* y */
+ ASSERT_EQ (vec[1].m_key->dyn_cast_concrete_binding ()->get_bit_range (),
+ bit_range (INT_TYPE_SIZE, INT_TYPE_SIZE));
+    ASSERT_TRUE (tree_int_cst_equal (vec[1].m_sval->maybe_get_constant (),
+ int_m3));
+ }
}
/* Verify usage of an array element. */
region_model_context *ctxt,
std::unique_ptr<rejected_constraint> *out);
- int poison_any_pointers_to_descendents (const region *reg,
- enum poison_kind pkind);
+ void poison_any_pointers_to_descendents (const region *reg,
+ enum poison_kind pkind);
void on_top_level_param (tree param,
bool nonnull,
else
{
/* Get the value for REG within base_reg_init. */
- binding_cluster c (base_reg);
+ binding_cluster c (*mgr->get_store_manager (), base_reg);
c.bind (mgr->get_store_manager (), base_reg, base_reg_init);
const svalue *sval
= c.get_any_binding (mgr->get_store_manager (), this);
/* Create a binding map, applying ctor to it, using this
decl_region as the base region when building child regions
for offset calculations. */
- binding_map map;
+ binding_map map (*mgr->get_store_manager ());
if (!map.apply_ctor_to_region (this, ctor, mgr))
return mgr->get_or_create_unknown_svalue (get_type ());
if (!tracked_p ())
return nullptr;
- binding_cluster c (this);
+ binding_cluster c (*mgr->get_store_manager (), this);
c.zero_fill_region (mgr->get_store_manager (), this);
return mgr->get_or_create_compound_svalue (TREE_TYPE (m_decl),
c.get_map ());
return sval;
}
+/* class binding_map::const_iterator. */
+
+bool
+binding_map::const_iterator::
+operator== (const binding_map::const_iterator &other) const
+{
+ if (m_concrete != other.m_concrete)
+ return false;
+ if (m_symbolic != other.m_symbolic)
+ return false;
+ return true;
+}
+
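+/* Advance this iterator: all of the concrete bindings are visited
+   before any of the symbolic bindings.  */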
+binding_map::const_iterator &
+binding_map::const_iterator::operator++ ()
+{
+ if (m_concrete != m_map.m_concrete.end ())
+ ++m_concrete;
+ else
+ ++m_symbolic;
+ return *this;
+}
+
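+/* Get the current binding as a binding_pair, using the store_manager
+   to get the binding_key for the underlying entry.  */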
+binding_map::binding_pair
+binding_map::const_iterator::operator* ()
+{
+ if (m_concrete != m_map.m_concrete.end ())
+ {
+ const bit_range &bits = m_concrete->first;
+ const svalue *sval = m_concrete->second;
+ return binding_pair (m_map.m_store_mgr.get_concrete_binding (bits), sval);
+ }
+ else
+ {
+ gcc_assert (m_symbolic != m_map.m_symbolic.end ());
+ const region *reg = m_symbolic->m_region;
+ const svalue *sval = m_symbolic->m_sval;
+ return binding_pair (m_map.m_store_mgr.get_symbolic_binding (reg), sval);
+ }
+}
+
+/* class binding_map::iterator. */
+
+bool
+binding_map::iterator::operator== (const binding_map::iterator &other) const
+{
+ if (m_concrete != other.m_concrete)
+ return false;
+ if (m_symbolic != other.m_symbolic)
+ return false;
+ return true;
+}
+
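+/* Advance this iterator: all of the concrete bindings are visited
+   before any of the symbolic bindings.  */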
+binding_map::iterator &
+binding_map::iterator::operator++ ()
+{
+ if (m_concrete != m_map.m_concrete.end ())
+ ++m_concrete;
+ else
+ ++m_symbolic;
+ return *this;
+}
+
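+/* Get the current binding as a binding_pair, using the store_manager
+   to get the binding_key for the underlying entry.  */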
+binding_map::binding_pair
+binding_map::iterator::operator* ()
+{
+ if (m_concrete != m_map.m_concrete.end ())
+ {
+ const bit_range &bits = m_concrete->first;
+ const svalue *&sval = m_concrete->second;
+ return binding_pair (m_map.m_store_mgr.get_concrete_binding (bits), sval);
+ }
+ else
+ {
+ gcc_assert (m_symbolic != m_map.m_symbolic.end ());
+ const region *reg = m_symbolic->m_region;
+ const svalue *&sval = m_symbolic->m_sval;
+ return binding_pair (m_map.m_store_mgr.get_symbolic_binding (reg), sval);
+ }
+}
+
/* class binding_map. */
+/* binding_map's ctor: construct an empty map, using STORE_MGR when
+   synthesizing binding_keys.  */
+
+binding_map::binding_map (store_manager &store_mgr)
+: m_store_mgr (store_mgr),
+ m_concrete (),
+ m_symbolic ()
+{
+}
+
/* binding_map's copy ctor. */
binding_map::binding_map (const binding_map &other)
-: m_map (other.m_map)
+: m_store_mgr (other.m_store_mgr),
+ m_concrete (other.m_concrete),
+ m_symbolic (other.m_symbolic)
{
}
/* binding_map's assignment operator. */
binding_map&
-binding_map::operator=(const binding_map &other)
+binding_map::operator= (const binding_map &other)
{
+ gcc_assert (&m_store_mgr == &other.m_store_mgr);
+
/* For now, assume we only ever copy to an empty cluster. */
- gcc_assert (m_map.elements () == 0);
- for (map_t::iterator iter = other.m_map.begin (); iter != other.m_map.end ();
- ++iter)
- {
- const binding_key *key = (*iter).first;
- const svalue *sval = (*iter).second;
- m_map.put (key, sval);
- }
+ gcc_assert (m_concrete.size () == 0);
+ gcc_assert (m_symbolic.size () == 0);
+
+ m_concrete = other.m_concrete;
+ m_symbolic = other.m_symbolic;
+
return *this;
}
bool
binding_map::operator== (const binding_map &other) const
{
- if (m_map.elements () != other.m_map.elements ())
+ if (m_concrete != other.m_concrete)
+ return false;
+ if (m_symbolic != other.m_symbolic)
return false;
- for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
- {
- const binding_key *key = (*iter).first;
- const svalue *sval = (*iter).second;
- const svalue **other_slot
- = const_cast <map_t &> (other.m_map).get (key);
- if (other_slot == nullptr)
- return false;
- if (sval != *other_slot)
- return false;
- }
- gcc_checking_assert (hash () == other.hash ());
return true;
}
binding_map::hash () const
{
hashval_t result = 0;
- for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
+ for (auto iter : *this)
{
/* Use a new hasher for each key to avoid depending on the ordering
of keys when accumulating the result. */
inchash::hash hstate;
- hstate.add_ptr ((*iter).first);
- hstate.add_ptr ((*iter).second);
+ hstate.add_ptr (iter.m_key);
+ hstate.add_ptr (iter.m_sval);
result ^= hstate.end ();
}
return result;
}
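+/* Get the svalue bound to KEY within this map,
+   or nullptr if KEY is not bound.  */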
+const svalue *
+binding_map::get (const binding_key *key) const
+{
+ if (key->symbolic_p ())
+ {
+ const ana::symbolic_binding &sym_key
+ = *static_cast <const ana::symbolic_binding *> (key);
+ const region *reg = sym_key.get_region ();
+
+ for (auto iter : m_symbolic)
+ {
+ if (iter.m_region == reg)
+ return iter.m_sval;
+ }
+ return nullptr;
+ }
+ else
+ {
+ const concrete_binding &conc_key
+ = *static_cast <const concrete_binding *> (key);
+ const bit_range &bits = conc_key.get_bit_range ();
+
+ concrete_bindings_t::const_iterator iter (m_concrete.find (bits));
+ if (iter != m_concrete.end ())
+ return iter->second;
+ else
+ return nullptr;
+ }
+}
+
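+/* Bind KEY to SVAL within this map, overwriting any existing binding
+   for KEY.  At most one symbolic binding is kept, so binding a
+   symbolic key replaces any existing symbolic binding.  */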
+void
+binding_map::put (const binding_key *key, const svalue *sval)
+{
+  gcc_assert (sval);
+  if (key->symbolic_p ())
+ {
+ const ana::symbolic_binding &sym_key
+ = *static_cast <const ana::symbolic_binding *> (key);
+ const region *reg = sym_key.get_region ();
+
+ m_symbolic.clear ();
+
+ m_symbolic.push_back ({reg, sval});
+ }
+ else
+ {
+ const concrete_binding &conc_key
+ = *static_cast <const concrete_binding *> (key);
+ const bit_range &bits = conc_key.get_bit_range ();
+
+ concrete_bindings_t::iterator iter (m_concrete.find (bits));
+ if (iter != m_concrete.end ())
+ (*iter).second = sval;
+ else
+ m_concrete.insert ({bits, sval});
+ }
+}
+
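+/* Replace the svalue at iterator POS within this map with V,
+   leaving the key unchanged.  */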
+void
+binding_map::overwrite (iterator_t &pos, const svalue *v)
+{
+ gcc_assert (&pos.m_map == this);
+  /* Match the iteration order: the concrete bindings are visited before
+     the symbolic bindings, so check the concrete iterator first.  */
+  if (pos.m_concrete != m_concrete.end ())
+    (*(pos.m_concrete)).second = v;
+  else
+    {
+      gcc_assert (pos.m_symbolic != m_symbolic.end ());
+      (*(pos.m_symbolic)).m_sval = v;
+    }
+}
+
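+/* Remove any binding of KEY within this map.  Removing a symbolic key
+   drops all symbolic bindings (there is at most one).  */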
+void
+binding_map::remove (const binding_key *key)
+{
+ if (key->symbolic_p ())
+ m_symbolic.clear ();
+ else
+ {
+ const concrete_binding &conc_key
+ = *static_cast <const concrete_binding *> (key);
+ const bit_range &bits = conc_key.get_bit_range ();
+ m_concrete.erase (bits);
+ }
+}
+
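+/* Iteration support for binding_map: the concrete bindings are visited
+   first (in order of start bit offset), then the symbolic bindings.  */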
+binding_map::const_iterator_t
+binding_map::begin () const
+{
+ return binding_map::const_iterator_t (*this,
+ m_concrete.begin (),
+ m_symbolic.begin ());
+}
+
+binding_map::const_iterator_t
+binding_map::end () const
+{
+ return binding_map::const_iterator_t (*this,
+ m_concrete.end (),
+ m_symbolic.end ());
+}
+
+binding_map::iterator_t
+binding_map::begin ()
+{
+ return binding_map::iterator_t (*this,
+ m_concrete.begin (),
+ m_symbolic.begin ());
+}
+
+binding_map::iterator_t
+binding_map::end ()
+{
+ return binding_map::iterator_t (*this,
+ m_concrete.end (),
+ m_symbolic.end ());
+}
+
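+/* Return the total number of bindings (concrete plus symbolic).  */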
+size_t
+binding_map::elements () const
+{
+ return m_concrete.size () + m_symbolic.size ();
+}
+
/* Dump a representation of this binding_map to PP.
SIMPLE controls how values and regions are to be printed.
If MULTILINE, then split the dump over multiple lines and
binding_map::dump_to_pp (pretty_printer *pp, bool simple,
bool multiline) const
{
- auto_vec <const binding_key *> binding_keys;
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end (); ++iter)
- {
- const binding_key *key = (*iter).first;
- binding_keys.safe_push (key);
- }
- binding_keys.qsort (binding_key::cmp_ptrs);
-
- const binding_key *key;
- unsigned i;
- FOR_EACH_VEC_ELT (binding_keys, i, key)
+ bool first = true;
+ for (auto iter : *this)
{
- const svalue *value = *const_cast <map_t &> (m_map).get (key);
+ const binding_key *key = iter.m_key;
+ const svalue *value = iter.m_sval;
if (multiline)
{
pp_string (pp, " key: {");
}
else
{
- if (i > 0)
+ if (first)
+ first = false;
+ else
pp_string (pp, ", ");
pp_string (pp, "binding key: {");
key->dump_to_pp (pp, simple);
{
auto map_obj = std::make_unique<json::object> ();
- auto_vec <const binding_key *> binding_keys;
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end (); ++iter)
- {
- const binding_key *key = (*iter).first;
- binding_keys.safe_push (key);
- }
- binding_keys.qsort (binding_key::cmp_ptrs);
-
- const binding_key *key;
- unsigned i;
- FOR_EACH_VEC_ELT (binding_keys, i, key)
+ for (auto iter : *this)
{
- const svalue *value = *const_cast <map_t &> (m_map).get (key);
- label_text key_desc = key->get_desc ();
+ const svalue *value = iter.m_sval;
+ label_text key_desc = iter.m_key->get_desc ();
map_obj->set (key_desc.get (), value->to_json ());
}
binding_map::add_to_tree_widget (text_art::tree_widget &parent_widget,
const text_art::dump_widget_info &dwi) const
{
- auto_vec <const binding_key *> binding_keys;
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end (); ++iter)
- {
- const binding_key *key = (*iter).first;
- binding_keys.safe_push (key);
- }
- binding_keys.qsort (binding_key::cmp_ptrs);
-
- const binding_key *key;
- unsigned i;
- FOR_EACH_VEC_ELT (binding_keys, i, key)
+ for (auto iter : *this)
{
- const svalue *sval = *const_cast <map_t &> (m_map).get (key);
+ const binding_key *key = iter.m_key;
+ const svalue *sval = iter.m_sval;
add_binding_to_tree_widget (parent_widget, dwi,
key, sval);
}
return count_cmp;
auto_vec <const binding_key *> keys1 (map1.elements ());
- for (map_t::iterator iter = map1.begin ();
- iter != map1.end (); ++iter)
- keys1.quick_push ((*iter).first);
+ for (auto iter : map1)
+ keys1.quick_push (iter.m_key);
keys1.qsort (binding_key::cmp_ptrs);
auto_vec <const binding_key *> keys2 (map2.elements ());
- for (map_t::iterator iter = map2.begin ();
- iter != map2.end (); ++iter)
- keys2.quick_push ((*iter).first);
+ for (auto iter : map2)
+ keys2.quick_push (iter.m_key);
keys2.qsort (binding_key::cmp_ptrs);
for (size_t i = 0; i < keys1.length (); i++)
{
for (auto iter : *this)
{
- const binding_key *iter_key = iter.first;
+ const binding_key *iter_key = iter.m_key;
if (const concrete_binding *ckey
= key->dyn_cast_concrete_binding ())
{
auto_vec<const binding_key *> bindings;
if (always_overlap)
for (auto iter : *this)
- bindings.safe_push (iter.first); /* Add all bindings. */
+ bindings.safe_push (iter.m_key); /* Add all bindings. */
else
/* Just add overlapping bindings. */
get_overlapping_bindings (drop_key, &bindings);
maybe_live_values->add (old_sval);
/* Begin by removing the old binding. */
- m_map.remove (iter_binding);
+ remove (iter_binding);
/* Don't attempt to handle prefixes/suffixes for the
"always_overlap" case; everything's being removed. */
= old_sval->extract_bit_range (NULL_TREE,
rel_prefix,
mgr->get_svalue_manager ());
- m_map.put (prefix_key, prefix_sval);
+ put (prefix_key, prefix_sval);
}
if (iter_bits.get_next_bit_offset ()
= old_sval->extract_bit_range (NULL_TREE,
rel_suffix,
mgr->get_svalue_manager ());
- m_map.put (suffix_key, suffix_sval);
+ put (suffix_key, suffix_sval);
}
}
}
/* class binding_cluster. */
-binding_cluster::binding_cluster (const region *base_region)
-: m_base_region (base_region), m_map (),
+binding_cluster::binding_cluster (store_manager &store_mgr,
+ const region *base_region)
+: m_base_region (base_region), m_map (store_mgr),
m_escaped (false), m_touched (false)
{
}
int num_concrete = 0;
for (auto iter : m_map)
{
- if (iter.first->symbolic_p ())
+ if (iter.m_key->symbolic_p ())
num_symbolic++;
else
num_concrete++;
return;
}
- for (map_t::iterator iter = compound_sval->begin ();
- iter != compound_sval->end (); ++iter)
+ for (auto iter : *compound_sval)
{
- const binding_key *iter_key = (*iter).first;
- const svalue *iter_sval = (*iter).second;
+ const binding_key *iter_key = iter.m_key;
+ const svalue *iter_sval = iter.m_sval;
if (const concrete_binding *concrete_key
= iter_key->dyn_cast_concrete_binding ())
auto_vec<std::pair<const binding_key *, tree> > to_make_unknown;
for (auto iter : m_map)
{
- const binding_key *iter_key = iter.first;
+ const binding_key *iter_key = iter.m_key;
if (const symbolic_binding *symbolic_key
= iter_key->dyn_cast_symbolic_binding ())
{
if (reg->involves_p (sval))
to_remove.safe_push (iter_key);
}
- const svalue *iter_sval = iter.second;
+ const svalue *iter_sval = iter.m_sval;
if (iter_sval->involves_p (sval))
to_make_unknown.safe_push (std::make_pair(iter_key,
iter_sval->get_type ()));
perhaps we should have a spatial-organized data structure for
concrete keys, though. */
- binding_map result_map;
- binding_map default_map;
+ binding_map result_map (*mgr);
+ binding_map default_map (*mgr);
/* Set up default values in default_map. */
const svalue *default_sval;
return nullptr;
const concrete_binding *default_key_relative_to_reg
= mgr->get_concrete_binding (0, concrete_default_key->get_size_in_bits ());
+
default_map.put (default_key_relative_to_reg, default_sval);
- for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
+ for (auto iter : m_map)
{
- const binding_key *key = (*iter).first;
- const svalue *sval = (*iter).second;
+ const binding_key *key = iter.m_key;
+ const svalue *sval = iter.m_sval;
if (const concrete_binding *concrete_key
= key->dyn_cast_concrete_binding ())
/* Merge any bindings from default_map into result_map. */
for (auto iter : default_map)
{
- const binding_key *key = iter.first;
- const svalue *sval = iter.second;
+ const binding_key *key = iter.m_key;
+ const svalue *sval = iter.m_sval;
result_map.put (key, sval);
}
gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
hash_set<const binding_key *> keys;
- for (map_t::iterator iter_a = cluster_a->m_map.begin ();
- iter_a != cluster_a->m_map.end (); ++iter_a)
+ for (auto iter_a : cluster_a->m_map)
{
- const binding_key *key_a = (*iter_a).first;
+ const binding_key *key_a = iter_a.m_key;
keys.add (key_a);
}
- for (map_t::iterator iter_b = cluster_b->m_map.begin ();
- iter_b != cluster_b->m_map.end (); ++iter_b)
+ for (auto iter_b : cluster_b->m_map)
{
- const binding_key *key_b = (*iter_b).first;
+ const binding_key *key_b = iter_b.m_key;
keys.add (key_b);
}
int num_symbolic_keys = 0;
|| (num_concrete_keys > 0 && num_symbolic_keys > 0))
{
out_cluster->m_touched = true;
- out_cluster->m_map.empty ();
+ out_cluster->m_map.clear ();
}
/* We don't handle other kinds of overlaps yet. */
store *out_store,
store_manager *mgr)
{
- for (map_t::iterator iter = other->m_map.begin ();
- iter != other->m_map.end (); ++iter)
+ for (auto iter : *other)
{
- const binding_key *iter_key = (*iter).first;
- const svalue *iter_sval = (*iter).second;
+ const binding_key *iter_key = iter.m_key;
+ const svalue *iter_sval = iter.m_sval;
const svalue *unknown_sval
= mgr->get_svalue_manager ()->get_or_create_unknown_svalue
(iter_sval->get_type ());
if (base_reg->tracked_p ()
&& !base_reg->symbolic_for_unknown_ptr_p ())
{
- binding_cluster *c = out_store->get_or_create_cluster (base_reg);
+ binding_cluster *c
+ = out_store->get_or_create_cluster (*mgr, base_reg);
c->mark_as_escaped ();
}
}
{
if (m_escaped)
{
- m_map.empty ();
+ m_map.clear ();
if (!m_base_region->empty_p ())
{
store_manager *mgr,
const conjured_purge &p)
{
- m_map.empty ();
+ m_map.clear ();
/* Bind it to a new "conjured" value using CALL. */
const svalue *sval
{
sval = simplify_for_binding (sval);
- for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
+ for (auto iter : m_map)
{
- const binding_key *key = (*iter).first;
- const svalue *bound_sval = (*iter).second;
+ const binding_key *key = iter.m_key;
+ const svalue *bound_sval = iter.m_sval;
if (bound_sval == sval)
{
if (const concrete_binding *ckey
{
const region *ptr_dst = ptr_sval->get_pointee ();
const region *ptr_base_reg = ptr_dst->get_base_region ();
- mark_as_escaped (ptr_base_reg);
+ mark_as_escaped (*mgr, ptr_base_reg);
}
if (uncertainty)
uncertainty->on_maybe_bound_sval (rhs_sval);
}
else if (lhs_base_reg->tracked_p ())
{
- lhs_cluster = get_or_create_cluster (lhs_base_reg);
+ lhs_cluster = get_or_create_cluster (*mgr, lhs_base_reg);
lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
}
else
(e.g. marking regions as escaped).
We do this after the iteration to avoid potentially changing
m_cluster_map whilst iterating over it. */
- on_maybe_live_values (maybe_live_values);
+ on_maybe_live_values (*mgr, maybe_live_values);
}
/* Determine if BASE_REG_A could be an alias of BASE_REG_B. */
/* Record all of the values in MAYBE_LIVE_VALUES as being possibly live. */
void
-store::on_maybe_live_values (const svalue_set &maybe_live_values)
+store::on_maybe_live_values (store_manager &mgr,
+ const svalue_set &maybe_live_values)
{
for (auto sval : maybe_live_values)
{
if (const region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
{
const region *base_reg = ptr_sval->get_pointee ()->get_base_region ();
- mark_as_escaped (base_reg);
+ mark_as_escaped (mgr, base_reg);
}
}
}
if (base_reg->symbolic_for_unknown_ptr_p ()
|| !base_reg->tracked_p ())
return;
- binding_cluster *cluster = get_or_create_cluster (base_reg);
+ binding_cluster *cluster = get_or_create_cluster (*mgr, base_reg);
cluster->fill_region (mgr, reg, sval);
}
if (base_reg->symbolic_for_unknown_ptr_p ()
|| !base_reg->tracked_p ())
return;
- binding_cluster *cluster = get_or_create_cluster (base_reg);
+ binding_cluster *cluster = get_or_create_cluster (*mgr, base_reg);
cluster->mark_region_as_unknown (mgr, reg, reg, uncertainty,
maybe_live_values);
}
/* Get the cluster for BASE_REG, creating it if doesn't already exist. */
binding_cluster *
-store::get_or_create_cluster (const region *base_reg)
+store::get_or_create_cluster (store_manager &store_mgr,
+ const region *base_reg)
{
gcc_assert (base_reg);
gcc_assert (base_reg->get_base_region () == base_reg);
if (binding_cluster **slot = m_cluster_map.get (base_reg))
return *slot;
- binding_cluster *cluster = new binding_cluster (base_reg);
+ binding_cluster *cluster = new binding_cluster (store_mgr, base_reg);
m_cluster_map.put (base_reg, cluster);
return cluster;
const binding_cluster *cluster_b = store_b->get_cluster (base_reg);
/* At least one of cluster_a and cluster_b must be non-NULL. */
binding_cluster *out_cluster
- = out_store->get_or_create_cluster (base_reg);
+ = out_store->get_or_create_cluster (*mgr, base_reg);
if (!binding_cluster::can_merge_p (cluster_a, cluster_b,
out_cluster, out_store, mgr, merger))
return false;
isn't reachable from args of those calls. */
void
-store::mark_as_escaped (const region *base_reg)
+store::mark_as_escaped (store_manager &mgr, const region *base_reg)
{
gcc_assert (base_reg);
gcc_assert (base_reg->get_base_region () == base_reg);
|| !base_reg->tracked_p ())
return;
- binding_cluster *cluster = get_or_create_cluster (base_reg);
+ binding_cluster *cluster = get_or_create_cluster (mgr, base_reg);
cluster->mark_as_escaped ();
}
binding_cluster *cluster = (*iter).second;
for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
bind_iter != cluster->m_map.end (); ++bind_iter)
- (*bind_iter).second->accept (&s);
+ (*bind_iter).m_sval->accept (&s);
}
/* Locate heap-allocated regions that have empty bindings that weren't
for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
bind_iter != cluster->m_map.end (); ++bind_iter)
{
- const binding_key *key = (*bind_iter).first;
- const svalue *sval = (*bind_iter).second;
+ const binding_key *key = (*bind_iter).m_key;
+ const svalue *sval = (*bind_iter).m_sval;
if (sval->get_kind () == SK_WIDENING)
{
binding_cluster *this_cluster
- = get_or_create_cluster (base_reg);
+ = get_or_create_cluster (*mgr->get_store_manager (),
+ base_reg);
const svalue *unknown
= mgr->get_or_create_unknown_svalue (sval->get_type ());
this_cluster->bind_key (key, unknown);
&& !caller_base_reg->symbolic_for_unknown_ptr_p ())
{
binding_cluster *caller_cluster
- = get_or_create_cluster (caller_base_reg);
+ = get_or_create_cluster (*mgr, caller_base_reg);
if (summary_cluster->escaped_p ())
caller_cluster->mark_as_escaped ();
if (summary_cluster->touched_p ())
ASSERT_DISJOINT (cb_8_23, cb_24_31);
}
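+/* Verify binding_map operations; for now, just assignment between
+   empty maps.  */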
+static void
+test_binding_map_ops ()
+{
+ region_model_manager region_mgr;
+  store_manager store_mgr (&region_mgr);
+
+ /* Assignment of empty. */
+ {
+ binding_map src (store_mgr);
+ binding_map dst (store_mgr);
+ dst = src;
+
+ ASSERT_EQ (src, dst);
+ }
+}
+
/* Run all of the selftests within this file. */
void
test_bit_range_intersects_p ();
test_bit_range_from_mask ();
test_binding_key_overlap ();
+ test_binding_map_ops ();
}
} // namespace selftest
namespace ana {
/* A mapping from binding_keys to svalues, for use by binding_cluster
- and compound_svalue. */
+ and compound_svalue.
+ We store a map from concrete keys to svalues, which is ordered by
+ the start offset.
+ We also store a vector of (symbolic key, svalue) pairs, but for now
+   this has a maximum length of 1.  */
class binding_map
{
public:
- typedef hash_map <const binding_key *, const svalue *> map_t;
- typedef map_t::iterator iterator_t;
-
- binding_map () : m_map () {}
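+  /* A binding of a symbolic key (a region) to an svalue.  */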
+ struct symbolic_binding
+ {
+ bool operator== (const symbolic_binding &other) const
+ {
+ return (m_region == other.m_region
+ && m_sval == other.m_sval);
+ }
+
+ const region *m_region;
+ const svalue *m_sval;
+ };
+ using concrete_bindings_t = std::map<bit_range, const svalue *>;
+ using symbolic_bindings_t = std::vector<symbolic_binding>;
+
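+  /* A (binding_key, svalue) pair, as yielded by the iterators below.  */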
+ struct binding_pair
+ {
+ binding_pair (const binding_key *key,
+ const svalue *sval)
+ : m_key (key),
+ m_sval (sval)
+ {
+ }
+
+ const binding_key *m_key;
+ const svalue *m_sval;
+ };
+
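+  /* Const iterator over the bindings: visits the concrete bindings
+     (in order of start bit offset), then the symbolic bindings.  */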
+ typedef class const_iterator
+ {
+ public:
+ const_iterator (const binding_map &map,
+ concrete_bindings_t::const_iterator concrete_iter,
+ symbolic_bindings_t::const_iterator symbolic_iter)
+ : m_map (map),
+ m_concrete (concrete_iter),
+ m_symbolic (symbolic_iter)
+ {
+ }
+ bool operator== (const const_iterator &other) const;
+ bool operator!= (const const_iterator &other) const
+ {
+ return !(*this == other);
+ }
+ const_iterator &operator++ ();
+
+ binding_pair operator* ();
+
+ private:
+ const binding_map &m_map;
+ concrete_bindings_t::const_iterator m_concrete;
+ symbolic_bindings_t::const_iterator m_symbolic;
+ } const_iterator_t;
+
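+  /* Non-const iterator; usable with binding_map::overwrite.  */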
+ typedef class iterator
+ {
+ public:
+ friend class binding_map;
+
+ iterator (const binding_map &map,
+ concrete_bindings_t::iterator concrete_iter,
+ symbolic_bindings_t::iterator symbolic_iter)
+ : m_map (map),
+ m_concrete (concrete_iter),
+ m_symbolic (symbolic_iter)
+ {
+ }
+ bool operator== (const iterator &other) const;
+ bool operator!= (const iterator &other) const
+ {
+ return !(*this == other);
+ }
+ iterator &operator++ ();
+
+ binding_pair operator* ();
+
+ const binding_key *get_key () const;
+
+ private:
+ const binding_map &m_map;
+ concrete_bindings_t::iterator m_concrete;
+ symbolic_bindings_t::iterator m_symbolic;
+ } iterator_t;
+
+ binding_map (store_manager &store_mgr);
binding_map (const binding_map &other);
binding_map& operator=(const binding_map &other);
hashval_t hash () const;
- const svalue *get (const binding_key *key) const
+ const svalue *get (const binding_key *key) const;
+ void put (const binding_key *k, const svalue *v);
+ void overwrite (iterator_t &pos, const svalue *v);
+
+ void remove (const binding_key *k);
+ void clear ()
{
- const svalue **slot = const_cast<map_t &> (m_map).get (key);
- if (slot)
- return *slot;
- else
- return nullptr;
+ m_concrete.clear ();
+ m_symbolic.clear ();
}
- bool put (const binding_key *k, const svalue *v)
+
+ bool empty_p () const
{
- gcc_assert (v);
- return m_map.put (k, v);
+ return m_concrete.empty () && m_symbolic.empty ();
}
- void remove (const binding_key *k) { m_map.remove (k); }
- void empty () { m_map.empty (); }
-
- iterator_t begin () const { return m_map.begin (); }
- iterator_t end () const { return m_map.end (); }
- size_t elements () const { return m_map.elements (); }
+ const_iterator_t begin () const;
+ const_iterator_t end () const;
+ iterator_t begin ();
+ iterator_t end ();
+ size_t elements () const;
void dump_to_pp (pretty_printer *pp, bool simple, bool multiline) const;
void dump (bool simple) const;
region_model_manager *mgr,
tree index, tree val);
- map_t m_map;
+ store_manager &m_store_mgr;
+ concrete_bindings_t m_concrete;
+ symbolic_bindings_t m_symbolic;
};
-/* Concept: BindingVisitor, for use by binding_cluster::for_each_binding
- and store::for_each_binding.
-
- Should implement:
- void on_binding (const binding_key *key, const svalue *&sval);
-*/
-
/* All of the bindings within a store for regions that share the same
base region. */
public:
friend class store;
- typedef hash_map <const binding_key *, const svalue *> map_t;
- typedef map_t::iterator iterator_t;
+ typedef binding_map::const_iterator const_iterator_t;
+ typedef binding_map::iterator iterator_t;
- binding_cluster (const region *base_region);
+ binding_cluster (store_manager &store_mgr, const region *base_region);
binding_cluster (const binding_cluster &other);
binding_cluster& operator=(const binding_cluster &other);
void for_each_value (void (*cb) (const svalue *sval, T user_data),
T user_data) const
{
- for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
- cb ((*iter).second, user_data);
+ for (auto iter : m_map)
+ cb (iter.m_sval, user_data);
}
static bool can_merge_p (const binding_cluster *cluster_a,
bool touched_p () const { return m_touched; }
bool redundant_p () const;
- bool empty_p () const { return m_map.elements () == 0; }
+ bool empty_p () const { return m_map.empty_p (); }
void get_representative_path_vars (const region_model *model,
svalue_set *visited,
const svalue *maybe_get_simple_value (store_manager *mgr) const;
- template <typename BindingVisitor>
- void for_each_binding (BindingVisitor &v) const
- {
- for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
- {
- const binding_key *key = (*iter).first;
- const svalue *&sval = (*iter).second;
- v.on_binding (key, sval);
- }
- }
+ const_iterator_t begin () const { return m_map.begin (); }
+ const_iterator_t end () const { return m_map.end (); }
- iterator_t begin () const { return m_map.begin (); }
- iterator_t end () const { return m_map.end (); }
+ iterator_t begin () { return m_map.begin (); }
+ iterator_t end () { return m_map.end (); }
const binding_map &get_map () const { return m_map; }
+ binding_map &get_map () { return m_map; }
private:
const svalue *get_any_value (const binding_key *key) const;
const binding_cluster *get_cluster (const region *base_reg) const;
binding_cluster *get_cluster (const region *base_reg);
- binding_cluster *get_or_create_cluster (const region *base_reg);
+ binding_cluster *get_or_create_cluster (store_manager &store_mgr,
+ const region *base_reg);
void purge_cluster (const region *base_reg);
template <typename T>
store *out_store, store_manager *mgr,
model_merger *merger);
- void mark_as_escaped (const region *base_reg);
+ void mark_as_escaped (store_manager &mgr, const region *base_reg);
void on_unknown_fncall (const gcall &call, store_manager *mgr,
const conjured_purge &p);
bool escaped_p (const region *reg) const;
tristate eval_alias (const region *base_reg_a,
const region *base_reg_b) const;
- template <typename BindingVisitor>
- void for_each_binding (BindingVisitor &v)
- {
- for (cluster_map_t::iterator iter = m_cluster_map.begin ();
- iter != m_cluster_map.end (); ++iter)
- (*iter).second->for_each_binding (v);
- }
-
void canonicalize (store_manager *mgr);
void loop_replay_fixup (const store *other_store,
region_model_manager *mgr);
void replay_call_summary_cluster (call_summary_replay &r,
const store &summary,
const region *base_reg);
- void on_maybe_live_values (const svalue_set &maybe_live_values);
+ void on_maybe_live_values (store_manager &mgr,
+ const svalue_set &maybe_live_values);
private:
void remove_overlapping_bindings (store_manager *mgr, const region *reg,
: svalue (calc_complexity (map), id, type), m_map (map)
{
#if CHECKING_P
- for (iterator_t iter = begin (); iter != end (); ++iter)
+ for (auto iter : *this)
{
/* All keys within the underlying binding_map are required to be concrete,
not symbolic. */
- const binding_key *key = (*iter).first;
+ const binding_key *key = iter.m_key;
gcc_assert (key->concrete_p ());
/* We don't nest compound svalues. */
- const svalue *sval = (*iter).second;
+ const svalue *sval = iter.m_sval;
gcc_assert (sval->get_kind () != SK_COMPOUND);
}
#endif
void
compound_svalue::accept (visitor *v) const
{
- for (binding_map::iterator_t iter = m_map.begin ();
- iter != m_map.end (); ++iter)
+ for (auto iter : m_map)
{
- //(*iter).first.accept (v);
- (*iter).second->accept (v);
+      //iter.m_key->accept (v);
+ iter.m_sval->accept (v);
}
v->visit_compound_svalue (this);
}
{
unsigned num_child_nodes = 0;
unsigned max_child_depth = 0;
- for (binding_map::iterator_t iter = map.begin ();
- iter != map.end (); ++iter)
+ for (auto iter : map)
{
- const complexity &sval_c = (*iter).second->get_complexity ();
+ const complexity &sval_c = iter.m_sval->get_complexity ();
num_child_nodes += sval_c.m_num_nodes;
max_child_depth = MAX (max_child_depth, sval_c.m_max_depth);
}
const bit_range &bits,
region_model_manager *mgr) const
{
- binding_map result_map;
+ binding_map result_map (*mgr->get_store_manager ());
for (auto iter : m_map)
{
- const binding_key *key = iter.first;
+ const binding_key *key = iter.m_key;
if (const concrete_binding *conc_key
= key->dyn_cast_concrete_binding ())
{
if (!conc_key->get_bit_range ().intersects_p (bits))
continue;
- const svalue *sval = iter.second;
+ const svalue *sval = iter.m_sval;
/* Get the position of conc_key relative to BITS. */
bit_range result_location (conc_key->get_start_bit_offset ()
- bits.get_start_bit_offset (),
class compound_svalue : public svalue
{
public:
+ typedef binding_map::const_iterator_t const_iterator_t;
typedef binding_map::iterator_t iterator_t;
/* A support class for uniquifying instances of compound_svalue.
const binding_map &get_map () const { return m_map; }
- iterator_t begin () const { return m_map.begin (); }
- iterator_t end () const { return m_map.end (); }
+ const_iterator_t begin () const { return m_map.begin (); }
+ const_iterator_t end () const { return m_map.end (); }
+ iterator_t begin () { return m_map.begin (); }
+ iterator_t end () { return m_map.end (); }
struct key_t make_key () const
{
/* -fanalyzer plugin for CPython extension modules */
/* { dg-options "-g" } */
-#define INCLUDE_MEMORY
-#define INCLUDE_STRING
-#define INCLUDE_VECTOR
+#include "analyzer/common.h"
#include "gcc-plugin.h"
-#include "config.h"
-#include "system.h"
-#include "coretypes.h"
#include "tree.h"
#include "function.h"
#include "basic-block.h"
for (const auto &binding : retval_binding_map)
{
- const svalue *binding_sval = binding.second;
+ const svalue *binding_sval = binding.m_sval;
const svalue *unwrapped_sval = binding_sval->unwrap_any_unmergeable ();
const region *pointee = unwrapped_sval->maybe_get_region ();
auto binding_cluster = cluster.second;
for (const auto &binding : binding_cluster->get_map ())
{
- const svalue *binding_sval = binding.second;
+ const svalue *binding_sval = binding.m_sval;
const svalue *unwrapped_sval
= binding_sval->unwrap_any_unmergeable ();
/* Proof-of-concept of a -fanalyzer plugin for the Linux kernel. */
/* { dg-options "-g" } */
-#define INCLUDE_MEMORY
-#define INCLUDE_STRING
-#define INCLUDE_VECTOR
+#include "analyzer/common.h"
#include "gcc-plugin.h"
-#include "config.h"
-#include "system.h"
-#include "coretypes.h"
#include "tree.h"
#include "function.h"
#include "basic-block.h"
/* Proof-of-concept of a -fanalyzer plugin to handle known functions. */
/* { dg-options "-g" } */
-#define INCLUDE_MEMORY
-#define INCLUDE_STRING
-#define INCLUDE_VECTOR
+#include "analyzer/common.h"
#include "gcc-plugin.h"
-#include "config.h"
-#include "system.h"
-#include "coretypes.h"
#include "tree.h"
#include "function.h"
#include "basic-block.h"