Modernization; no functional change intended.
gcc/analyzer/ChangeLog:
* access-diagram.cc: Use nullptr rather than NULL where
appropriate.
* analyzer-language.cc: Likewise.
* analyzer-language.h: Likewise.
* analyzer-logging.h: Likewise.
* analyzer-pass.cc: Likewise.
* analyzer.cc: Likewise.
* bounds-checking.cc: Likewise.
* call-details.cc: Likewise.
* call-string.cc: Likewise.
* call-string.h: Likewise.
* call-summary.cc: Likewise.
* checker-event.cc: Likewise.
* common.h: Likewise.
* constraint-manager.cc: Likewise.
* constraint-manager.h: Likewise.
* diagnostic-manager.cc: Likewise.
* engine.cc: Likewise.
* exploded-graph.h: Likewise.
* function-set.cc: Likewise.
* infinite-recursion.cc: Likewise.
* inlining-iterator.h: Likewise.
* kf.cc: Likewise.
* known-function-manager.cc: Likewise.
* pending-diagnostic.cc: Likewise.
* program-point.cc: Likewise.
* program-point.h: Likewise.
* program-state.cc: Likewise.
* program-state.h: Likewise.
* record-layout.cc: Likewise.
* region-model-asm.cc: Likewise.
* region-model-manager.cc: Likewise.
* region-model-manager.h: Likewise.
* region-model-reachability.cc: Likewise.
* region-model.cc: Likewise.
* region-model.h: Likewise.
* region.cc: Likewise.
* region.h: Likewise.
* sm-fd.cc: Likewise.
* sm-malloc.cc: Likewise.
* sm-pattern-test.cc: Likewise.
* sm-signal.cc: Likewise.
* sm-taint.cc: Likewise.
* sm.cc: Likewise.
* sm.h: Likewise.
* state-purge.cc: Likewise.
* state-purge.h: Likewise.
* store.cc: Likewise.
* store.h: Likewise.
* supergraph.cc: Likewise.
* supergraph.h: Likewise.
* svalue.cc: Likewise.
* svalue.h: Likewise.
* varargs.cc: Likewise.
Signed-off-by: David Malcolm <dmalcolm@redhat.com>
{
logger.start_log_line ();
logger.log_partial ("table_x: %i", table_x);
- access_range range_for_column (NULL, bit_range (0, 0));
+ access_range range_for_column (nullptr, bit_range (0, 0));
if (maybe_get_access_range_for_table_x (table_x, &range_for_column))
{
logger.log_partial (": range: ");
for (int table_x = 0; table_x < t.get_size ().w; table_x++)
{
const int table_y = 1;
- access_range range_for_column (NULL, bit_range (0, 0));
+ access_range range_for_column (nullptr, bit_range (0, 0));
if (m_btm.maybe_get_access_range_for_table_x (table_x,
&range_for_column))
{
std::vector<bit_offset_t> bit_sizes (num_columns);
for (unsigned table_x = 0; table_x < num_columns; table_x++)
{
- access_range range_for_column (NULL, bit_range (0, 0));
+ access_range range_for_column (nullptr, bit_range (0, 0));
if (m_btm.maybe_get_access_range_for_table_x (table_x,
&range_for_column))
{
return;
FILE *logfile = get_or_create_any_logfile ();
- log_user the_logger (NULL);
+ log_user the_logger (nullptr);
if (logfile)
the_logger.set_logger (new logger (logfile, 0, 0,
*global_dc->get_reference_printer ()));
public:
/* Attempt to look up an value for identifier ID (e.g. in the headers that
have been seen). If it is defined and an integer (e.g. either as a
- macro or enum), return the INTEGER_CST value, otherwise return NULL. */
+ macro or enum), return the INTEGER_CST value, otherwise return NULL_TREE. */
virtual tree lookup_constant_by_id (tree id) const = 0;
virtual tree lookup_type_by_id (tree id) const = 0;
virtual tree lookup_global_var_by_id (tree id) const = 0;
/* The constructor for log_scope.
- The normal case is that the logger is NULL, in which case this should
+ The normal case is that the logger is nullptr, in which case this should
be largely a no-op.
If we do have a logger, notify it that we're entering the given scope.
}
}
-/* A log_user is something that potentially uses a logger (which could be NULL).
+/* A log_user is something that potentially uses a logger (which could be
+ nullptr).
The log_user class keeps the reference-count of a logger up-to-date. */
FILE *get_logger_file () const
{
- if (m_logger == NULL)
- return NULL;
+ if (m_logger == nullptr)
+ return nullptr;
return m_logger->get_file ();
}
};
/* A shortcut for calling log from a log_user, handling the common
- case where the underlying logger is NULL via a no-op. */
+ case where the underlying logger is nullptr via a no-op. */
inline void
log_user::log (const char *fmt, ...) const
}
/* A shortcut for starting a log line from a log_user,
- handling the common case where the underlying logger is NULL via
+ handling the common case where the underlying logger is nullptr via
a no-op. */
inline void
}
/* A shortcut for ending a log line from a log_user,
- handling the common case where the underlying logger is NULL via
+ handling the common case where the underlying logger is nullptr via
a no-op. */
inline void
}
/* A shortcut for recording entry into a scope from a log_user,
- handling the common case where the underlying logger is NULL via
+ handling the common case where the underlying logger is nullptr via
a no-op. */
inline void
}
/* A shortcut for recording exit from a scope from a log_user,
- handling the common case where the underlying logger is NULL via
+ handling the common case where the underlying logger is nullptr via
a no-op. */
inline void
public:
pass_analyzer(gcc::context *ctxt)
: ipa_opt_pass_d (pass_data_analyzer, ctxt,
- NULL, /* generate_summary */
- NULL, /* write_summary */
- NULL, /* read_summary */
- NULL, /* write_optimization_summary */
- NULL, /* read_optimization_summary */
- NULL, /* stmt_fixup */
+ nullptr, /* generate_summary */
+ nullptr, /* write_summary */
+ nullptr, /* read_summary */
+ nullptr, /* write_optimization_summary */
+ nullptr, /* read_optimization_summary */
+ nullptr, /* stmt_fixup */
0, /* function_transform_todo_flags_start */
- NULL, /* function_transform */
- NULL) /* variable_transform */
+ nullptr, /* function_transform */
+ nullptr) /* variable_transform */
{}
/* opt_pass methods: */
va_start (ap, fmt);
- text_info ti (_(fmt), &ap, 0, NULL, &rich_loc);
+ text_info ti (_(fmt), &ap, 0, nullptr, &rich_loc);
pp_format (pp.get (), &ti);
pp_output_formatted_text (pp.get ());
const char *fmt = ngettext (singular_fmt, plural_fmt, n);
- text_info ti (fmt, &ap, 0, NULL, &rich_loc);
+ text_info ti (fmt, &ap, 0, nullptr, &rich_loc);
pp_format (pp.get (), &ti);
pp_output_formatted_text (pp.get ());
concrete_buffer_over_read (const region_model &model,
const region *reg, tree diag_arg,
bit_range range, tree bit_bound)
- : concrete_past_the_end (model, reg, diag_arg, range, bit_bound, NULL)
+ : concrete_past_the_end (model, reg, diag_arg, range, bit_bound, nullptr)
{}
const char *get_kind () const final override
concrete_buffer_under_read (const region_model &model,
const region *reg, tree diag_arg,
bit_range range)
- : concrete_out_of_bounds (model, reg, diag_arg, range, NULL)
+ : concrete_out_of_bounds (model, reg, diag_arg, range, nullptr)
{}
const char *get_kind () const final override
const region *reg, tree diag_arg, tree offset,
tree num_bytes, tree capacity)
: symbolic_past_the_end (model, reg, diag_arg, offset, num_bytes, capacity,
- NULL)
+ nullptr)
{
}
call_details::call_details (const gcall &call, region_model *model,
region_model_context *ctxt)
: m_call (call), m_model (model), m_ctxt (ctxt),
- m_lhs_type (NULL_TREE), m_lhs_region (NULL)
+ m_lhs_type (NULL_TREE), m_lhs_region (nullptr)
{
m_lhs_type = NULL_TREE;
if (tree lhs = gimple_call_lhs (&call))
if (m_ctxt)
return m_ctxt->get_logger ();
else
- return NULL;
+ return nullptr;
}
/* Get any uncertainty_t associated with the region_model_context. */
if (m_ctxt)
return m_ctxt->get_uncertainty ();
else
- return NULL;
+ return nullptr;
}
/* If the callsite has a left-hand-side region, set it to RESULT
/* If this CD is known to be a call to a function with
__attribute__((const)), attempt to get a const_fn_result_svalue
- based on the arguments, or return NULL otherwise. */
+ based on the arguments, or return nullptr otherwise. */
static const svalue *
maybe_get_const_fn_result (const call_details &cd)
{
if (!const_fn_p (cd))
- return NULL;
+ return nullptr;
unsigned num_args = cd.num_args ();
if (num_args > const_fn_result_svalue::MAX_INPUTS)
/* Too many arguments. */
- return NULL;
+ return nullptr;
auto_vec<const svalue *> inputs (num_args);
for (unsigned arg_idx = 0; arg_idx < num_args; arg_idx++)
{
const svalue *arg_sval = cd.get_arg_svalue (arg_idx);
if (!arg_sval->can_have_associated_state_p ())
- return NULL;
+ return nullptr;
inputs.quick_push (arg_sval);
}
if (lookup_function_attribute ("malloc"))
{
const region *new_reg
- = m_model->get_or_create_region_for_heap_alloc (NULL, m_ctxt);
- m_model->mark_region_as_unknown (new_reg, NULL);
+ = m_model->get_or_create_region_for_heap_alloc (nullptr, m_ctxt);
+ m_model->mark_region_as_unknown (new_reg, nullptr);
sval = mgr->get_ptr_svalue (get_lhs_type (), new_reg);
}
else
/* If argument IDX's svalue at the callsite is of pointer type,
return the region it points to.
- Otherwise return NULL. */
+ Otherwise return nullptr. */
const region *
call_details::deref_ptr_arg (unsigned idx) const
return m_model->deref_rvalue (ptr_sval, get_arg_tree (idx), m_ctxt);
}
-/* Attempt to get the string literal for argument IDX, or return NULL
+/* Attempt to get the string literal for argument IDX, or return nullptr
otherwise.
For use when implementing "__analyzer_*" functions that take
string literals. */
tree string_cst = string_reg->get_string_cst ();
return TREE_STRING_POINTER (string_cst);
}
- return NULL;
+ return nullptr;
}
/* Attempt to get the fndecl used at this call, if known, or NULL_TREE
}
/* Return the pointer to callee of the topmost call in the stack,
- or NULL if stack is empty. */
+ or nullptr if stack is empty. */
const supernode *
call_string::get_callee_node () const
{
if(m_elements.is_empty ())
- return NULL;
+ return nullptr;
return m_elements[m_elements.length () - 1].m_callee;
}
/* Return the pointer to caller of the topmost call in the stack,
- or NULL if stack is empty. */
+ or nullptr if stack is empty. */
const supernode *
call_string::get_caller_node () const
{
if(m_elements.is_empty ())
- return NULL;
+ return nullptr;
return m_elements[m_elements.length () - 1].m_caller;
}
/* ctor for the root/empty call_string. */
call_string::call_string ()
-: m_parent (NULL), m_elements ()
+: m_parent (nullptr), m_elements ()
{
}
}
template <typename T> static inline void remove (T &entry)
{
- entry.m_key = element_t (NULL, NULL);
+ entry.m_key = element_t (nullptr, nullptr);
}
static const bool empty_zero_p = true;
template <typename T> static inline bool is_empty (const T &entry)
{
- return entry.m_key.m_caller == NULL;
+ return entry.m_key.m_caller == nullptr;
}
template <typename T> static inline bool is_deleted (const T &entry)
{
}
template <typename T> static inline void mark_empty (T &entry)
{
- entry.m_key = element_t (NULL, NULL);
- entry.m_value = NULL;
+ entry.m_key = element_t (nullptr, nullptr);
+ entry.m_value = nullptr;
}
template <typename T> static inline void mark_deleted (T &entry)
{
if (tree result = DECL_RESULT (fndecl))
{
const region *result_reg
- = get_state ().m_region_model->get_lvalue (result, NULL);
+ = get_state ().m_region_model->get_lvalue (result, nullptr);
const svalue *result_sval
- = get_state ().m_region_model->get_store_value (result_reg, NULL);
+ = get_state ().m_region_model->get_store_value (result_reg, nullptr);
switch (result_sval->get_kind ())
{
default:
This will be a top-level frame, since that's what's in
the summary. */
const frame_region *summary_frame
- = mgr->get_frame_region (NULL, called_fn);
+ = mgr->get_frame_region (nullptr, called_fn);
unsigned idx = 0;
for (tree iter_parm = DECL_ARGUMENTS (fndecl); iter_parm;
/* Try to convert SUMMARY_SVAL in the summary to a corresponding svalue
in the caller, caching the result.
- Return NULL if the conversion is not possible. */
+ Return nullptr if the conversion is not possible. */
const svalue *
call_summary_replay::convert_svalue_from_summary (const svalue *summary_sval)
const region *summary_reg = region_summary_sval->get_pointee ();
const region *caller_reg = convert_region_from_summary (summary_reg);
if (!caller_reg)
- return NULL;
+ return nullptr;
region_model_manager *mgr = get_manager ();
const svalue *caller_ptr
= mgr->get_ptr_svalue (summary_sval->get_type (),
return summary_sval;
case SK_SETJMP:
- return NULL; // TODO
+ return nullptr; // TODO
case SK_INITIAL:
{
const region *summary_reg = initial_summary_sval->get_region ();
const region *caller_reg = convert_region_from_summary (summary_reg);
if (!caller_reg)
- return NULL;
+ return nullptr;
const svalue *caller_sval
= m_cd.get_model ()->get_store_value (caller_reg, m_cd.get_ctxt ());
return caller_sval;
const svalue *summary_arg = unaryop_summary_sval->get_arg ();
const svalue *caller_arg = convert_svalue_from_summary (summary_arg);
if (!caller_arg)
- return NULL;
+ return nullptr;
region_model_manager *mgr = get_manager ();
return mgr->get_or_create_unaryop (summary_sval->get_type (),
unaryop_summary_sval->get_op (),
const svalue *summary_arg0 = binop_summary_sval->get_arg0 ();
const svalue *caller_arg0 = convert_svalue_from_summary (summary_arg0);
if (!caller_arg0)
- return NULL;
+ return nullptr;
const svalue *summary_arg1 = binop_summary_sval->get_arg1 ();
const svalue *caller_arg1 = convert_svalue_from_summary (summary_arg1);
if (!caller_arg1)
- return NULL;
+ return nullptr;
region_model_manager *mgr = get_manager ();
return mgr->get_or_create_binop (summary_sval->get_type (),
binop_summary_sval->get_op (),
region_model_manager *mgr = get_manager ();
const svalue *summary_parent_sval = sub_summary_sval->get_parent ();
if (!summary_parent_sval)
- return NULL;
+ return nullptr;
const region *summary_subregion = sub_summary_sval->get_subregion ();
if (!summary_subregion)
- return NULL;
+ return nullptr;
return mgr->get_or_create_sub_svalue (summary_sval->get_type (),
summary_parent_sval,
summary_subregion);
const svalue *caller_outer_size
= convert_svalue_from_summary (summary_outer_size);
if (!caller_outer_size)
- return NULL;
+ return nullptr;
const svalue *summary_inner_sval
= repeated_summary_sval->get_inner_svalue ();
const svalue *caller_inner_sval
= convert_svalue_from_summary (summary_inner_sval);
if (!caller_inner_sval)
- return NULL;
+ return nullptr;
region_model_manager *mgr = get_manager ();
return mgr->get_or_create_repeated_svalue (summary_sval->get_type (),
caller_outer_size,
const svalue *caller_inner_sval
= convert_svalue_from_summary (summary_inner_sval);
if (!caller_inner_sval)
- return NULL;
+ return nullptr;
region_model_manager *mgr = get_manager ();
return mgr->get_or_create_bits_within (summary_sval->get_type (),
bits,
const svalue *caller_arg_sval
= convert_svalue_from_summary (summary_arg_sval);
if (!caller_arg_sval)
- return NULL;
+ return nullptr;
region_model_manager *mgr = get_manager ();
return mgr->get_or_create_unmergeable (caller_arg_sval);
}
= convert_svalue_from_summary (summary_base_sval);
if (!(caller_base_sval
&& caller_base_sval->can_have_associated_state_p ()))
- return NULL;
+ return nullptr;
const svalue *summary_iter_sval
= widening_summary_sval->get_iter_svalue ();
const svalue *caller_iter_sval
= convert_svalue_from_summary (summary_iter_sval);
if (!(caller_iter_sval
&& caller_iter_sval->can_have_associated_state_p ()))
- return NULL;
+ return nullptr;
region_model_manager *mgr = get_manager ();
return mgr->get_or_create_widening_svalue
(summary_iter_sval->get_type (),
const svalue *caller_input
= convert_svalue_from_summary (summary_input);
if (!caller_input)
- return NULL;
+ return nullptr;
inputs.safe_push (caller_input);
}
region_model_manager *mgr = get_manager ();
const svalue *caller_input
= convert_svalue_from_summary (summary_input);
if (!caller_input)
- return NULL;
+ return nullptr;
inputs.safe_push (caller_input);
}
region_model_manager *mgr = get_manager ();
/* Try to convert SUMMARY_REG in the summary to a corresponding region
in the caller, caching the result.
- Return NULL if the conversion is not possible. */
+ Return nullptr if the conversion is not possible. */
const region *
call_summary_replay::convert_region_from_summary (const region *summary_reg)
const svalue *caller_ptr_sval
= convert_svalue_from_summary (summary_ptr_sval);
if (!caller_ptr_sval)
- return NULL;
+ return nullptr;
const region *caller_reg
= get_caller_model ()->deref_rvalue (caller_ptr_sval,
NULL_TREE,
case SSA_NAME:
/* We don't care about writes to locals within
the summary. */
- return NULL;
+ return nullptr;
case VAR_DECL:
/* We don't care about writes to locals within
the summary. */
return summary_reg;
else
/* Otherwise, we don't care about locals. */
- return NULL;
+ return nullptr;
case RESULT_DECL:
return m_cd.get_lhs_region ();
case PARM_DECL:
/* Writes (by value) to parms should be visible to the caller. */
- return NULL;
+ return nullptr;
}
}
break;
const region *caller_parent_reg
= convert_region_from_summary (summary_parent_reg);
if (!caller_parent_reg)
- return NULL;
+ return nullptr;
tree field = summary_field_reg->get_field ();
return mgr->get_field_region (caller_parent_reg, field);
}
const region *caller_parent_reg
= convert_region_from_summary (summary_parent_reg);
if (!caller_parent_reg)
- return NULL;
+ return nullptr;
const svalue *summary_index = summary_element_reg->get_index ();
const svalue *caller_index
= convert_svalue_from_summary (summary_index);
if (!caller_index)
- return NULL;
+ return nullptr;
return mgr->get_element_region (caller_parent_reg,
summary_reg->get_type (),
caller_index);
const region *caller_parent_reg
= convert_region_from_summary (summary_parent_reg);
if (!caller_parent_reg)
- return NULL;
+ return nullptr;
const svalue *summary_byte_offset
= summary_offset_reg->get_byte_offset ();
const svalue *caller_byte_offset
= convert_svalue_from_summary (summary_byte_offset);
if (!caller_byte_offset)
- return NULL;
+ return nullptr;
return mgr->get_offset_region (caller_parent_reg,
summary_reg->get_type (),
caller_byte_offset);
const region *caller_parent_reg
= convert_region_from_summary (summary_parent_reg);
if (!caller_parent_reg)
- return NULL;
+ return nullptr;
const svalue *summary_byte_size
= summary_sized_reg->get_byte_size_sval (mgr);
const svalue *caller_byte_size
= convert_svalue_from_summary (summary_byte_size);
if (!caller_byte_size)
- return NULL;
+ return nullptr;
return mgr->get_sized_region (caller_parent_reg,
summary_reg->get_type (),
caller_byte_size);
const region *caller_parent_reg
= convert_region_from_summary (summary_parent_reg);
if (!caller_parent_reg)
- return NULL;
+ return nullptr;
return mgr->get_cast_region (caller_parent_reg,
summary_reg->get_type ());
}
}
break;
case RK_ALLOCA:
- return NULL;
+ return nullptr;
case RK_BIT_RANGE:
{
const bit_range_region *summary_bit_range_reg
const region *caller_parent_reg
= convert_region_from_summary (summary_parent_reg);
if (!caller_parent_reg)
- return NULL;
+ return nullptr;
const bit_range &bits = summary_bit_range_reg->get_bits ();
return mgr->get_bit_range (caller_parent_reg,
summary_reg->get_type (),
}
break;
case RK_VAR_ARG:
- return NULL;
+ return nullptr;
}
}
/* Try to convert SUMMARY_KEY in the summary to a corresponding binding key
in the caller.
- Return NULL if the conversion is not possible. */
+ Return nullptr if the conversion is not possible. */
const binding_key *
call_summary_replay::convert_key_from_summary (const binding_key *summary_key)
const region *summary_reg = symbolic_key->get_region ();
const region *caller_reg = convert_region_from_summary (summary_reg);
if (!caller_reg)
- return NULL;
+ return nullptr;
region_model_manager *mgr = get_manager ();
store_manager *store_mgr = mgr->get_store_manager ();
return store_mgr->get_symbolic_binding (caller_reg);
const svalue *caller_sval)
{
gcc_assert (summary_sval);
- // CALLER_SVAL can be NULL
+ // CALLER_SVAL can be nullptr
m_map_svalue_from_summary_to_caller.put (summary_sval, caller_sval);
}
const region *caller_reg)
{
gcc_assert (summary_reg);
- // CALLER_REG can be NULL
+ // CALLER_REG can be nullptr
m_map_region_from_summary_to_caller.put (summary_reg, caller_reg);
}
m_effective_fndecl (loc_info.m_fndecl),
m_original_depth (loc_info.m_depth),
m_effective_depth (loc_info.m_depth),
- m_pending_diagnostic (NULL), m_emission_id (),
+ m_pending_diagnostic (nullptr), m_emission_id (),
m_logical_loc
(tree_logical_location_manager::key_from_tree (loc_info.m_fndecl))
{
}
else
{
- gcc_assert (m_origin == NULL);
+ gcc_assert (m_origin == nullptr);
pp_printf (&pp,
"global state: %qs -> %qs",
m_from->get_name (),
lhs, op, rhs);
}
}
- return label_text::borrow (NULL);
+ return label_text::borrow (nullptr);
}
/* Subroutine of maybe_describe_condition above.
/* Only attempt to generate text for sufficiently simple expressions. */
if (!should_print_expr_p (lhs))
- return label_text::borrow (NULL);
+ return label_text::borrow (nullptr);
if (!should_print_expr_p (rhs))
- return label_text::borrow (NULL);
+ return label_text::borrow (nullptr);
/* Special cases for pointer comparisons against NULL. */
if (POINTER_TYPE_P (TREE_TYPE (lhs))
{
public:
region_offset ()
- : m_base_region (NULL), m_offset (0), m_sym_offset (NULL)
+ : m_base_region (nullptr), m_offset (0), m_sym_offset (nullptr)
{
}
static region_offset make_concrete (const region *base_region,
bit_offset_t offset)
{
- return region_offset (base_region, offset, NULL);
+ return region_offset (base_region, offset, nullptr);
}
static region_offset make_symbolic (const region *base_region,
const svalue *sym_offset)
const region *get_base_region () const { return m_base_region; }
- bool concrete_p () const { return m_sym_offset == NULL; }
- bool symbolic_p () const { return m_sym_offset != NULL; }
+ bool concrete_p () const { return m_sym_offset == nullptr; }
+ bool symbolic_p () const { return m_sym_offset != nullptr; }
bit_offset_t get_bit_offset () const
{
}
virtual const builtin_known_function *
- dyn_cast_builtin_kf () const { return NULL; }
+ dyn_cast_builtin_kf () const { return nullptr; }
};
/* Subclass of known_function for builtin functions. */
delete (*iter).second;
}
- /* Get the instance of T for K if one exists, or NULL. */
+ /* Get the instance of T for K if one exists, or nullptr. */
T *get (const key_t &k) const
{
if (instance_t **slot = const_cast<inner_map_t &> (m_inner_map).get (k))
return *slot;
- return NULL;
+ return nullptr;
}
/* Take ownership of INSTANCE. */
{
bounded_range *prev = &m_ranges[i - 1];
const bounded_range *next = &m_ranges[i];
- if (prev->intersects_p (*next, NULL)
+ if (prev->intersects_p (*next, nullptr)
|| (can_plus_one_p (prev->m_upper)
&& tree_int_cst_equal (plus_one (prev->m_upper),
next->m_lower)))
/* equiv_class's default ctor. */
equiv_class::equiv_class ()
-: m_constant (NULL_TREE), m_cst_sval (NULL), m_vars ()
+: m_constant (NULL_TREE), m_cst_sval (nullptr), m_vars ()
{
}
const region_model *model)
{
dead_svalue_purger p (live_svalues, model);
- purge (p, NULL);
+ purge (p, nullptr);
}
class svalue_purger
constraint_manager::purge_state_involving (const svalue *sval)
{
svalue_purger p (sval);
- purge (p, NULL);
+ purge (p, nullptr);
}
/* Comparator for use by constraint_manager::canonicalize.
{
/* Special-case for widening. */
if (lhs->get_kind () == SK_WIDENING)
- if (!m_cm_b->get_equiv_class_by_svalue (lhs, NULL))
+ if (!m_cm_b->get_equiv_class_by_svalue (lhs, nullptr))
{
/* LHS isn't constrained within m_cm_b. */
bool sat = m_out->add_constraint (lhs, code, rhs);
ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, x);
ADD_SAT_CONSTRAINT (model, int_42, EQ_EXPR, int_42);
/* ...even when done directly via svalues: */
- const svalue *sval_int_42 = model.get_rvalue (int_42, NULL);
+ const svalue *sval_int_42 = model.get_rvalue (int_42, nullptr);
bool sat = model.get_constraints ()->add_constraint (sval_int_42,
EQ_EXPR,
sval_int_42);
ASSERT_EQ (model.get_constraints ()->m_constraints.length (), 1);
/* Purge state for "a". */
- const svalue *sval_a = model.get_rvalue (a, NULL);
- model.purge_state_involving (sval_a, NULL);
+ const svalue *sval_a = model.get_rvalue (a, nullptr);
+ model.purge_state_involving (sval_a, nullptr);
model.canonicalize ();
/* We should have an empty constraint_manager. */
ASSERT_EQ (model.get_constraints ()->m_equiv_classes.length (), 0);
ASSERT_EQ (model.get_constraints ()->m_constraints.length (), 2);
/* Purge state for "a". */
- const svalue *sval_a = model.get_rvalue (a, NULL);
- model.purge_state_involving (sval_a, NULL);
+ const svalue *sval_a = model.get_rvalue (a, nullptr);
+ model.purge_state_involving (sval_a, nullptr);
model.canonicalize ();
/* We should just have the constraint/ECs involving b != 0. */
ASSERT_EQ (model.get_constraints ()->m_equiv_classes.length (), 2);
ASSERT_EQ (model.get_constraints ()->m_constraints.length (), 1);
/* Purge state for "a". */
- const svalue *sval_a = model.get_rvalue (a, NULL);
- model.purge_state_involving (sval_a, NULL);
+ const svalue *sval_a = model.get_rvalue (a, nullptr);
+ model.purge_state_involving (sval_a, nullptr);
model.canonicalize ();
/* We should just have the EC involving b == 0. */
ASSERT_EQ (model.get_constraints ()->m_equiv_classes.length (), 1);
ASSERT_EQ (model.get_constraints ()->m_constraints.length (), 0);
/* Purge state for "a". */
- const svalue *sval_a = model.get_rvalue (a, NULL);
- model.purge_state_involving (sval_a, NULL);
+ const svalue *sval_a = model.get_rvalue (a, nullptr);
+ model.purge_state_involving (sval_a, nullptr);
model.canonicalize ();
/* We should have an empty constraint_manager. */
ASSERT_EQ (model.get_constraints ()->m_equiv_classes.length (), 0);
ASSERT_EQ (model.get_constraints ()->m_constraints.length (), 1);
/* Purge state for "a". */
- const svalue *sval_a = model.get_rvalue (a, NULL);
- model.purge_state_involving (sval_a, NULL);
+ const svalue *sval_a = model.get_rvalue (a, nullptr);
+ model.purge_state_involving (sval_a, nullptr);
model.canonicalize ();
/* We should just have the constraint/ECs involving b != 0. */
ASSERT_EQ (model.get_constraints ()->m_equiv_classes.length (), 2);
ASSERT_EQ (model.get_constraints ()->m_constraints.length (), 0);
/* Purge state for "a". */
- const svalue *sval_a = model.get_rvalue (a, NULL);
- model.purge_state_involving (sval_a, NULL);
+ const svalue *sval_a = model.get_rvalue (a, nullptr);
+ model.purge_state_involving (sval_a, nullptr);
model.canonicalize ();
/* We should just have the EC involving b == 0. */
ASSERT_EQ (model.get_constraints ()->m_equiv_classes.length (), 1);
bounded_range br_u8_64_128 (u8_64, u8_128);
ASSERT_DUMP_BOUNDED_RANGE_EQ (br_u8_64_128, "[64, 128]");
- ASSERT_FALSE (br_u8_0.intersects_p (br_u8_64_128, NULL));
- ASSERT_FALSE (br_u8_64_128.intersects_p (br_u8_0, NULL));
+ ASSERT_FALSE (br_u8_0.intersects_p (br_u8_64_128, nullptr));
+ ASSERT_FALSE (br_u8_64_128.intersects_p (br_u8_0, nullptr));
bounded_range br_u8_128_255 (u8_128, u8_255);
ASSERT_DUMP_BOUNDED_RANGE_EQ (br_u8_128_255, "[128, 255]");
{
return k->get_hash ();
}
- static inline bool is_empty (key_type k) { return k == NULL; }
- static inline void mark_empty (key_type &k) { k = NULL; }
+ static inline bool is_empty (key_type k) { return k == nullptr; }
+ static inline void mark_empty (key_type &k) { k = nullptr; }
static inline bool is_deleted (key_type k)
{
return k == reinterpret_cast<key_type> (1);
public:
epath_finder (const exploded_graph &eg)
: m_eg (eg),
- m_sep (NULL)
+ m_sep (nullptr)
{
/* This is shared by all diagnostics, but only needed if
!flag_analyzer_feasibility. */
within ENODE.
Ideally we want to report the shortest feasible path.
- Return NULL if we could not find a feasible path
+ Return nullptr if we could not find a feasible path
(when flag_analyzer_feasibility is true).
If flag_analyzer_feasibility is false, then simply return the
logger->log ("rejecting %qs at EN: %i, SN: %i (sd: %i)"
" due to not finding feasible path",
desc, enode->m_index, snode_idx, diag_idx);
- return NULL;
+ return nullptr;
}
}
else
{
public:
feasible_worklist (const shortest_paths<eg_traits, exploded_path> &sep)
- : m_queue (key_t (*this, NULL)),
+ : m_queue (key_t (*this, nullptr)),
m_sep (sep)
{
}
a limit. */
/* Set this if we find a feasible path to TARGET_ENODE. */
- std::unique_ptr<exploded_path> best_path = NULL;
+ std::unique_ptr<exploded_path> best_path = nullptr;
{
auto_checking_feasibility sentinel (mgr);
std::unique_ptr<rejected_constraint> rc;
if (succ_state.maybe_update_for_edge (logger, succ_eedge, nullptr, &rc))
{
- gcc_assert (rc == NULL);
+ gcc_assert (rc == nullptr);
feasible_node *succ_fnode
= fg->add_node (succ_eedge->m_dest,
succ_state,
pp_printf (&pp, "%s.%s.%i.to-en%i.tg.dot",
dump_base_name, desc, diag_idx, target_enode->m_index);
char *filename = xstrdup (pp_formatted_text (&pp));
- tg.dump_dot (filename, NULL, args);
+ tg.dump_dot (filename, nullptr, args);
free (filename);
}
pp_printf (&pp, "%s.%s.%i.to-en%i.fg.dot",
dump_base_name, desc, diag_idx, target_enode->m_index);
char *filename = xstrdup (pp_formatted_text (&pp));
- fg.dump_dot (filename, NULL, args);
+ fg.dump_dot (filename, nullptr, args);
free (filename);
}
/* Use PF to find the best exploded_path for this saved_diagnostic,
and store it in m_best_epath.
- If we don't have a specific location in m_loc and m_stmt is still NULL,
+ If we don't have a specific location in m_loc and m_stmt is still nullptr,
use m_stmt_finder on the epath to populate m_stmt.
Return true if a best path was found. */
{
logger *logger = pf->get_logger ();
LOG_SCOPE (logger);
- m_problem = NULL;
+ m_problem = nullptr;
m_best_epath = pf->get_best_epath (m_enode, m_stmt,
*m_d, m_d->get_kind (), m_idx,
&m_problem);
/* Handle failure to find a feasible path. */
- if (m_best_epath == NULL)
+ if (m_best_epath == nullptr)
return false;
gcc_assert (m_best_epath);
if (m_loc == UNKNOWN_LOCATION)
{
- if (m_stmt == NULL)
+ if (m_stmt == nullptr)
{
gcc_assert (m_stmt_finder);
m_stmt = m_stmt_finder->find_stmt (*m_best_epath);
std::unique_ptr<pending_diagnostic> d)
{
gcc_assert (ploc.m_enode);
- return add_diagnostic (NULL, ploc, NULL_TREE, NULL, 0, std::move (d));
+ return add_diagnostic (nullptr, ploc, NULL_TREE, nullptr, 0, std::move (d));
}
/* Add PN to the most recent saved_diagnostic. */
template <typename T>
static inline void mark_empty (T &entry)
{
- entry.m_key = NULL;
+ entry.m_key = nullptr;
}
template <typename T>
static inline bool is_deleted (const T &entry)
template <typename T>
static inline bool is_empty (const T &entry)
{
- return entry.m_key == NULL;
+ return entry.m_key == nullptr;
}
static const bool empty_zero_p = true;
};
trailing eedge stashed, add any events for it. This is for use
in handling longjmp, to show where a longjmp is rewinding to. */
if (sd.m_trailing_eedge)
- add_events_for_eedge (pb, *sd.m_trailing_eedge, &emission_path, NULL);
+ add_events_for_eedge (pb, *sd.m_trailing_eedge, &emission_path, nullptr);
emission_path.inject_any_inlined_call_events (get_logger ());
{
emission_path->add_region_creation_events
(pb.get_pending_diagnostic (),
- reg, NULL,
+ reg, nullptr,
event_loc_info (DECL_SOURCE_LOCATION (decl),
NULL_TREE,
0),
= src_model->get_dynamic_extents (base_reg);
const svalue *new_extents
= dst_model->get_dynamic_extents (base_reg);
- if (old_extents == NULL && new_extents != NULL)
+ if (old_extents == nullptr && new_extents != nullptr)
switch (base_reg->get_kind ())
{
default:
tree var) final override
{
const svalue *var_old_sval
- = m_old_state->m_region_model->get_rvalue (var, NULL);
+ = m_old_state->m_region_model->get_rvalue (var, nullptr);
const sm_state_map *old_smap = m_old_state->m_checker_states[m_sm_idx];
state_machine::state_t current
return;
const svalue *var_new_sval
- = m_new_state->m_region_model->get_rvalue (var, NULL);
+ = m_new_state->m_region_model->get_rvalue (var, nullptr);
const supernode *supernode = m_point->get_supernode ();
int stack_depth = m_point->get_stack_depth ();
if (!assign_stmt)
return NULL_TREE;
if (const svalue *sval
- = m_new_state->m_region_model->get_gassign_result (assign_stmt, NULL))
+ = m_new_state->m_region_model->get_gassign_result (assign_stmt, nullptr))
if (tree cst = sval->maybe_get_constant ())
if (::zerop(cst))
return gimple_assign_lhs (assign_stmt);
{
const extrinsic_state &ext_state = pb.get_ext_state ();
program_state old_state (iter_state);
- iter_state.m_region_model->on_assignment (assign, NULL);
+ iter_state.m_region_model->on_assignment (assign, nullptr);
for (unsigned i = 0; i < ext_state.get_num_checkers (); i++)
{
const state_machine &sm = ext_state.get_sm (i);
= src_model->get_dynamic_extents (base_reg);
const svalue *new_extents
= dst_model->get_dynamic_extents (base_reg);
- if (old_extents == NULL && new_extents != NULL)
+ if (old_extents == nullptr && new_extents != nullptr)
switch (base_reg->get_kind ())
{
default:
const checker_event *ev = path->get_checker_event (idx);
expanded_location idx_exp_loc = expand_location (ev->get_location ());
gcc_assert (ref_exp_loc.file);
- if (idx_exp_loc.file == NULL)
+ if (idx_exp_loc.file == nullptr)
return false;
if (strcmp (ref_exp_loc.file, idx_exp_loc.file))
return false;
= path->get_checker_event (start_idx);
expanded_location start_exp_loc
= expand_location (old_start_ev->get_location ());
- if (start_exp_loc.file == NULL)
+ if (start_exp_loc.file == nullptr)
continue;
if (!same_line_as_p (start_exp_loc, path, start_idx + 1))
continue;
const extrinsic_state &ext_state,
uncertainty_t *uncertainty,
logger *logger)
-: m_eg (NULL), m_logger (logger), m_enode_for_diag (NULL),
- m_old_state (NULL),
+: m_eg (nullptr), m_logger (logger), m_enode_for_diag (nullptr),
+ m_old_state (nullptr),
m_new_state (state),
- m_stmt (NULL),
- m_stmt_finder (NULL),
+ m_stmt (nullptr),
+ m_stmt_finder (nullptr),
m_ext_state (ext_state),
m_uncertainty (uncertainty),
- m_path_ctxt (NULL),
+ m_path_ctxt (nullptr),
m_out_could_have_done_work (nullptr)
{
}
{
LOG_FUNC (get_logger ());
auto curr_stmt_finder = custom_finder ? custom_finder : m_stmt_finder;
- if (m_stmt == NULL && curr_stmt_finder == NULL)
+ if (m_stmt == nullptr && curr_stmt_finder == nullptr)
{
if (get_logger ())
get_logger ()->log ("rejecting diagnostic: no stmt");
const sm_state_map *old_smap,
sm_state_map *new_smap,
path_context *path_ctxt,
- const stmt_finder *stmt_finder = NULL,
+ const stmt_finder *stmt_finder = nullptr,
bool unknown_side_effects = false)
: sm_context (sm_idx, sm),
m_logger (eg.get_logger ()),
tree get_fndecl_for_call (const gcall &call) final override
{
impl_region_model_context old_ctxt
- (m_eg, m_enode_for_diag, NULL, NULL, NULL/*m_enode->get_state ()*/,
- NULL, &call);
+ (m_eg, m_enode_for_diag, nullptr, nullptr, nullptr/*m_enode->get_state ()*/,
+ nullptr, &call);
region_model *model = m_new_state->m_region_model;
return model->get_fndecl_for_call (call, &old_ctxt);
}
{
logger * const logger = get_logger ();
LOG_FUNC (logger);
- /* Use NULL ctxt on this get_rvalue call to avoid triggering
+ /* Use nullptr ctxt on this get_rvalue call to avoid triggering
uninitialized value warnings. */
const svalue *var_old_sval
- = m_old_state->m_region_model->get_rvalue (var, NULL);
+ = m_old_state->m_region_model->get_rvalue (var, nullptr);
state_machine::state_t current
= m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ());
logger * const logger = get_logger ();
LOG_FUNC (logger);
const svalue *var_new_sval
- = m_new_state->m_region_model->get_rvalue (var, NULL);
+ = m_new_state->m_region_model->get_rvalue (var, nullptr);
const svalue *origin_new_sval
- = m_new_state->m_region_model->get_rvalue (origin, NULL);
+ = m_new_state->m_region_model->get_rvalue (origin, nullptr);
/* We use the new sval here to avoid issues with uninitialized values. */
state_machine::state_t current
logger * const logger = get_logger ();
LOG_FUNC (logger);
impl_region_model_context old_ctxt
- (m_eg, m_enode_for_diag, NULL, NULL, NULL/*m_enode->get_state ()*/,
- NULL, stmt);
+ (m_eg, m_enode_for_diag, nullptr, nullptr, nullptr/*m_enode->get_state ()*/,
+ nullptr, stmt);
const svalue *origin_new_sval
- = m_new_state->m_region_model->get_rvalue (origin, NULL);
+ = m_new_state->m_region_model->get_rvalue (origin, nullptr);
state_machine::state_t current
= m_old_smap->get_state (sval, m_eg.get_ext_state ());
LOG_FUNC (get_logger ());
gcc_assert (d);
const svalue *var_old_sval
- = m_old_state->m_region_model->get_rvalue (var, NULL);
+ = m_old_state->m_region_model->get_rvalue (var, nullptr);
state_machine::state_t current
= (var
? m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ())
return expr;
gcc_assert (m_new_state);
- const svalue *sval = m_new_state->m_region_model->get_rvalue (expr, NULL);
+ const svalue *sval = m_new_state->m_region_model->get_rvalue (expr, nullptr);
/* Find trees for all regions storing the value. */
if (tree t = m_new_state->m_region_model->get_representative_tree (sval))
return t;
if (!assign_stmt)
return NULL_TREE;
impl_region_model_context old_ctxt
- (m_eg, m_enode_for_diag, m_old_state, m_new_state, NULL, NULL, stmt);
+ (m_eg, m_enode_for_diag, m_old_state, m_new_state, nullptr, nullptr, stmt);
if (const svalue *sval
= m_new_state->m_region_model->get_gassign_result (assign_stmt,
&old_ctxt))
}
gcc_unreachable ();
- return NULL;
+ return nullptr;
}
void update_event_loc_info (event_loc_info &) final override
sm.on_condition (sm_ctxt,
(m_enode_for_diag
? m_enode_for_diag->get_supernode ()
- : NULL),
+ : nullptr),
m_stmt,
lhs, op, rhs);
}
sm.on_bounded_ranges (sm_ctxt,
(m_enode_for_diag
? m_enode_for_diag->get_supernode ()
- : NULL),
+ : nullptr),
m_stmt, sval, ranges);
}
}
= old_state.m_checker_states[sm_idx];
sm_state_map *new_smap = state->m_checker_states[sm_idx];
impl_sm_context sm_ctxt (eg, sm_idx, sm, this, &old_state, state,
- old_smap, new_smap, path_ctxt, NULL,
+ old_smap, new_smap, path_ctxt, nullptr,
unknown_side_effects);
/* Allow the state_machine to handle the stmt. */
stale_jmp_buf (const gcall &setjmp_call, const gcall &longjmp_call,
const program_point &setjmp_point)
: m_setjmp_call (setjmp_call), m_longjmp_call (longjmp_call),
- m_setjmp_point (setjmp_point), m_stack_pop_event (NULL)
+ m_setjmp_point (setjmp_point), m_stack_pop_event (nullptr)
{}
int get_controlling_option () const final override
setjmp_point.get_stack_depth (), ctxt);
/* Detect leaks in the new state relative to the old state. */
- program_state::detect_leaks (get_state (), *new_state, NULL,
+ program_state::detect_leaks (get_state (), *new_state, nullptr,
eg.get_ext_state (), ctxt);
program_point next_point
if (next)
{
exploded_edge *eedge
- = eg.add_edge (const_cast<exploded_node *> (this), next, NULL, true,
+ = eg.add_edge (const_cast<exploded_node *> (this), next, nullptr, true,
std::make_unique<rewind_info_t> (tmp_setjmp_record,
longjmp_call));
if (!next_enode)
return;
- add_edge (iter_enode, next_enode, NULL, false, nullptr);
+ add_edge (iter_enode, next_enode, nullptr, false, nullptr);
return;
}
else
throw_stmt);
program_state::detect_leaks (iter_enode->get_state (),
unwound_state,
- NULL,
+ nullptr,
get_ext_state (), &ctxt);
}
const call_string &cs = iter_enode->get_point ().get_call_string ();
if (!after_unwind_enode)
return;
- add_edge (iter_enode, after_unwind_enode, NULL, true,
+ add_edge (iter_enode, after_unwind_enode, nullptr, true,
std::move (unwind_edge_info));
iter_enode = after_unwind_enode;
}
return;
/* Create custom exploded_edge for a throw. */
- eg.add_edge (this, after_throw_enode, NULL, true,
+ eg.add_edge (this, after_throw_enode, nullptr, true,
std::move (throw_edge_info));
eg.unwind_from_exception (*after_throw_enode, &throw_call, ctxt);
uncertainty_t uncertainty;
impl_region_model_context ctxt (eg, this,
- &old_state, &new_state, &uncertainty, NULL,
+ &old_state, &new_state, &uncertainty, nullptr,
get_stmt ());
- const svalue *result = NULL;
- new_state.m_region_model->pop_frame (NULL, &result, &ctxt, nullptr);
+ const svalue *result = nullptr;
+ new_state.m_region_model->pop_frame (nullptr, &result, &ctxt, nullptr);
program_state::detect_leaks (old_state, new_state, result,
eg.get_ext_state (), &ctxt);
}
model->on_longjmp (get_longjmp_call (),
get_setjmp_call (),
- setjmp_point.get_stack_depth (), NULL);
+ setjmp_point.get_stack_depth (), nullptr);
return true;
}
worklist::worklist (const exploded_graph &eg, const analysis_plan &plan)
: m_scc (eg.get_supergraph (), eg.get_logger ()),
m_plan (plan),
- m_queue (key_t (*this, NULL))
+ m_queue (key_t (*this, nullptr))
{
}
if (flag_analyzer_call_summaries
&& call_string_a.empty_p ()
&& call_string_b.empty_p ()
- && point_a.get_function () != NULL
- && point_b.get_function () != NULL
+ && point_a.get_function () != nullptr
+ && point_b.get_function () != nullptr
&& point_a.get_function () != point_b.get_function ())
{
if (int cmp = ka.m_worklist.m_plan.cmp_function (point_a.get_function (),
ordering). */
const supernode *snode_a = ka.m_enode->get_supernode ();
const supernode *snode_b = kb.m_enode->get_supernode ();
- if (snode_a == NULL)
+ if (snode_a == nullptr)
{
- if (snode_b != NULL)
- /* One is NULL. */
+ if (snode_b != nullptr)
+ /* One is nullptr. */
return -1;
else
- /* Both are NULL. */
+ /* Both are nullptr. */
return 0;
}
- if (snode_b == NULL)
- /* One is NULL. */
+ if (snode_b == nullptr)
+ /* One is nullptr. */
return 1;
- /* Neither are NULL. */
+ /* Neither is nullptr. */
gcc_assert (snode_a && snode_b);
if (snode_a->m_index != snode_b->m_index)
return snode_a->m_index - snode_b->m_index;
{
m_origin = get_or_create_node
(program_point::origin (*ext_state.get_model_manager ()),
- program_state (ext_state), NULL);
+ program_state (ext_state), nullptr);
for (int i = 0; i < m_sg.num_nodes (); i++)
m_PK_AFTER_SUPERNODE_per_snode.quick_push (i);
}
tree param = iter_parm;
if (tree parm_default_ssa = ssa_default_def (fun, iter_parm))
param = parm_default_ssa;
- const region *param_reg = state->m_region_model->get_lvalue (param, NULL);
+ const region *param_reg = state->m_region_model->get_lvalue (param, nullptr);
const svalue *init_sval = mgr->get_or_create_initial_value (param_reg);
smap->set_state (state->m_region_model, init_sval,
- tainted, NULL /*origin_new_sval*/, ext_state);
+ tainted, nullptr /*origin_new_sval*/, ext_state);
if (POINTER_TYPE_P (TREE_TYPE (param)))
{
const region *pointee_reg = mgr->get_symbolic_region (init_sval);
const svalue *init_pointee_sval
= mgr->get_or_create_initial_value (pointee_reg);
smap->set_state (state->m_region_model, init_pointee_sval,
- tainted, NULL /*origin_new_sval*/, ext_state);
+ tainted, nullptr /*origin_new_sval*/, ext_state);
}
}
logger * const logger = get_logger ();
if (logger)
logger->log ("entrypoint for %qE already exists", fun.decl);
- return NULL;
+ return nullptr;
}
program_point point
program_state state (m_ext_state);
state.push_frame (m_ext_state, fun);
- std::unique_ptr<custom_edge_info> edge_info = NULL;
+ std::unique_ptr<custom_edge_info> edge_info = nullptr;
if (lookup_attribute ("tainted_args", DECL_ATTRIBUTES (fun.decl)))
{
}
if (!state.m_valid)
- return NULL;
+ return nullptr;
- exploded_node *enode = get_or_create_node (point, state, NULL);
+ exploded_node *enode = get_or_create_node (point, state, nullptr);
if (!enode)
- return NULL;
+ return nullptr;
- add_edge (m_origin, enode, NULL, false, std::move (edge_info));
+ add_edge (m_origin, enode, nullptr, false, std::move (edge_info));
m_functions_with_enodes.add (key);
{
if (logger)
logger->log ("invalid state; not creating node");
- return NULL;
+ return nullptr;
}
auto_cfun sentinel (point.get_function ());
"terminating analysis for this program point: %s",
pp_formatted_text (&pp));
per_point_data->m_excess_enodes++;
- return NULL;
+ return nullptr;
}
ps.validate (m_ext_state);
}
/* Get this graph's per-program-point-data for POINT if there is any,
- otherwise NULL. */
+ otherwise nullptr. */
per_program_point_data *
exploded_graph::get_per_program_point_data (const program_point &point) const
= const_cast <point_map_t &> (m_per_point_data).get (&point))
return *slot;
- return NULL;
+ return nullptr;
}
/* Ensure that this graph has per-call_string-data for CS;
}
/* Get this graph's per-function-data for FUN if there is any,
- otherwise NULL. */
+ otherwise nullptr. */
per_function_data *
exploded_graph::get_per_function_data (function *fun) const
= const_cast <per_function_data_t &> (m_per_function_data).get (fun))
return *slot;
- return NULL;
+ return nullptr;
}
/* Return true if FUN should be traversed directly, rather than only as
if (!state.m_valid)
return;
- exploded_node *enode = eg->get_or_create_node (point, state, NULL);
+ exploded_node *enode = eg->get_or_create_node (point, state, nullptr);
if (logger)
{
if (enode)
}
}
- eg->add_edge (eg->get_origin (), enode, NULL, false,
+ eg->add_edge (eg->get_origin (), enode, nullptr, false,
std::make_unique<tainted_args_call_info> (field, fndecl, loc));
}
tree init = DECL_INITIAL (decl);
if (!init)
continue;
- walk_tree (&init, add_any_callbacks, this, NULL);
+ walk_tree (&init, add_any_callbacks, this, nullptr);
}
}
if (merged_state == state)
{
/* Then merge node_2 into node by adding an edge. */
- add_edge (node_2, node, NULL, false);
+ add_edge (node_2, node, nullptr, false);
/* Remove node_2 from the worklist. */
m_worklist.take_next ();
/* Then merge node into node_2, and leave node_2
in the worklist, to be processed on the next
iteration. */
- add_edge (node, node_2, NULL, false);
+ add_edge (node, node_2, nullptr, false);
node->set_status (exploded_node::status::merger);
continue;
}
exploded_node *merged_enode
= get_or_create_node (node->get_point (),
merged_state, node);
- if (merged_enode == NULL)
+ if (merged_enode == nullptr)
continue;
if (logger)
m_worklist.add_node (merged_enode);
else
{
- add_edge (node, merged_enode, NULL, false);
+ add_edge (node, merged_enode, nullptr, false);
node->set_status (exploded_node::status::merger);
}
m_worklist.add_node (merged_enode);
else
{
- add_edge (node_2, merged_enode, NULL, false);
+ add_edge (node_2, merged_enode, nullptr, false);
node_2->set_status (exploded_node::status::merger);
}
uncertainty_t uncertainty;
impl_region_model_context ctxt (*this, iter_enode,
&state, next_state,
- &uncertainty, NULL, NULL);
+ &uncertainty, nullptr, nullptr);
const cfg_superedge *last_cfg_superedge
= iter_sedge->dyn_cast_cfg_superedge ();
if (last_cfg_superedge)
= first_item_for_each_merged_state[i]->m_input_enode;
exploded_node *next
= get_or_create_node (next_point, *merged_state, src_enode);
- /* "next" could be NULL; we handle that when adding the edges below. */
+ /* "next" could be nullptr; we handle that when adding the edges below. */
next_enodes.quick_push (next);
if (logger)
{
{
exploded_node *next = next_enodes[it->m_merger_idx];
if (next)
- add_edge (it->m_input_enode, next, NULL,
+ add_edge (it->m_input_enode, next, nullptr,
false); /* no "work" is done during merger. */
it->m_input_enode->set_status (exploded_node::status::bulk_merged);
}
program_point new_point
= program_point::before_supernode (sn_entry,
- NULL,
+ nullptr,
this_point->get_call_string ());
new_point.push_to_call_stack (sn_exit,
next_state,
node);
if (enode)
- add_edge (node,enode, NULL,
+ add_edge (node,enode, nullptr,
false, /* No work is done by the call itself. */
std::make_unique<dynamic_call_info_t> (call));
return true;
{
impl_region_model_context ctxt (*this, node,
&state, &next_state,
- &uncertainty, NULL, NULL);
+ &uncertainty, nullptr, nullptr);
const cfg_superedge *last_cfg_superedge
= point.get_from_edge ()->dyn_cast_cfg_superedge ();
if (last_cfg_superedge)
(node->get_supernode (),
last_cfg_superedge,
&ctxt);
- program_state::detect_leaks (state, next_state, NULL,
+ program_state::detect_leaks (state, next_state, nullptr,
get_ext_state (), &ctxt);
}
program_point next_point (point.get_next ());
exploded_node *next = get_or_create_node (next_point, next_state, node);
if (next)
- add_edge (node, next, NULL,
+ add_edge (node, next, nullptr,
false); /* Assume no work is done at phi nodes. */
}
break;
uncertainty_t uncertainty;
const supernode *snode = point.get_supernode ();
unsigned stmt_idx;
- const gimple *prev_stmt = NULL;
+ const gimple *prev_stmt = nullptr;
for (stmt_idx = point.get_stmt_idx ();
stmt_idx < snode->m_stmts.length ();
stmt_idx++)
{
impl_region_model_context ctxt (*this, node,
&old_state, &next_state,
- &uncertainty, NULL, stmt);
- program_state::detect_leaks (old_state, next_state, NULL,
+ &uncertainty, nullptr, stmt);
+ program_state::detect_leaks (old_state, next_state, nullptr,
get_ext_state (), &ctxt);
}
node->m_num_processed_stmts--;
if (logger)
logger->log ("creating edge to split_enode");
- add_edge (node, split_enode, NULL, could_have_done_work);
+ add_edge (node, split_enode, nullptr, could_have_done_work);
return;
}
else
exploded_node *next
= get_or_create_node (next_point, next_state, node);
if (next)
- add_edge (node, next, NULL, could_have_done_work);
+ add_edge (node, next, nullptr, could_have_done_work);
}
/* If we have custom edge infos, "bifurcate" the state
node, // enode_for_diag
&path_ctxt.get_state_at_bifurcation (),
&bifurcated_new_state,
- NULL, // uncertainty_t *uncertainty
- NULL, // path_context *path_ctxt
+ nullptr, // uncertainty_t *uncertainty
+ nullptr, // path_context *path_ctxt
stmt);
if (edge_info->update_state (&bifurcated_new_state,
- NULL, /* no exploded_edge yet. */
+ nullptr, /* no exploded_edge yet. */
&bifurcation_ctxt))
{
if (exploded_node *next2
node,
&bifurcation_ctxt))
{
- add_edge (node, next2, NULL,
+ add_edge (node, next2, nullptr,
true /* assume that work could be done */,
std::move (edge_info));
}
&state,
&next_state,
&uncertainty,
- NULL,
+ nullptr,
point.get_stmt());
region_model *model = state.m_region_model;
logger);
if (!call_discovered)
{
- /* Check for jump through NULL. */
+ /* Check for jump through nullptr. */
if (tree fn_ptr = gimple_call_fn (&call))
{
const svalue *fn_ptr_sval
const call_string &cs = point.get_call_string ();
program_point next_point
= program_point::before_supernode (cs.get_caller_node (),
- NULL,
+ nullptr,
cs);
program_state next_state (state);
uncertainty_t uncertainty;
next_state,
node);
if (enode)
- add_edge (node, enode, NULL, false,
+ add_edge (node, enode, nullptr, false,
std::make_unique<dynamic_call_info_t> (*call, true));
}
}
}
/* Ensure that this graph has a stats instance for FN, return it.
- FN can be NULL, in which case a stats instances is returned covering
+ FN can be nullptr, in which case a stats instances is returned covering
"functionless" parts of the graph (the origin node). */
stats *
feasibility_state::update_for_stmt (const gimple *stmt)
{
if (const gassign *assign = dyn_cast <const gassign *> (stmt))
- m_model.on_assignment (assign, NULL);
+ m_model.on_assignment (assign, nullptr);
else if (const gasm *asm_stmt = dyn_cast <const gasm *> (stmt))
- m_model.on_asm_stmt (asm_stmt, NULL);
+ m_model.on_asm_stmt (asm_stmt, nullptr);
else if (const gcall *call = dyn_cast <const gcall *> (stmt))
{
- bool unknown_side_effects = m_model.on_call_pre (*call, NULL);
- m_model.on_call_post (*call, unknown_side_effects, NULL);
+ bool unknown_side_effects = m_model.on_call_pre (*call, nullptr);
+ m_model.on_call_post (*call, unknown_side_effects, nullptr);
}
else if (const greturn *return_ = dyn_cast <const greturn *> (stmt))
- m_model.on_return (return_, NULL);
+ m_model.on_return (return_, nullptr);
}
/* Dump this object to PP. */
inline void
pod_hash_traits<function_call_string>::mark_empty (value_type &v)
{
- v.m_fun = NULL;
+ v.m_fun = nullptr;
}
template <>
inline bool
inline bool
pod_hash_traits<function_call_string>::is_empty (value_type v)
{
- return v.m_fun == NULL;
+ return v.m_fun == nullptr;
}
namespace ana {
{
auto_timevar tv (TV_ANALYZER_DUMP);
char *filename
- = concat (dump_base_name, ".eg.txt", NULL);
+ = concat (dump_base_name, ".eg.txt", nullptr);
FILE *outf = fopen (filename, "w");
if (!outf)
error_at (UNKNOWN_LOCATION, "unable to open %qs for writing", filename);
// TODO
viz_callgraph vcg (sg);
- vcg.dump_dot (filename, NULL, viz_callgraph_traits::dump_args_t (eg));
+ vcg.dump_dot (filename, nullptr, viz_callgraph_traits::dump_args_t (eg));
fclose (outf);
}
dump_callgraph (const supergraph &sg, const exploded_graph *eg)
{
auto_timevar tv (TV_ANALYZER_DUMP);
- char *filename = concat (dump_base_name, ".callgraph.dot", NULL);
+ char *filename = concat (dump_base_name, ".callgraph.dot", nullptr);
dump_callgraph (sg, filename, eg);
free (filename);
}
const exploded_graph &eg)
{
auto_timevar tv (TV_ANALYZER_DUMP);
- char *filename = concat (dump_base_name, ".analyzer.json.gz", NULL);
+ char *filename = concat (dump_base_name, ".analyzer.json.gz", nullptr);
gzFile output = gzopen (filename, "w");
if (!output)
{
engine eng (&sg, logger);
- state_purge_map *purge_map = NULL;
+ state_purge_map *purge_map = nullptr;
if (flag_analyzer_state_purge)
purge_map = new state_purge_map (sg, eng.get_model_manager (), logger);
{
/* Dump supergraph pre-analysis. */
auto_timevar tv (TV_ANALYZER_DUMP);
- char *filename = concat (dump_base_name, ".supergraph.dot", NULL);
- supergraph::dump_args_t args ((enum supergraph_dot_flags)0, NULL);
+ char *filename = concat (dump_base_name, ".supergraph.dot", nullptr);
+ supergraph::dump_args_t args ((enum supergraph_dot_flags)0, nullptr);
sg.dump_dot (filename, args);
free (filename);
}
{
auto_timevar tv (TV_ANALYZER_DUMP);
state_purge_annotator a (purge_map);
- char *filename = concat (dump_base_name, ".state-purge.dot", NULL);
+ char *filename = concat (dump_base_name, ".state-purge.dot", nullptr);
supergraph::dump_args_t args ((enum supergraph_dot_flags)0, &a);
sg.dump_dot (filename, args);
free (filename);
{
auto_timevar tv (TV_ANALYZER_DUMP);
char *filename
- = concat (dump_base_name, ".eg.dot", NULL);
+ = concat (dump_base_name, ".eg.dot", nullptr);
exploded_graph::dump_args_t args (eg);
root_cluster c;
eg.dump_dot (filename, &c, args);
{
/* Dump post-analysis form of supergraph. */
auto_timevar tv (TV_ANALYZER_DUMP);
- char *filename = concat (dump_base_name, ".supergraph-eg.dot", NULL);
+ char *filename = concat (dump_base_name, ".supergraph-eg.dot", nullptr);
exploded_graph_annotator a (eg);
supergraph::dump_args_t args ((enum supergraph_dot_flags)0, &a);
sg.dump_dot (filename, args);
}
/* Handle -fdump-analyzer and -fdump-analyzer-stderr. */
-static FILE *dump_fout = NULL;
+static FILE *dump_fout = nullptr;
/* Track if we're responsible for closing dump_fout. */
static bool owns_dump_fout = false;
dump_fout = stderr;
else if (flag_dump_analyzer)
{
- char *dump_filename = concat (dump_base_name, ".analyzer.txt", NULL);
+ char *dump_filename = concat (dump_base_name, ".analyzer.txt", nullptr);
dump_fout = fopen (dump_filename, "w");
free (dump_filename);
if (dump_fout)
location_t saved_input_location = input_location;
{
- log_user the_logger (NULL);
+ log_user the_logger (nullptr);
get_or_create_any_logfile ();
if (dump_fout)
the_logger.set_logger (new logger (dump_fout, 0, 0,
{
fclose (dump_fout);
owns_dump_fout = false;
- dump_fout = NULL;
+ dump_fout = nullptr;
}
/* Restore input_location. Subsequent passes may assume that input_location
path_context *path_ctxt,
const gimple *stmt,
- stmt_finder *stmt_finder = NULL,
+ stmt_finder *stmt_finder = nullptr,
bool *out_could_have_done_work = nullptr);
impl_region_model_context (program_state *state,
const extrinsic_state &ext_state,
uncertainty_t *uncertainty,
- logger *logger = NULL);
+ logger *logger = nullptr);
bool warn (std::unique_ptr<pending_diagnostic> d,
- const stmt_finder *custom_finder = NULL) final override;
+ const stmt_finder *custom_finder = nullptr) final override;
void add_note (std::unique_ptr<pending_note> pn) final override;
void add_event (std::unique_ptr<checker_event> event) final override;
void on_svalue_leak (const svalue *) override;
//private:
const superedge *const m_sedge;
- /* NULL for most edges; will be non-NULL for special cases
+ /* nullptr for most edges; will be non-null for special cases
such as an unwind from a longjmp to a setjmp, or when
a signal is delivered to a signal-handler. */
std::unique_ptr<custom_edge_info> m_custom_info;
static inline hashval_t hash (const key_type &k)
{
- gcc_assert (k != NULL);
+ gcc_assert (k != nullptr);
gcc_assert (k != reinterpret_cast<key_type> (1));
return k->hash ();
}
static inline bool equal_keys (const key_type &k1, const key_type &k2)
{
- gcc_assert (k1 != NULL);
- gcc_assert (k2 != NULL);
+ gcc_assert (k1 != nullptr);
+ gcc_assert (k2 != nullptr);
gcc_assert (k1 != reinterpret_cast<key_type> (1));
gcc_assert (k2 != reinterpret_cast<key_type> (1));
if (k1 && k2)
template <typename T>
static inline void mark_empty (T &entry)
{
- entry.m_key = NULL;
+ entry.m_key = nullptr;
}
template <typename T>
static inline bool is_deleted (const T &entry)
template <typename T>
static inline bool is_empty (const T &entry)
{
- return entry.m_key == NULL;
+ return entry.m_key == nullptr;
}
static const bool empty_zero_p = false;
};
static inline hashval_t hash (const key_type &k)
{
- gcc_assert (k != NULL);
+ gcc_assert (k != nullptr);
gcc_assert (k != reinterpret_cast<key_type> (1));
return k->hash ();
}
static inline bool equal_keys (const key_type &k1, const key_type &k2)
{
- gcc_assert (k1 != NULL);
- gcc_assert (k2 != NULL);
+ gcc_assert (k1 != nullptr);
+ gcc_assert (k2 != nullptr);
gcc_assert (k1 != reinterpret_cast<key_type> (1));
gcc_assert (k2 != reinterpret_cast<key_type> (1));
if (k1 && k2)
template <typename T>
static inline void mark_empty (T &entry)
{
- entry.m_key = NULL;
+ entry.m_key = nullptr;
}
template <typename T>
static inline bool is_deleted (const T &entry)
template <typename T>
static inline bool is_empty (const T &entry)
{
- return entry.m_key == NULL;
+ return entry.m_key == nullptr;
}
static const bool empty_zero_p = false;
};
int get_scc_id (const exploded_node *enode) const
{
const supernode *snode = enode->get_supernode ();
- if (snode == NULL)
+ if (snode == nullptr)
return 0;
return m_worklist.m_scc.get_scc_id (snode->m_index);
}
bool add_to_worklist = true);
exploded_edge *add_edge (exploded_node *src, exploded_node *dest,
const superedge *sedge, bool could_do_work,
- std::unique_ptr<custom_edge_info> custom = NULL);
+ std::unique_ptr<custom_edge_info> custom = nullptr);
per_program_point_data *
get_or_create_per_program_point_data (const program_point &);
void dump_to_pp (pretty_printer *pp,
const extrinsic_state *ext_state) const;
void dump (FILE *fp, const extrinsic_state *ext_state) const;
- void dump (const extrinsic_state *ext_state = NULL) const;
+ void dump (const extrinsic_state *ext_state = nullptr) const;
void dump_to_file (const char *filename,
const extrinsic_state &ext_state) const;
typedef shortest_paths<eg_traits, exploded_path> shortest_exploded_paths;
-/* Abstract base class for use when passing NULL as the stmt for
+/* Abstract base class for use when passing nullptr as the stmt for
a possible warning, allowing the choice of stmt to be deferred
until after we have an emission path (and know we're emitting a
warning). */
static void
test_empty ()
{
- function_set fs (NULL, 0);
+ function_set fs (nullptr, 0);
fs.assert_sorted ();
fs.assert_sane ();
ASSERT_FALSE (fs.contains_name_p (""));
: m_prev_entry_enode (prev_entry_enode),
m_new_entry_enode (new_entry_enode),
m_callee_fndecl (callee_fndecl),
- m_prev_entry_event (NULL)
+ m_prev_entry_event (nullptr)
{}
const char *get_kind () const final override
const program_point &dst_point = dst_node->get_point ();
if (eedge.m_dest == m_prev_entry_enode)
{
- gcc_assert (m_prev_entry_event == NULL);
+ gcc_assert (m_prev_entry_event == nullptr);
std::unique_ptr<checker_event> prev_entry_event
= std::make_unique <recursive_function_entry_event>
(dst_point,
bool m_found_conjured_svalues;
};
- const svalue *sval = model.get_rvalue (expr, NULL);
+ const svalue *sval = model.get_rvalue (expr, nullptr);
conjured_svalue_finder v;
sval->accept (&v);
return v.m_found_conjured_svalues;
}
/* Not found. */
- return NULL;
+ return nullptr;
}
/* Given BASE_REG within ENCLOSING_FRAME (such as a function parameter),
const decl_region *decl_reg = (const decl_region *)base_reg;
return equiv_prev_frame->get_region_for_local (mgr,
decl_reg->get_decl (),
- NULL);
+ nullptr);
}
}
}
/* Get the value within the new frame. */
const svalue *new_sval
- = new_model.get_store_value (base_reg, NULL);
+ = new_model.get_store_value (base_reg, nullptr);
/* If any part of the value is UNKNOWN (e.g. due to hitting
complexity limits) assume that it differs from the previous
to the recursion. */
const int old_stack_depth = prev_entry_enode->get_stack_depth ();
if (enclosing_frame->get_stack_depth () < old_stack_depth)
- prev_sval = prev_model.get_store_value (base_reg, NULL);
+ prev_sval = prev_model.get_store_value (base_reg, nullptr);
else
{
/* Ignore bindings within frames below the new entry node. */
equiv_prev_frame,
new_model.get_manager ());
prev_sval
- = prev_model.get_store_value (equiv_prev_base_reg, NULL);
+ = prev_model.get_store_value (equiv_prev_base_reg, nullptr);
}
}
else
- prev_sval = prev_model.get_store_value (base_reg, NULL);
+ prev_sval = prev_model.get_store_value (base_reg, nullptr);
/* If the prev_sval contains UNKNOWN (e.g. due to hitting complexity limits)
assume that it will differ from any new value. */
public:
inlining_iterator (location_t loc)
: m_abstract_origin (LOCATION_BLOCK (loc)),
- m_callsite (UNKNOWN_LOCATION), m_fndecl (NULL),
+ m_callsite (UNKNOWN_LOCATION), m_fndecl (NULL_TREE),
- m_next_abstract_origin (NULL)
+ m_next_abstract_origin (NULL_TREE)
{
prepare_iteration ();
return;
tree block = m_abstract_origin;
m_callsite = BLOCK_SOURCE_LOCATION (block);
- m_fndecl = NULL;
+ m_fndecl = NULL_TREE;
block = BLOCK_SUPERCONTEXT (block);
while (block && TREE_CODE (block) == BLOCK
&& BLOCK_ABSTRACT_ORIGIN (block))
const svalue *copied_size_sval
= get_copied_size (model, old_size_sval, new_size_sval);
const region *copied_old_reg
- = mgr->get_sized_region (freed_reg, NULL, copied_size_sval);
+ = mgr->get_sized_region (freed_reg, nullptr, copied_size_sval);
const svalue *buffer_content_sval
= model->get_store_value (copied_old_reg, cd.get_ctxt ());
const region *copied_new_reg
- = mgr->get_sized_region (new_reg, NULL, copied_size_sval);
+ = mgr->get_sized_region (new_reg, nullptr, copied_size_sval);
model->set_value (copied_new_reg, buffer_content_sval,
cd.get_ctxt ());
}
}
private:
/* (strlen + 1) of the source string if it has a terminator,
- or NULL for the case where UB would happen before
+ or nullptr for the case where UB would happen before
finding any terminator. */
const svalue *m_num_bytes_with_terminator_sval;
region_model_manager *mgr = cd.get_manager ();
/* Ideally we'd get the size here, and simulate copying the bytes. */
const region *new_reg
- = model->get_or_create_region_for_heap_alloc (NULL, cd.get_ctxt ());
- model->mark_region_as_unknown (new_reg, NULL);
+ = model->get_or_create_region_for_heap_alloc (nullptr, cd.get_ctxt ());
+ model->mark_region_as_unknown (new_reg, nullptr);
if (cd.get_lhs_type ())
{
const svalue *ptr_sval
The call must match all assumptions made by the known_function (such as
e.g. "argument 1's type must be a pointer type").
- Return NULL if no known_function is found, or it does not match the
+ Return nullptr if no known_function is found, or it does not match the
assumption(s). */
const known_function *
if (DECL_CONTEXT (fndecl)
&& TREE_CODE (DECL_CONTEXT (fndecl)) != TRANSLATION_UNIT_DECL)
- return NULL;
+ return nullptr;
if (tree identifier = DECL_NAME (fndecl))
if (const known_function *candidate = get_by_identifier (identifier))
if (candidate->matches_call_types_p (cd))
return candidate;
- return NULL;
+ return nullptr;
}
-/* Get any known_function for IFN, or NULL. */
+/* Get any known_function for IFN, or nullptr. */
const known_function *
known_function_manager::get_internal_fn (enum internal_fn ifn) const
}
/* Get any known_function for NAME, without type-checking.
- Return NULL if there isn't one. */
+ Return nullptr if there isn't one. */
const known_function *
known_function_manager::get_normal_builtin (enum built_in_function name) const
}
/* Get any known_function matching IDENTIFIER, without type-checking.
- Return NULL if there isn't one. */
+ Return nullptr if there isn't one. */
const known_function *
known_function_manager::get_by_identifier (tree identifier) const
if (slot)
return *slot;
else
- return NULL;
+ return nullptr;
}
/* Get any known_function in C++ std:: namespace matching IDENTIFIER, without
const line_map_macro *macro_map = linemap_check_macro (map);
if (fixup_location_in_macro_p (macro_map->macro))
loc = linemap_resolve_location (line_table, loc,
- LRK_MACRO_EXPANSION_POINT, NULL);
+ LRK_MACRO_EXPANSION_POINT, nullptr);
}
return loc;
}
if (m_supernode)
return m_supernode->m_fun;
else
- return NULL;
+ return nullptr;
}
/* Get the gimple stmt for this function_point, if any. */
else if (m_kind == PK_AFTER_SUPERNODE)
return m_supernode->get_last_stmt ();
else
- return NULL;
+ return nullptr;
}
/* Get a location for this function_point, if any. */
function_point
function_point::from_function_entry (const supergraph &sg, const function &fun)
{
- return before_supernode (sg.get_node_for_function_entry (fun), NULL);
+ return before_supernode (sg.get_node_for_function_entry (fun), nullptr);
}
/* Create a function_point representing entering supernode SUPERNODE,
- having reached it via FROM_EDGE (which could be NULL). */
+ having reached it via FROM_EDGE (which could be nullptr). */
function_point
function_point::before_supernode (const supernode *supernode,
const superedge *from_edge)
{
if (from_edge && from_edge->get_kind () != SUPEREDGE_CFG_EDGE)
- from_edge = NULL;
+ from_edge = nullptr;
return function_point (supernode, from_edge, 0, PK_BEFORE_SUPERNODE);
}
program_point
program_point::origin (const region_model_manager &mgr)
{
- return program_point (function_point (NULL, NULL,
+ return program_point (function_point (nullptr, nullptr,
0, PK_ORIGIN),
mgr.get_empty_call_string ());
}
static void
test_function_point_equality ()
{
- const supernode *snode = NULL;
+ const supernode *snode = nullptr;
- function_point a = function_point (snode, NULL, 0,
+ function_point a = function_point (snode, nullptr, 0,
PK_BEFORE_SUPERNODE);
- function_point b = function_point::before_supernode (snode, NULL);
+ function_point b = function_point::before_supernode (snode, nullptr);
ASSERT_EQ (a, b);
}
static void
test_function_point_ordering ()
{
- const supernode *snode = NULL;
+ const supernode *snode = nullptr;
/* Populate an array with various points within the same
snode, in order. */
auto_vec<function_point> points;
- points.safe_push (function_point::before_supernode (snode, NULL));
+ points.safe_push (function_point::before_supernode (snode, nullptr));
points.safe_push (function_point::before_stmt (snode, 0));
points.safe_push (function_point::before_stmt (snode, 1));
points.safe_push (function_point::after_supernode (snode));
{
region_model_manager mgr;
- const supernode *snode = NULL;
+ const supernode *snode = nullptr;
const call_string &cs = mgr.get_empty_call_string ();
- program_point a = program_point::before_supernode (snode, NULL,
+ program_point a = program_point::before_supernode (snode, nullptr,
cs);
- program_point b = program_point::before_supernode (snode, NULL,
+ program_point b = program_point::before_supernode (snode, nullptr,
cs);
ASSERT_EQ (a, b);
static function_point before_stmt (const supernode *supernode,
unsigned stmt_idx)
{
- return function_point (supernode, NULL, stmt_idx, PK_BEFORE_STMT);
+ return function_point (supernode, nullptr, stmt_idx, PK_BEFORE_STMT);
}
static function_point after_supernode (const supernode *supernode)
{
- return function_point (supernode, NULL, 0, PK_AFTER_SUPERNODE);
+ return function_point (supernode, nullptr, 0, PK_AFTER_SUPERNODE);
}
/* Support for hash_map. */
static function_point empty ()
{
- return function_point (NULL, NULL, 0, PK_EMPTY);
+ return function_point (nullptr, nullptr, 0, PK_EMPTY);
}
static function_point deleted ()
{
- return function_point (NULL, NULL, 0, PK_DELETED);
+ return function_point (nullptr, nullptr, 0, PK_DELETED);
}
static int cmp_within_supernode_1 (const function_point &point_a,
private:
program_point (const function_point &fn_point)
: m_function_point (fn_point),
- m_call_string (NULL)
+ m_call_string (nullptr)
{
}
if (m_engine)
return m_engine->get_model_manager ();
else
- return NULL; /* for selftests. */
+ return nullptr; /* for selftests. */
}
/* Try to find a state machine named NAME.
sm_state_map::dump (bool simple) const
{
tree_dump_pretty_printer pp (stderr);
- print (NULL, simple, true, &pp);
+ print (nullptr, simple, true, &pp);
pp_newline (&pp);
}
const svalue *sval = (*iter).first;
entry_t e = (*iter).second;
entry_t *other_slot = const_cast <map_t &> (other.m_map).get (sval);
- if (other_slot == NULL)
+ if (other_slot == nullptr)
return false;
if (e != *other_slot)
return false;
if (slot)
return slot->m_origin;
else
- return NULL;
+ return nullptr;
}
/* Set the state of SID within MODEL to STATE, recording that
const svalue *origin,
const extrinsic_state &ext_state)
{
- if (model == NULL)
+ if (model == nullptr)
return;
/* Reject attempts to set state on UNKNOWN/POISONED. */
for (svalue_set::iterator iter = svals_to_unset.begin ();
iter != svals_to_unset.end (); ++iter)
- impl_set_state (*iter, (state_machine::state_t)0, NULL, ext_state);
+ impl_set_state (*iter, (state_machine::state_t)0, nullptr, ext_state);
}
/* Purge state for things involving SVAL.
for (svalue_set::iterator iter = svals_to_unset.begin ();
iter != svals_to_unset.end (); ++iter)
- impl_set_state (*iter, (state_machine::state_t)0, NULL, ext_state);
+ impl_set_state (*iter, (state_machine::state_t)0, nullptr, ext_state);
}
/* Comparator for imposing an order on sm_state_map instances. */
state_machine::state_t other_state = other.get_state (sval, ext_state);
if (state_machine::state_t merged_state
= sm.maybe_get_merged_state (this_state, other_state))
- (*out)->impl_set_state (sval, merged_state, NULL, ext_state);
+ (*out)->impl_set_state (sval, merged_state, nullptr, ext_state);
else
return false;
}
/* program_state's ctor. */
program_state::program_state (const extrinsic_state &ext_state)
-: m_region_model (NULL),
+: m_region_model (nullptr),
m_checker_states (ext_state.get_num_checkers ()),
m_valid (true)
{
const svalue *caller_origin
= (summary_origin
? r.convert_svalue_from_summary (summary_origin)
- : NULL);
- // caller_origin can be NULL.
+ : nullptr);
+ // caller_origin can be nullptr.
m_map.put (caller_sval, entry_t (kv.second.m_state, caller_origin));
}
m_global_state = summary.m_global_state;
: m_region_model (other.m_region_model),
m_checker_states (other.m_checker_states.length ())
{
- other.m_region_model = NULL;
+ other.m_region_model = nullptr;
int i;
sm_state_map *smap;
return false;
program_state::detect_leaks (enode->get_state (), *this,
- NULL, eg.get_ext_state (),
+ nullptr, eg.get_ext_state (),
&ctxt);
return true;
&enode->get_state (),
this,
uncertainty,
- NULL,
+ nullptr,
last_stmt);
m_region_model->update_for_gcall (call_stmt, &ctxt);
}
&enode->get_state (),
this,
uncertainty,
- NULL,
+ nullptr,
last_stmt);
m_region_model->update_for_return_gcall (call_stmt, &ctxt);
}
temporaries keep the value reachable until the frame is
popped. */
const svalue *sval
- = new_state.m_region_model->get_store_value (reg, NULL);
+ = new_state.m_region_model->get_store_value (reg, nullptr);
if (!new_state.can_purge_p (eg.get_ext_state (), sval)
&& SSA_NAME_VAR (ssa_name))
{
impl_region_model_context ctxt (eg, enode_for_diag,
this,
&new_state,
- uncertainty, NULL,
+ uncertainty, nullptr,
point.get_stmt ());
- detect_leaks (*this, new_state, NULL, eg.get_ext_state (), &ctxt);
+ detect_leaks (*this, new_state, nullptr, eg.get_ext_state (), &ctxt);
}
}
*might* still be reachable in dst_state. */
svalue_set known_src_svalues;
src_state.m_region_model->get_reachable_svalues (&known_src_svalues,
- NULL, NULL);
+ nullptr, nullptr);
svalue_set maybe_dest_svalues;
dest_state.m_region_model->get_reachable_svalues (&maybe_dest_svalues,
extra_sval, uncertainty);
tree y = build_global_decl ("y", integer_type_node);
tree z = build_global_decl ("z", integer_type_node);
- std::unique_ptr<state_machine> sm = make_malloc_state_machine (NULL);
+ std::unique_ptr<state_machine> sm = make_malloc_state_machine (nullptr);
state_machine::state_t start = sm->get_start_state ();
std::vector<std::unique_ptr<state_machine>> checkers;
const state_machine &borrowed_sm = *sm.get ();
const state_machine::state_t TEST_STATE_42 = &test_state_42;
region_model_manager mgr;
region_model model (&mgr);
- const svalue *x_sval = model.get_rvalue (x, NULL);
- const svalue *y_sval = model.get_rvalue (y, NULL);
- const svalue *z_sval = model.get_rvalue (z, NULL);
+ const svalue *x_sval = model.get_rvalue (x, nullptr);
+ const svalue *y_sval = model.get_rvalue (y, nullptr);
+ const svalue *z_sval = model.get_rvalue (z, nullptr);
sm_state_map map (borrowed_sm);
ASSERT_TRUE (map.is_empty_p ());
{
region_model_manager mgr;
region_model model (&mgr);
- const svalue *x_sval = model.get_rvalue (x, NULL);
- const svalue *y_sval = model.get_rvalue (y, NULL);
- const svalue *z_sval = model.get_rvalue (z, NULL);
+ const svalue *x_sval = model.get_rvalue (x, nullptr);
+ const svalue *y_sval = model.get_rvalue (y, nullptr);
+ const svalue *z_sval = model.get_rvalue (z, nullptr);
sm_state_map map (borrowed_sm);
ASSERT_TRUE (map.is_empty_p ());
ASSERT_EQ (map.get_state (x_sval, ext_state), start);
ASSERT_EQ (map.get_state (y_sval, ext_state), start);
- model.add_constraint (x, EQ_EXPR, y, NULL);
+ model.add_constraint (x, EQ_EXPR, y, nullptr);
/* Setting x to a state should also update y, as they
are in the same equivalence class. */
{
region_model_manager mgr;
region_model model (&mgr);
- const svalue *y_sval = model.get_rvalue (y, NULL);
- const svalue *z_sval = model.get_rvalue (z, NULL);
+ const svalue *y_sval = model.get_rvalue (y, nullptr);
+ const svalue *z_sval = model.get_rvalue (z, nullptr);
sm_state_map map0 (borrowed_sm);
sm_state_map map1 (borrowed_sm);
region_model_manager mgr;
region_model model (&mgr);
- const svalue *x_sval = model.get_rvalue (x, NULL);
- const svalue *y_sval = model.get_rvalue (y, NULL);
- const svalue *z_sval = model.get_rvalue (z, NULL);
+ const svalue *x_sval = model.get_rvalue (x, nullptr);
+ const svalue *y_sval = model.get_rvalue (y, nullptr);
+ const svalue *z_sval = model.get_rvalue (z, nullptr);
- map1.impl_set_state (x_sval, TEST_STATE_2, NULL, ext_state);
- map1.impl_set_state (y_sval, TEST_STATE_3, NULL, ext_state);
- map1.impl_set_state (z_sval, TEST_STATE_2, NULL, ext_state);
+ map1.impl_set_state (x_sval, TEST_STATE_2, nullptr, ext_state);
+ map1.impl_set_state (y_sval, TEST_STATE_3, nullptr, ext_state);
+ map1.impl_set_state (z_sval, TEST_STATE_2, nullptr, ext_state);
- map2.impl_set_state (z_sval, TEST_STATE_2, NULL, ext_state);
- map2.impl_set_state (y_sval, TEST_STATE_3, NULL, ext_state);
- map2.impl_set_state (x_sval, TEST_STATE_2, NULL, ext_state);
+ map2.impl_set_state (z_sval, TEST_STATE_2, nullptr, ext_state);
+ map2.impl_set_state (y_sval, TEST_STATE_3, nullptr, ext_state);
+ map2.impl_set_state (x_sval, TEST_STATE_2, nullptr, ext_state);
ASSERT_EQ (map1.hash (), map2.hash ());
ASSERT_EQ (map1, map2);
malloc sm-state, pointing to a region on the heap. */
tree p = build_global_decl ("p", ptr_type_node);
- std::unique_ptr<state_machine> sm = make_malloc_state_machine (NULL);
+ std::unique_ptr<state_machine> sm = make_malloc_state_machine (nullptr);
const state_machine::state_t UNCHECKED_STATE
= sm->get_state_by_name ("unchecked");
const svalue *size_in_bytes
= mgr->get_or_create_unknown_svalue (size_type_node);
const region *new_reg
- = model->get_or_create_region_for_heap_alloc (size_in_bytes, NULL);
+ = model->get_or_create_region_for_heap_alloc (size_in_bytes, nullptr);
const svalue *ptr_sval = mgr->get_ptr_svalue (ptr_type_node, new_reg);
- model->set_value (model->get_lvalue (p, NULL),
- ptr_sval, NULL);
+ model->set_value (model->get_lvalue (p, nullptr),
+ ptr_sval, nullptr);
sm_state_map *smap = s.m_checker_states[0];
- smap->impl_set_state (ptr_sval, UNCHECKED_STATE, NULL, ext_state);
+ smap->impl_set_state (ptr_sval, UNCHECKED_STATE, nullptr, ext_state);
ASSERT_EQ (smap->get_state (ptr_sval, ext_state), UNCHECKED_STATE);
}
program_state s (ext_state);
region_model *model = s.m_region_model;
- const region *p_reg = model->get_lvalue (p, NULL);
- const svalue *str_sval = model->get_rvalue (string_cst_ptr, NULL);
- model->set_value (p_reg, str_sval, NULL);
+ const region *p_reg = model->get_lvalue (p, nullptr);
+ const svalue *str_sval = model->get_rvalue (string_cst_ptr, nullptr);
+ model->set_value (p_reg, str_sval, nullptr);
}
/* Verify that program_states with identical sm-state can be merged,
engine eng;
region_model_manager *mgr = eng.get_model_manager ();
program_point point (program_point::origin (*mgr));
- extrinsic_state ext_state (make_malloc_state_machine (NULL),
+ extrinsic_state ext_state (make_malloc_state_machine (nullptr),
&eng);
program_state s0 (ext_state);
const svalue *size_in_bytes
= mgr->get_or_create_unknown_svalue (size_type_node);
const region *new_reg
- = model0->get_or_create_region_for_heap_alloc (size_in_bytes, NULL);
+ = model0->get_or_create_region_for_heap_alloc (size_in_bytes, nullptr);
const svalue *ptr_sval = mgr->get_ptr_svalue (ptr_type_node, new_reg);
model0->set_value (model0->get_lvalue (p, &ctxt),
ptr_sval, &ctxt);
sm_state_map *smap = s0.m_checker_states[0];
const state_machine::state test_state ("test state", 0);
const state_machine::state_t TEST_STATE = &test_state;
- smap->impl_set_state (ptr_sval, TEST_STATE, NULL, ext_state);
+ smap->impl_set_state (ptr_sval, TEST_STATE, nullptr, ext_state);
ASSERT_EQ (smap->get_state (ptr_sval, ext_state), TEST_STATE);
model0->canonicalize ();
/* Verify that canonicalization preserves sm-state. */
- ASSERT_EQ (smap->get_state (model0->get_rvalue (p, NULL), ext_state),
+ ASSERT_EQ (smap->get_state (model0->get_rvalue (p, nullptr), ext_state),
TEST_STATE);
/* Make a copy of the program_state. */
/* Verify that the merged state has the sm-state for "p". */
region_model *merged_model = merged.m_region_model;
sm_state_map *merged_smap = merged.m_checker_states[0];
- ASSERT_EQ (merged_smap->get_state (merged_model->get_rvalue (p, NULL),
+ ASSERT_EQ (merged_smap->get_state (merged_model->get_rvalue (p, nullptr),
ext_state),
TEST_STATE);
merged.validate (ext_state);
/* Verify that the merged state still has the sm-state for "p". */
- ASSERT_EQ (merged_smap->get_state (merged_model->get_rvalue (p, NULL),
+ ASSERT_EQ (merged_smap->get_state (merged_model->get_rvalue (p, nullptr),
ext_state),
TEST_STATE);
engine eng;
region_model_manager *mgr = eng.get_model_manager ();
program_point point (program_point::origin (*mgr));
- extrinsic_state ext_state (make_signal_state_machine (NULL), &eng);
+ extrinsic_state ext_state (make_signal_state_machine (nullptr), &eng);
const state_machine::state test_state_0 ("test state 0", 0);
const state_machine::state test_state_1 ("test state 1", 1);
public:
extrinsic_state (std::vector<std::unique_ptr<state_machine>> &&checkers,
engine *eng,
- logger *logger = NULL)
+ logger *logger = nullptr)
: m_checkers (std::move (checkers)),
m_logger (logger),
m_engine (eng)
// For use in selftests that use just one state machine
extrinsic_state (std::unique_ptr<state_machine> sm,
engine *eng,
- logger *logger = NULL)
+ logger *logger = nullptr)
: m_logger (logger),
m_engine (eng)
{
{
/* Default ctor needed by hash_map::empty. */
entry_t ()
- : m_state (0), m_origin (NULL)
+ : m_state (0), m_origin (nullptr)
{
}
FOR_EACH_VEC_ELT (m_items, i, it)
if (it->contains_p (offset))
return it;
- return NULL;
+ return nullptr;
}
/* Subroutine of ctor. Add padding item to NEXT_OFFSET if necessary. */
void
region_model::on_asm_stmt (const gasm *stmt, region_model_context *ctxt)
{
- logger *logger = ctxt ? ctxt->get_logger () : NULL;
+ logger *logger = ctxt ? ctxt->get_logger () : nullptr;
LOG_SCOPE (logger);
const unsigned noutputs = gimple_asm_noutputs (stmt);
tree src_expr = input_tvec[i];
const svalue *src_sval = get_rvalue (src_expr, ctxt);
- check_for_poison (src_sval, src_expr, NULL, ctxt);
+ check_for_poison (src_sval, src_expr, nullptr, ctxt);
input_svals.quick_push (src_sval);
reachable_regs.handle_sval (src_sval);
m_root_region (alloc_symbol_id ()),
m_stack_region (alloc_symbol_id (), &m_root_region),
m_heap_region (alloc_symbol_id (), &m_root_region),
- m_unknown_NULL (NULL),
+ m_unknown_NULL (nullptr),
m_checking_feasibility (false),
m_max_complexity (0, 0),
m_code_region (alloc_symbol_id (), &m_root_region),
return get_or_create_int_cst (pointer_type, 0);
}
-/* Return the svalue * for a unknown_svalue for TYPE (which can be NULL),
+/* Return the svalue * for a unknown_svalue for TYPE (which can be NULL_TREE),
creating it if necessary.
The unknown_svalue instances are reused, based on pointer equality
of the types */
/* Subroutine of region_model_manager::get_or_create_unaryop.
Attempt to fold the inputs and return a simpler svalue *.
- Otherwise, return NULL. */
+ Otherwise, return nullptr. */
const svalue *
region_model_manager::maybe_fold_unaryop (tree type, enum tree_code op,
}
}
- return NULL;
+ return nullptr;
}
/* Return the svalue * for an unary operation OP on ARG with a result of
If COMPOUND_SVAL has a value for the appropriate bits, return it,
shifted accordingly.
- Otherwise return NULL. */
+ Otherwise return nullptr. */
const svalue *
region_model_manager::
if (!type)
return nullptr;
if (!INTEGRAL_TYPE_P (type))
- return NULL;
+ return nullptr;
const binding_map &map = compound_sval->get_map ();
unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (cst);
compound_sval has a value for those bits. */
bit_range bits (0, 0);
if (!bit_range::from_mask (mask, &bits))
- return NULL;
+ return nullptr;
bit_range bound_bits (bits);
if (BYTES_BIG_ENDIAN)
= get_store_manager ()->get_concrete_binding (bound_bits);
const svalue *sval = map.get (conc);
if (!sval)
- return NULL;
+ return nullptr;
/* We have a value;
shift it by the correct number of bits. */
/* Subroutine of region_model_manager::get_or_create_binop.
Attempt to fold the inputs and return a simpler svalue *.
- Otherwise, return NULL. */
+ Otherwise, return nullptr. */
const svalue *
region_model_manager::maybe_fold_binop (tree type, enum tree_code op,
if ((type && FLOAT_TYPE_P (type))
|| (arg0->get_type () && FLOAT_TYPE_P (arg0->get_type ()))
|| (arg1->get_type () && FLOAT_TYPE_P (arg1->get_type ())))
- return NULL;
+ return nullptr;
switch (op)
{
/* etc. */
- return NULL;
+ return nullptr;
}
/* Return the svalue * for an binary operation OP on ARG0 and ARG1
}
/* Subroutine of region_model_manager::get_or_create_sub_svalue.
- Return a folded svalue, or NULL. */
+ Return a folded svalue, or nullptr. */
const svalue *
region_model_manager::maybe_fold_sub_svalue (tree type,
if (type)
return get_or_create_cast (type, repeated_sval->get_inner_svalue ());
- return NULL;
+ return nullptr;
}
/* Return the svalue * for extracting a subvalue of type TYPE from
}
/* Subroutine of region_model_manager::get_or_create_repeated_svalue.
- Return a folded svalue, or NULL. */
+ Return a folded svalue, or nullptr. */
const svalue *
region_model_manager::maybe_fold_repeated_svalue (tree type,
if (zerop (cst) && type)
return get_or_create_cast (type, inner_svalue);
- return NULL;
+ return nullptr;
}
/* Return the svalue * of type TYPE in which INNER_SVALUE is repeated
}
break;
}
- return NULL;
+ return nullptr;
}
/* Return the svalue * of type TYPE for extracting BITS from INNER_SVALUE,
}
/* Subroutine of region_model_manager::get_or_create_asm_output_svalue.
- Return a folded svalue, or NULL. */
+ Return a folded svalue, or nullptr. */
const svalue *
region_model_manager::
if (iter->get_kind () == SK_UNKNOWN)
return get_or_create_unknown_svalue (type);
- return NULL;
+ return nullptr;
}
/* Return the svalue * of type TYPE for OUTPUT_IDX of the deterministic
/* Given DATA_CST (a STRING_CST or RAW_DATA_CST) and BYTE_OFFSET_CST a constant,
attempt to get the character at that offset, returning either
- the svalue for the character constant, or NULL if unsuccessful. */
+ the svalue for the character constant, or nullptr if unsuccessful. */
const svalue *
region_model_manager::maybe_get_char_from_cst (tree data_cst,
/* Given STRING_CST, a STRING_CST and BYTE_OFFSET_CST a constant,
attempt to get the character at that offset, returning either
- the svalue for the character constant, or NULL if unsuccessful. */
+ the svalue for the character constant, or nullptr if unsuccessful. */
const svalue *
region_model_manager::maybe_get_char_from_string_cst (tree string_cst,
if (compare_constants (byte_offset_cst,
GE_EXPR,
get_string_cst_size (string_cst)).is_true ())
- return NULL;
+ return nullptr;
int char_val;
if (compare_tree_int (byte_offset_cst,
= build_int_cst_type (TREE_TYPE (TREE_TYPE (string_cst)), char_val);
return get_or_create_constant_svalue (char_cst);
}
- return NULL;
+ return nullptr;
}
/* Given RAW_DATA_CST, a RAW_DATA_CST and BYTE_OFFSET_CST a constant,
attempt to get the character at that offset, returning either
- the svalue for the character constant, or NULL if unsuccessful. */
+ the svalue for the character constant, or nullptr if unsuccessful. */
const svalue *
region_model_manager::maybe_get_char_from_raw_data_cst (tree raw_data_cst,
}
/* Return the frame_region for call to FUN from CALLING_FRAME, creating it
- if necessary. CALLING_FRAME may be NULL. */
+ if necessary. CALLING_FRAME may be nullptr. */
const frame_region *
region_model_manager::get_frame_region (const frame_region *calling_frame,
class region_model_manager
{
public:
- region_model_manager (logger *logger = NULL);
+ region_model_manager (logger *logger = nullptr);
~region_model_manager ();
unsigned get_num_symbols () const { return m_next_symbol_id; }
if (const symbolic_region *sym_reg = base_reg->dyn_cast_symbolic_region ())
{
const svalue *ptr = sym_reg->get_pointer ();
- if (ptr->implicitly_live_p (NULL, m_model))
+ if (ptr->implicitly_live_p (nullptr, m_model))
add (base_reg, true);
switch (ptr->get_kind ())
{
const region *other_base_reg = init_sval_reg->get_base_region ();
const binding_cluster *other_cluster
= m_store->get_cluster (other_base_reg);
- if (other_cluster == NULL
+ if (other_cluster == nullptr
|| !other_cluster->touched_p ())
add (base_reg, true);
}
if (binding_cluster *bind_cluster = m_store->get_cluster (base_reg))
bind_cluster->for_each_value (handle_sval_cb, this);
else
- handle_sval (m_model->get_store_value (reg, NULL));
+ handle_sval (m_model->get_store_value (reg, nullptr));
}
void
const region *reg = iter.first;
const svalue *sval = iter.second;
const svalue * const *other_slot = other.get (reg);
- if (other_slot == NULL)
+ if (other_slot == nullptr)
return false;
if (sval != *other_slot)
return false;
/* Ctor for region_model: construct an "empty" model. */
region_model::region_model (region_model_manager *mgr)
-: m_mgr (mgr), m_store (), m_current_frame (NULL),
+: m_mgr (mgr), m_store (), m_current_frame (nullptr),
m_thrown_exceptions_stack (),
m_caught_exceptions_stack (),
m_dynamic_extents ()
/* Couldn't get state; accept this diagnostic. */
return true;
- const svalue *fsval = emission_model.get_rvalue (m_check_expr, NULL);
+ const svalue *fsval = emission_model.get_rvalue (m_check_expr, nullptr);
/* Check to see if the expr is also poisoned in FNODE (and in the
same way). */
const poisoned_svalue * fspval = fsval->dyn_cast_poisoned_svalue ();
/* If ASSIGN is a stmt that can be modelled via
set_value (lhs_reg, SVALUE, CTXT)
for some SVALUE, get the SVALUE.
- Otherwise return NULL. */
+ Otherwise return nullptr. */
const svalue *
region_model::get_gassign_result (const gassign *assign,
switch (op)
{
default:
- return NULL;
+ return nullptr;
case POINTER_PLUS_EXPR:
{
that implies that the value of the second arg doesn't matter, i.e.
1 for bitwise or, 0 for bitwise and. */
tree other_arg = gimple_assign_rhs1 (use_assign);
- /* Use a NULL ctxt here to avoid generating warnings. */
- const svalue *other_arg_sval = model->get_rvalue (other_arg, NULL);
+ /* Use a nullptr ctxt here to avoid generating warnings. */
+ const svalue *other_arg_sval = model->get_rvalue (other_arg, nullptr);
tree other_arg_cst = other_arg_sval->maybe_get_constant ();
if (!other_arg_cst)
return false;
/* Check for SVAL being poisoned, adding a warning to CTXT.
Return SVAL, or, if a warning is added, another value, to avoid
repeatedly complaining about the same poisoned value in followup code.
- SRC_REGION is a hint about where SVAL came from, and can be NULL. */
+ SRC_REGION is a hint about where SVAL came from, and can be nullptr. */
const svalue *
region_model::check_for_poison (const svalue *sval,
the tree other than via the def stmts, using
fixup_tree_for_diagnostic. */
tree diag_arg = fixup_tree_for_diagnostic (expr);
- if (src_region == NULL && pkind == poison_kind::uninit)
+ if (src_region == nullptr && pkind == poison_kind::uninit)
src_region = get_region_for_poisoned_expr (expr);
/* Can we reliably get the poisoned value from "expr"?
Hence we only query its value now, and only use it if we get the
poisoned value back again. */
tree check_expr = expr;
- const svalue *foo_sval = get_rvalue (expr, NULL);
+ const svalue *foo_sval = get_rvalue (expr, nullptr);
if (foo_sval == sval)
check_expr = expr;
else
- check_expr = NULL;
+ check_expr = nullptr;
if (ctxt->warn
(std::make_unique<poisoned_value_diagnostic> (diag_arg,
pkind,
/* Attempt to get a region for describing EXPR, the source of region of
a poisoned_svalue for use in a poisoned_value_diagnostic.
- Return NULL if there is no good region to use. */
+ Return nullptr if there is no good region to use. */
const region *
region_model::get_region_for_poisoned_expr (tree expr) const
if (decl && DECL_P (decl))
expr = decl;
else
- return NULL;
+ return nullptr;
}
- return get_lvalue (expr, NULL);
+ return get_lvalue (expr, nullptr);
}
/* Update this model for the ASSIGN stmt, using CTXT to report any
if (const svalue *sval = get_gassign_result (assign, ctxt))
{
tree expr = get_diagnostic_tree_for_gassign (assign);
- check_for_poison (sval, expr, NULL, ctxt);
+ check_for_poison (sval, expr, nullptr, ctxt);
set_value (lhs_reg, sval, ctxt);
return;
}
/* e.g. "struct s2 x = {{'A', 'B', 'C', 'D'}};". */
const svalue *rhs_sval = get_rvalue (rhs1, ctxt);
m_store.set_value (m_mgr->get_store_manager(), lhs_reg, rhs_sval,
- ctxt ? ctxt->get_uncertainty () : NULL);
+ ctxt ? ctxt->get_uncertainty () : nullptr);
}
break;
}
to set an upper bound on the size of a copy_to_user.
Attempt to simplify such sizes by trying to get the upper bound as a
constant.
- Return the simplified svalue if possible, or NULL otherwise. */
+ Return the simplified svalue if possible, or nullptr otherwise. */
static const svalue *
maybe_simplify_upper_bound (const svalue *num_bytes_sval,
when recording the diagnostic, or note that we're using
the upper bound. */
}
- return NULL;
+ return nullptr;
}
/* Attempt to get an upper bound for the size of a copy when simulating a
that, use the size of SRC_REG if constant.
Return a symbolic value for an upper limit on the number of bytes
- copied, or NULL if no such value could be determined. */
+ copied, or nullptr if no such value could be determined. */
const svalue *
region_model::maybe_get_copy_bounds (const region *src_reg,
return num_bytes_sval;
/* Non-constant: give up. */
- return NULL;
+ return nullptr;
}
/* Get any known_function for FNDECL for call CD.
The call must match all assumptions made by the known_function (such as
e.g. "argument 1's type must be a pointer type").
- Return NULL if no known_function is found, or it does not match the
+ Return nullptr if no known_function is found, or it does not match the
assumption(s). */
const known_function *
return known_fn_mgr->get_match (fndecl, cd);
}
-/* Get any known_function for IFN, or NULL. */
+/* Get any known_function for IFN, or nullptr. */
const known_function *
region_model::get_known_function (enum internal_fn ifn) const
}
/* Get any builtin_known_function for CALL and emit any warning to CTXT
- if not NULL.
+ if not nullptr.
The call must match all assumptions made by the known_function (such as
e.g. "argument 1's type must be a pointer type").
- Return NULL if no builtin_known_function is found, or it does
+ Return nullptr if no builtin_known_function is found, or it does
not match the assumption(s).
Internally calls get_known_function to find a known_function and cast it
const builtin_known_function *
region_model::get_builtin_kf (const gcall &call,
- region_model_context *ctxt /* = NULL */) const
+ region_model_context *ctxt /* = nullptr */) const
{
region_model *mut_this = const_cast <region_model *> (this);
tree callee_fndecl = mut_this->get_fndecl_for_call (call, ctxt);
if (! callee_fndecl)
- return NULL;
+ return nullptr;
call_details cd (call, mut_this, ctxt);
if (const known_function *kf = get_known_function (callee_fndecl, cd))
return kf->dyn_cast_builtin_kf ();
- return NULL;
+ return nullptr;
}
/* Subclass of custom_edge_info for use by exploded_edges that represent
}
}
- uncertainty_t *uncertainty = ctxt ? ctxt->get_uncertainty () : NULL;
+ uncertainty_t *uncertainty = ctxt ? ctxt->get_uncertainty () : nullptr;
/* Purge sm-state for the svalues that were reachable,
both in non-mutable and mutable form. */
setjmp was called. */
gcc_assert (get_stack_depth () >= setjmp_stack_depth);
while (get_stack_depth () > setjmp_stack_depth)
- pop_frame (NULL, NULL, ctxt, nullptr, false);
+ pop_frame (nullptr, nullptr, ctxt, nullptr, false);
gcc_assert (get_stack_depth () == setjmp_stack_depth);
region_model::get_lvalue (path_var pv, region_model_context *ctxt) const
{
if (pv.m_tree == NULL_TREE)
- return NULL;
+ return nullptr;
const region *result_reg = get_lvalue_1 (pv, ctxt);
assert_compat_types (result_reg->get_type (), TREE_TYPE (pv.m_tree));
region_model::get_rvalue (path_var pv, region_model_context *ctxt) const
{
if (pv.m_tree == NULL_TREE)
- return NULL;
+ return nullptr;
const svalue *result_sval = get_rvalue_1 (pv, ctxt);
assert_compat_types (result_sval->get_type (), TREE_TYPE (pv.m_tree));
- result_sval = check_for_poison (result_sval, pv.m_tree, NULL, ctxt);
+ result_sval = check_for_poison (result_sval, pv.m_tree, nullptr, ctxt);
return result_sval;
}
region_model::check_for_writable_region (const region* dest_reg,
region_model_context *ctxt) const
{
- /* Fail gracefully if CTXT is NULL. */
+ /* Fail gracefully if CTXT is nullptr. */
if (!ctxt)
return;
{
tree type = TREE_TYPE (decl);
tree size = TYPE_SIZE (type);
- return get_rvalue (size, NULL);
+ return get_rvalue (size, nullptr);
}
else
{
tree size = decl_init_size (decl, false);
if (size)
- return get_rvalue (size, NULL);
+ return get_rvalue (size, nullptr);
}
}
break;
region_model::check_region_for_read (const region *src_reg,
region_model_context *ctxt) const
{
- return check_region_access (src_reg, access_direction::read, NULL, ctxt);
+ return check_region_access (src_reg, access_direction::read, nullptr, ctxt);
}
/* Concrete subclass for casts of pointers that lead to trailing bytes. */
region_model::check_region_size (const region *lhs_reg, const svalue *rhs_sval,
region_model_context *ctxt) const
{
- if (!ctxt || ctxt->get_stmt () == NULL)
+ if (!ctxt || ctxt->get_stmt () == nullptr)
return;
/* Only report warnings on assignments that actually change the type. */
if (!is_any_cast_p (ctxt->get_stmt ()))
check_region_for_write (lhs_reg, rhs_sval, ctxt);
m_store.set_value (m_mgr->get_store_manager(), lhs_reg, rhs_sval,
- ctxt ? ctxt->get_uncertainty () : NULL);
+ ctxt ? ctxt->get_uncertainty () : nullptr);
}
/* Set the value of the region given by LHS to the value given by RHS. */
Simulate scanning through the buffer, reading until we find a 0 byte
(equivalent to calling strlen).
- Complain and return NULL if:
+ Complain and return nullptr if:
- the buffer pointed to isn't null-terminated
- the buffer pointed to has any uninitalized bytes before any 0-terminator
- any of the reads aren't within the bounds of the underlying base region
Simulate scanning through the buffer, reading until we find a 0 byte
(equivalent to calling strlen).
- Complain and return NULL if:
+ Complain and return nullptr if:
- the buffer pointed to isn't null-terminated
- the buffer pointed to has any uninitalized bytes before any 0-terminator
- any of the reads aren't within the bounds of the underlying base region
(including the null terminator) if INCLUDE_TERMINATOR is true, or strlen
of the buffer (not including the null terminator) if it is false.
- Also, when returning an svalue, if OUT_SVAL is non-NULL, write to
+ Also, when returning an svalue, if OUT_SVAL is non-null, write to
*OUT_SVAL with an svalue representing the content of the buffer up to
and including the terminator.
svalue_set *visited,
logger *logger) const
{
- if (sval == NULL)
+ if (sval == nullptr)
return path_var (NULL_TREE, 0);
LOG_SCOPE (logger);
break;
}
- if (last_stmt == NULL)
+ if (last_stmt == nullptr)
return true;
/* Apply any constraints for conditionals/switch/computed-goto statements. */
so that pop_frame can determine the region with respect to the
*caller* frame. */
tree lhs = gimple_call_lhs (&call_stmt);
- pop_frame (lhs, NULL, ctxt, &call_stmt);
+ pop_frame (lhs, nullptr, ctxt, &call_stmt);
}
/* Extract calling information from the superedge and update the model for the
std::unique_ptr<rejected_constraint> *out)
{
::edge cfg_edge = sedge.get_cfg_edge ();
- gcc_assert (cfg_edge != NULL);
+ gcc_assert (cfg_edge != nullptr);
gcc_assert (cfg_edge->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE));
enum tree_code op = gimple_cond_code (cond_stmt);
has_nondefault_case_for_value_p (const gswitch *switch_stmt, tree int_cst)
{
/* We expect the initial label to be the default; skip it. */
- gcc_assert (CASE_LOW (gimple_switch_label (switch_stmt, 0)) == NULL);
+ gcc_assert (CASE_LOW (gimple_switch_label (switch_stmt, 0)) == NULL_TREE);
unsigned min_idx = 1;
unsigned max_idx = gimple_switch_num_labels (switch_stmt) - 1;
/* Evaluate the result, within the callee frame. */
tree fndecl = m_current_frame->get_function ().decl;
tree result = DECL_RESULT (fndecl);
- const svalue *retval = NULL;
+ const svalue *retval = nullptr;
if (result
&& TREE_TYPE (result) != void_type_node
&& eval_return_svalue)
class contains_floating_point_visitor : public visitor
{
public:
- contains_floating_point_visitor (const svalue *root_sval) : m_result (NULL)
+ contains_floating_point_visitor (const svalue *root_sval) : m_result (nullptr)
{
root_sval->accept (this);
}
m_dynamic_extents.put (reg, size_in_bytes);
}
-/* Get the recording of REG in bytes, or NULL if no dynamic size was
+/* Get the recording of REG in bytes, or nullptr if no dynamic size was
recorded. */
const svalue *
{
if (const svalue * const *slot = m_dynamic_extents.get (reg))
return *slot;
- return NULL;
+ return nullptr;
}
/* Unset any recorded dynamic size of REG. */
Check that COPIED_SVAL is fully initialized. If not, complain about
an infoleak to CTXT.
- SRC_REG can be NULL; if non-NULL it is used as a hint in the diagnostic
+ SRC_REG can be nullptr; if non-null it is used as a hint in the diagnostic
as to where COPIED_SVAL came from. */
void
rejected_op_constraint::dump_to_pp (pretty_printer *pp) const
{
region_model m (m_model);
- const svalue *lhs_sval = m.get_rvalue (m_lhs, NULL);
- const svalue *rhs_sval = m.get_rvalue (m_rhs, NULL);
+ const svalue *lhs_sval = m.get_rvalue (m_lhs, nullptr);
+ const svalue *rhs_sval = m.get_rvalue (m_rhs, nullptr);
lhs_sval->dump_to_pp (pp, true);
pp_printf (pp, " %s ", op_symbol_code (m_op));
rhs_sval->dump_to_pp (pp, true);
rejected_ranges_constraint::dump_to_pp (pretty_printer *pp) const
{
region_model m (m_model);
- const svalue *sval = m.get_rvalue (m_expr, NULL);
+ const svalue *sval = m.get_rvalue (m_expr, nullptr);
sval->dump_to_pp (pp, true);
pp_string (pp, " in ");
m_ranges->dump_to_pp (pp, true);
tree lhs, tree_code op, tree rhs,
tristate expected)
{
- tristate actual = model.eval_condition (lhs, op, rhs, NULL);
+ tristate actual = model.eval_condition (lhs, op, rhs, nullptr);
ASSERT_EQ_AT (loc, actual, expected);
}
TYPE_NAME (t) = get_identifier (name);
TYPE_SIZE (t) = 0;
- tree fieldlist = NULL;
+ tree fieldlist = NULL_TREE;
int i;
tree field;
FOR_EACH_VEC_ELT (*fields, i, field)
region_model_manager mgr;
region_model model (&mgr);
- model.set_value (c_x, int_17, NULL);
- model.set_value (c_y, int_m3, NULL);
+ model.set_value (c_x, int_17, nullptr);
+ model.set_value (c_y, int_m3, nullptr);
/* Verify get_offset for "c.x". */
{
- const region *c_x_reg = model.get_lvalue (c_x, NULL);
+ const region *c_x_reg = model.get_lvalue (c_x, nullptr);
region_offset offset = c_x_reg->get_offset (&mgr);
- ASSERT_EQ (offset.get_base_region (), model.get_lvalue (c, NULL));
+ ASSERT_EQ (offset.get_base_region (), model.get_lvalue (c, nullptr));
ASSERT_EQ (offset.get_bit_offset (), 0);
}
/* Verify get_offset for "c.y". */
{
- const region *c_y_reg = model.get_lvalue (c_y, NULL);
+ const region *c_y_reg = model.get_lvalue (c_y, nullptr);
region_offset offset = c_y_reg->get_offset (&mgr);
- ASSERT_EQ (offset.get_base_region (), model.get_lvalue (c, NULL));
+ ASSERT_EQ (offset.get_base_region (), model.get_lvalue (c, nullptr));
ASSERT_EQ (offset.get_bit_offset (), INT_TYPE_SIZE);
}
}
tree a_0 = build4 (ARRAY_REF, char_type_node,
a, int_0, NULL_TREE, NULL_TREE);
tree char_A = build_int_cst (char_type_node, 'A');
- model.set_value (a_0, char_A, NULL);
+ model.set_value (a_0, char_A, nullptr);
}
/* Verify that region_model::get_representative_tree works as expected. */
{
tree string_cst = build_string (4, "foo");
region_model m (&mgr);
- const svalue *str_sval = m.get_rvalue (string_cst, NULL);
+ const svalue *str_sval = m.get_rvalue (string_cst, nullptr);
tree rep = m.get_representative_tree (str_sval);
ASSERT_EQ (rep, string_cst);
}
{
tree string_cst_ptr = build_string_literal (4, "foo");
region_model m (&mgr);
- const svalue *str_sval = m.get_rvalue (string_cst_ptr, NULL);
+ const svalue *str_sval = m.get_rvalue (string_cst_ptr, nullptr);
tree rep = m.get_representative_tree (str_sval);
ASSERT_DUMP_TREE_EQ (rep, "&\"foo\"[0]");
}
/* Different types (or the NULL type) should have different
unknown_svalues. */
- const svalue *unknown_NULL_type = mgr.get_or_create_unknown_svalue (NULL);
+ const svalue *unknown_NULL_type = mgr.get_or_create_unknown_svalue (nullptr);
ASSERT_NE (unknown_NULL_type, unknown_int);
/* Repeated calls with NULL for the type should get the same "unknown"
svalue. */
- const svalue *unknown_NULL_type_2 = mgr.get_or_create_unknown_svalue (NULL);
+ const svalue *unknown_NULL_type_2 = mgr.get_or_create_unknown_svalue (nullptr);
ASSERT_EQ (unknown_NULL_type, unknown_NULL_type_2);
}
region_model model (&mgr);
ADD_SAT_CONSTRAINT (model, x, EQ_EXPR, int_0);
ASSERT_CONDITION_UNKNOWN (model, y, EQ_EXPR, int_0);
- model.set_value (model.get_lvalue (y, NULL),
- model.get_rvalue (int_0, NULL),
- NULL);
+ model.set_value (model.get_lvalue (y, nullptr),
+ model.get_rvalue (int_0, nullptr),
+ nullptr);
ASSERT_CONDITION_TRUE (model, y, EQ_EXPR, int_0);
ASSERT_CONDITION_TRUE (model, y, EQ_EXPR, x);
}
region_model_manager mgr;
region_model model (&mgr);
- model.set_value (c_x, int_17, NULL);
- model.set_value (c_y, int_m3, NULL);
+ model.set_value (c_x, int_17, nullptr);
+ model.set_value (c_y, int_m3, nullptr);
/* Copy c to d. */
- const svalue *sval = model.get_rvalue (c, NULL);
- model.set_value (model.get_lvalue (d, NULL), sval, NULL);
+ const svalue *sval = model.get_rvalue (c, nullptr);
+ model.set_value (model.get_lvalue (d, nullptr), sval, nullptr);
/* Check that the fields have the same svalues. */
- ASSERT_EQ (model.get_rvalue (c_x, NULL), model.get_rvalue (d_x, NULL));
- ASSERT_EQ (model.get_rvalue (c_y, NULL), model.get_rvalue (d_y, NULL));
+ ASSERT_EQ (model.get_rvalue (c_x, nullptr), model.get_rvalue (d_x, nullptr));
+ ASSERT_EQ (model.get_rvalue (c_y, nullptr), model.get_rvalue (d_y, nullptr));
}
/* Verify the details of pushing and popping stack frames. */
model.set_value (p_in_globals_reg,
mgr.get_ptr_svalue (ptr_type_node, x_in_child_reg),
&ctxt);
- ASSERT_EQ (p_in_globals_reg->maybe_get_frame_region (), NULL);
+ ASSERT_EQ (p_in_globals_reg->maybe_get_frame_region (), nullptr);
/* Point another global pointer at p: q = &p. */
const region *q_in_globals_reg = model.get_lvalue (q, &ctxt);
ASSERT_FALSE (a_in_parent_reg->descendent_of_p (child_frame_reg));
/* Pop the "child_fn" frame from the stack. */
- model.pop_frame (NULL, NULL, &ctxt, nullptr);
+ model.pop_frame (nullptr, nullptr, &ctxt, nullptr);
ASSERT_FALSE (model.region_exists_p (child_frame_reg));
ASSERT_TRUE (model.region_exists_p (parent_frame_reg));
/* Verify that p (which was pointing at the local "x" in the popped
frame) has been poisoned. */
- const svalue *new_p_sval = model.get_rvalue (p, NULL);
+ const svalue *new_p_sval = model.get_rvalue (p, nullptr);
ASSERT_EQ (new_p_sval->get_kind (), SK_POISONED);
ASSERT_EQ (new_p_sval->dyn_cast_poisoned_svalue ()->get_poison_kind (),
poison_kind::popped_stack);
}
/* ...and that we can lookup lvalues for locals for all frames,
not just the top. */
- ASSERT_EQ (model.get_lvalue (path_var (n, depth), NULL),
+ ASSERT_EQ (model.get_lvalue (path_var (n, depth), nullptr),
parm_regs[depth]);
/* ...and that we can locate the svalues. */
{
/* Verify that setting state in model1 makes the models non-equal. */
tree x = build_global_decl ("x", integer_type_node);
- model0.set_value (x, int_42, NULL);
- ASSERT_EQ (model0.get_rvalue (x, NULL)->maybe_get_constant (), int_42);
+ model0.set_value (x, int_42, nullptr);
+ ASSERT_EQ (model0.get_rvalue (x, nullptr)->maybe_get_constant (), int_42);
ASSERT_NE (model0, model1);
/* Verify the copy-ctor. */
region_model model2 (model0);
ASSERT_EQ (model0, model2);
- ASSERT_EQ (model2.get_rvalue (x, NULL)->maybe_get_constant (), int_42);
+ ASSERT_EQ (model2.get_rvalue (x, nullptr)->maybe_get_constant (), int_42);
ASSERT_NE (model1, model2);
/* Verify that models obtained from copy-ctor are independently editable
w/o affecting the original model. */
- model2.set_value (x, int_17, NULL);
+ model2.set_value (x, int_17, nullptr);
ASSERT_NE (model0, model2);
- ASSERT_EQ (model2.get_rvalue (x, NULL)->maybe_get_constant (), int_17);
- ASSERT_EQ (model0.get_rvalue (x, NULL)->maybe_get_constant (), int_42);
+ ASSERT_EQ (model2.get_rvalue (x, nullptr)->maybe_get_constant (), int_17);
+ ASSERT_EQ (model0.get_rvalue (x, nullptr)->maybe_get_constant (), int_42);
}
/* Verify that region models for
region_model_manager mgr;
region_model model0 (&mgr);
- model0.set_value (model0.get_lvalue (x, NULL),
- model0.get_rvalue (int_42, NULL),
- NULL);
- model0.set_value (model0.get_lvalue (y, NULL),
- model0.get_rvalue (int_113, NULL),
- NULL);
+ model0.set_value (model0.get_lvalue (x, nullptr),
+ model0.get_rvalue (int_42, nullptr),
+ nullptr);
+ model0.set_value (model0.get_lvalue (y, nullptr),
+ model0.get_rvalue (int_113, nullptr),
+ nullptr);
region_model model1 (&mgr);
- model1.set_value (model1.get_lvalue (y, NULL),
- model1.get_rvalue (int_113, NULL),
- NULL);
- model1.set_value (model1.get_lvalue (x, NULL),
- model1.get_rvalue (int_42, NULL),
- NULL);
+ model1.set_value (model1.get_lvalue (y, nullptr),
+ model1.get_rvalue (int_113, nullptr),
+ nullptr);
+ model1.set_value (model1.get_lvalue (x, nullptr),
+ model1.get_rvalue (int_42, nullptr),
+ nullptr);
ASSERT_EQ (model0, model1);
}
region_model_manager mgr;
region_model model0 (&mgr);
- model0.add_constraint (x, GT_EXPR, int_3, NULL);
- model0.add_constraint (y, GT_EXPR, int_42, NULL);
+ model0.add_constraint (x, GT_EXPR, int_3, nullptr);
+ model0.add_constraint (y, GT_EXPR, int_42, nullptr);
region_model model1 (&mgr);
- model1.add_constraint (y, GT_EXPR, int_42, NULL);
- model1.add_constraint (x, GT_EXPR, int_3, NULL);
+ model1.add_constraint (y, GT_EXPR, int_42, nullptr);
+ model1.add_constraint (x, GT_EXPR, int_3, nullptr);
model0.canonicalize ();
model1.canonicalize ();
region_model model (&mgr);
for (tree cst : csts)
- model.get_rvalue (cst, NULL);
+ model.get_rvalue (cst, nullptr);
model.canonicalize ();
}
with values VAL_A and VAL_B for EXPR that they are
mergable. Write the merged model to *OUT_MERGED_MODEL,
and the merged svalue ptr to *OUT_MERGED_SVALUE.
- If VAL_A or VAL_B are NULL_TREE, don't populate EXPR
+ If VAL_A or VAL_B are NULL_TREE, don't populate EXPR
for that region_model. */
static void
region_model model0 (&mgr);
model0.push_frame (*DECL_STRUCT_FUNCTION (test_fndecl),
nullptr, nullptr, nullptr);
- model0.set_value (model0.get_lvalue (p, NULL),
- model0.get_rvalue (addr_of_a, NULL), NULL);
+ model0.set_value (model0.get_lvalue (p, nullptr),
+ model0.get_rvalue (addr_of_a, nullptr), nullptr);
region_model model1 (model0);
ASSERT_EQ (model0, model1);
const region_svalue *merged_p_ptr
= merged_p_sval->dyn_cast_region_svalue ();
const region *merged_p_star_reg = merged_p_ptr->get_pointee ();
- ASSERT_EQ (merged_p_star_reg, merged.get_lvalue (y, NULL));
+ ASSERT_EQ (merged_p_star_reg, merged.get_lvalue (y, nullptr));
}
/* Pointers: non-NULL ptrs to different globals: should be unknown. */
region_model model0 (&mgr);
model0.push_frame (*DECL_STRUCT_FUNCTION (test_fndecl),
nullptr, nullptr, nullptr);
- const region *q_in_first_frame = model0.get_lvalue (q, NULL);
+ const region *q_in_first_frame = model0.get_lvalue (q, nullptr);
/* Push a second frame. */
const region *reg_2nd_frame
/* Have a pointer in the older frame point to a local in the
more recent frame. */
- const svalue *sval_ptr = model0.get_rvalue (addr_of_a, NULL);
- model0.set_value (q_in_first_frame, sval_ptr, NULL);
+ const svalue *sval_ptr = model0.get_rvalue (addr_of_a, nullptr);
+ model0.set_value (q_in_first_frame, sval_ptr, nullptr);
/* Verify that it's pointing at the newer frame. */
const region *reg_pointee = sval_ptr->maybe_get_region ();
region_model model0 (&mgr);
model0.push_frame (*DECL_STRUCT_FUNCTION (test_fndecl),
nullptr, nullptr, nullptr);
- model0.set_value (model0.get_lvalue (q, NULL),
- model0.get_rvalue (addr_of_y, NULL), NULL);
+ model0.set_value (model0.get_lvalue (q, nullptr),
+ model0.get_rvalue (addr_of_y, nullptr), nullptr);
region_model model1 (model0);
ASSERT_EQ (model0, model1);
/* model0: 0 <= (x == y) < n. */
region_model model0 (&mgr);
model0.add_constraint (x, EQ_EXPR, y, &ctxt);
- model0.add_constraint (x, GE_EXPR, int_0, NULL);
- model0.add_constraint (x, LT_EXPR, n, NULL);
+ model0.add_constraint (x, GE_EXPR, int_0, nullptr);
+ model0.add_constraint (x, LT_EXPR, n, nullptr);
/* model1: z != 5 && (0 <= x < n). */
region_model model1 (&mgr);
- model1.add_constraint (z, NE_EXPR, int_5, NULL);
- model1.add_constraint (x, GE_EXPR, int_0, NULL);
- model1.add_constraint (x, LT_EXPR, n, NULL);
+ model1.add_constraint (z, NE_EXPR, int_5, nullptr);
+ model1.add_constraint (x, GE_EXPR, int_0, nullptr);
+ model1.add_constraint (x, LT_EXPR, n, nullptr);
/* They should be mergeable; the merged constraints should
be: (0 <= x < n). */
const svalue *size_in_bytes
= mgr.get_or_create_unknown_svalue (size_type_node);
const region *reg
- = model.get_or_create_region_for_heap_alloc (size_in_bytes, NULL);
+ = model.get_or_create_region_for_heap_alloc (size_in_bytes, nullptr);
const svalue *sval = mgr.get_ptr_svalue (ptr_type_node, reg);
- model.set_value (model.get_lvalue (p, NULL), sval, NULL);
- model.set_value (q, p, NULL);
+ model.set_value (model.get_lvalue (p, nullptr), sval, nullptr);
+ model.set_value (q, p, nullptr);
ASSERT_CONDITION_UNKNOWN (model, p, NE_EXPR, null_ptr);
ASSERT_CONDITION_UNKNOWN (model, p, EQ_EXPR, null_ptr);
ASSERT_CONDITION_UNKNOWN (model, q, NE_EXPR, null_ptr);
ASSERT_CONDITION_UNKNOWN (model, q, EQ_EXPR, null_ptr);
- model.add_constraint (p, NE_EXPR, null_ptr, NULL);
+ model.add_constraint (p, NE_EXPR, null_ptr, nullptr);
ASSERT_CONDITION_TRUE (model, p, NE_EXPR, null_ptr);
ASSERT_CONDITION_FALSE (model, p, EQ_EXPR, null_ptr);
region_model_manager mgr;
region_model model (&mgr);
- const region *i_reg = model.get_lvalue (i, NULL);
+ const region *i_reg = model.get_lvalue (i, nullptr);
ASSERT_EQ (i_reg->get_kind (), RK_DECL);
/* Reading "i" should give a symbolic "initial value". */
- const svalue *sval_init = model.get_rvalue (i, NULL);
+ const svalue *sval_init = model.get_rvalue (i, nullptr);
ASSERT_EQ (sval_init->get_kind (), SK_INITIAL);
ASSERT_EQ (sval_init->dyn_cast_initial_svalue ()->get_region (), i_reg);
/* ..and doing it again should give the same "initial value". */
- ASSERT_EQ (model.get_rvalue (i, NULL), sval_init);
+ ASSERT_EQ (model.get_rvalue (i, nullptr), sval_init);
/* "i = 17;". */
- model.set_value (i, int_17, NULL);
- ASSERT_EQ (model.get_rvalue (i, NULL),
- model.get_rvalue (int_17, NULL));
+ model.set_value (i, int_17, nullptr);
+ ASSERT_EQ (model.get_rvalue (i, nullptr),
+ model.get_rvalue (int_17, nullptr));
/* "i = -3;". */
- model.set_value (i, int_m3, NULL);
- ASSERT_EQ (model.get_rvalue (i, NULL),
- model.get_rvalue (int_m3, NULL));
+ model.set_value (i, int_m3, nullptr);
+ ASSERT_EQ (model.get_rvalue (i, nullptr),
+ model.get_rvalue (int_m3, nullptr));
/* Verify get_offset for "i". */
{
region_model_manager mgr;
region_model model (&mgr);
/* "arr[0] = 17;". */
- model.set_value (arr_0, int_17, NULL);
+ model.set_value (arr_0, int_17, nullptr);
/* "arr[1] = -3;". */
- model.set_value (arr_1, int_m3, NULL);
+ model.set_value (arr_1, int_m3, nullptr);
- ASSERT_EQ (model.get_rvalue (arr_0, NULL), model.get_rvalue (int_17, NULL));
- ASSERT_EQ (model.get_rvalue (arr_1, NULL), model.get_rvalue (int_m3, NULL));
+ ASSERT_EQ (model.get_rvalue (arr_0, nullptr),
+ model.get_rvalue (int_17, nullptr));
+ ASSERT_EQ (model.get_rvalue (arr_1, nullptr),
+ model.get_rvalue (int_m3, nullptr));
/* Overwrite a pre-existing binding: "arr[1] = 42;". */
- model.set_value (arr_1, int_42, NULL);
- ASSERT_EQ (model.get_rvalue (arr_1, NULL), model.get_rvalue (int_42, NULL));
+ model.set_value (arr_1, int_42, nullptr);
+ ASSERT_EQ (model.get_rvalue (arr_1, nullptr),
+ model.get_rvalue (int_42, nullptr));
/* Verify get_offset for "arr[0]". */
{
- const region *arr_0_reg = model.get_lvalue (arr_0, NULL);
+ const region *arr_0_reg = model.get_lvalue (arr_0, nullptr);
region_offset offset = arr_0_reg->get_offset (&mgr);
- ASSERT_EQ (offset.get_base_region (), model.get_lvalue (arr, NULL));
+ ASSERT_EQ (offset.get_base_region (), model.get_lvalue (arr, nullptr));
ASSERT_EQ (offset.get_bit_offset (), 0);
}
/* Verify get_offset for "arr[1]". */
{
- const region *arr_1_reg = model.get_lvalue (arr_1, NULL);
+ const region *arr_1_reg = model.get_lvalue (arr_1, nullptr);
region_offset offset = arr_1_reg->get_offset (&mgr);
- ASSERT_EQ (offset.get_base_region (), model.get_lvalue (arr, NULL));
+ ASSERT_EQ (offset.get_base_region (), model.get_lvalue (arr, nullptr));
ASSERT_EQ (offset.get_bit_offset (), INT_TYPE_SIZE);
}
/* Verify get_offset for "arr[i]". */
{
- const region *arr_i_reg = model.get_lvalue (arr_i, NULL);
+ const region *arr_i_reg = model.get_lvalue (arr_i, nullptr);
region_offset offset = arr_i_reg->get_offset (&mgr);
- ASSERT_EQ (offset.get_base_region (), model.get_lvalue (arr, NULL));
+ ASSERT_EQ (offset.get_base_region (), model.get_lvalue (arr, nullptr));
const svalue *offset_sval = offset.get_symbolic_byte_offset ();
if (const svalue *cast = offset_sval->maybe_undo_cast ())
offset_sval = cast;
}
/* "arr[i] = i;" - this should remove the earlier bindings. */
- model.set_value (arr_i, i, NULL);
- ASSERT_EQ (model.get_rvalue (arr_i, NULL), model.get_rvalue (i, NULL));
- ASSERT_EQ (model.get_rvalue (arr_0, NULL)->get_kind (), SK_UNKNOWN);
+ model.set_value (arr_i, i, nullptr);
+ ASSERT_EQ (model.get_rvalue (arr_i, nullptr), model.get_rvalue (i, nullptr));
+ ASSERT_EQ (model.get_rvalue (arr_0, nullptr)->get_kind (), SK_UNKNOWN);
/* "arr[0] = 17;" - this should remove the arr[i] binding. */
- model.set_value (arr_0, int_17, NULL);
- ASSERT_EQ (model.get_rvalue (arr_0, NULL), model.get_rvalue (int_17, NULL));
- ASSERT_EQ (model.get_rvalue (arr_i, NULL)->get_kind (), SK_UNKNOWN);
+ model.set_value (arr_0, int_17, nullptr);
+ ASSERT_EQ (model.get_rvalue (arr_0, nullptr),
+ model.get_rvalue (int_17, nullptr));
+ ASSERT_EQ (model.get_rvalue (arr_i, nullptr)->get_kind (), SK_UNKNOWN);
}
/* Smoketest of dereferencing a pointer via MEM_REF. */
region_model model (&mgr);
/* "x = 17;". */
- model.set_value (x, int_17, NULL);
+ model.set_value (x, int_17, nullptr);
/* "p = &x;". */
- model.set_value (p, addr_of_x, NULL);
+ model.set_value (p, addr_of_x, nullptr);
- const svalue *sval = model.get_rvalue (star_p, NULL);
+ const svalue *sval = model.get_rvalue (star_p, nullptr);
ASSERT_EQ (sval->maybe_get_constant (), int_17);
}
region_model m (&mgr);
tree int_42 = build_int_cst (integer_type_node, 42);
- m.set_value (mem_ref, int_42, NULL);
- ASSERT_EQ (m.get_rvalue (mem_ref, NULL)->maybe_get_constant (), int_42);
+ m.set_value (mem_ref, int_42, nullptr);
+ ASSERT_EQ (m.get_rvalue (mem_ref, nullptr)->maybe_get_constant (), int_42);
}
/* Verify that malloc works. */
/* Verify that the pointers to the alloca region are replaced by
poisoned values when the frame is popped. */
- model.pop_frame (NULL, NULL, &ctxt, nullptr);
- ASSERT_EQ (model.get_rvalue (p, NULL)->get_kind (), SK_POISONED);
+ model.pop_frame (nullptr, nullptr, &ctxt, nullptr);
+ ASSERT_EQ (model.get_rvalue (p, nullptr)->get_kind (), SK_POISONED);
}
/* Verify that svalue::involves_p works. */
std::unique_ptr<rejected_constraint> *out);
void update_for_gcall (const gcall &call_stmt,
- region_model_context *ctxt,
- function *callee = NULL);
+ region_model_context *ctxt,
+ function *callee = nullptr);
void update_for_return_gcall (const gcall &call_stmt,
- region_model_context *ctxt);
+ region_model_context *ctxt);
const region *push_frame (const function &fun,
const gcall *call_stmt,
bool can_merge_with_p (const region_model &other_model,
const program_point &point,
region_model *out_model,
- const extrinsic_state *ext_state = NULL,
- const program_state *state_a = NULL,
- const program_state *state_b = NULL) const;
+ const extrinsic_state *ext_state = nullptr,
+ const program_state *state_a = nullptr,
+ const program_state *state_b = nullptr) const;
tree get_fndecl_for_call (const gcall &call,
region_model_context *ctxt);
const builtin_known_function *
get_builtin_kf (const gcall &call,
- region_model_context *ctxt = NULL) const;
+ region_model_context *ctxt = nullptr) const;
static void
register_pop_frame_callback (const pop_frame_callback &callback)
Return true if the diagnostic was stored, or false if it was deleted.
Optionally provide a custom stmt_finder. */
virtual bool warn (std::unique_ptr<pending_diagnostic> d,
- const stmt_finder *custom_finder = NULL) = 0;
+ const stmt_finder *custom_finder = nullptr) = 0;
/* Hook for clients to add a note to the last previously stored
pending diagnostic. */
const state_machine **out_sm,
unsigned *out_sm_idx)
{
- return get_state_map_by_name ("malloc", out_smap, out_sm, out_sm_idx, NULL);
+ return get_state_map_by_name ("malloc", out_smap, out_sm, out_sm_idx,
+ nullptr);
}
bool get_taint_map (sm_state_map **out_smap,
const state_machine **out_sm,
unsigned *out_sm_idx)
{
- return get_state_map_by_name ("taint", out_smap, out_sm, out_sm_idx, NULL);
+ return get_state_map_by_name ("taint", out_smap, out_sm, out_sm_idx,
+ nullptr);
}
bool possibly_tainted_p (const svalue *sval);
void on_svalue_leak (const svalue *) override {}
void on_liveness_change (const svalue_set &,
const region_model *) override {}
- logger *get_logger () override { return NULL; }
+ logger *get_logger () override { return nullptr; }
void on_condition (const svalue *lhs ATTRIBUTE_UNUSED,
enum tree_code op ATTRIBUTE_UNUSED,
const svalue *rhs ATTRIBUTE_UNUSED) override
void on_escaped_function (tree) override {}
- uncertainty_t *get_uncertainty () override { return NULL; }
+ uncertainty_t *get_uncertainty () override { return nullptr; }
void purge_state_involving (const svalue *sval ATTRIBUTE_UNUSED) override {}
void bifurcate (std::unique_ptr<custom_edge_info> info) override;
void terminate_path () override;
- const extrinsic_state *get_ext_state () const override { return NULL; }
+ const extrinsic_state *get_ext_state () const override { return nullptr; }
bool get_state_map_by_name (const char *,
sm_state_map **,
return false;
}
- const gimple *get_stmt () const override { return NULL; }
- const exploded_graph *get_eg () const override { return NULL; }
+ const gimple *get_stmt () const override { return nullptr; }
+ const exploded_graph *get_eg () const override { return nullptr; }
const program_state *get_state () const override { return nullptr; }
void maybe_did_work () override {}
class engine
{
public:
- engine (const supergraph *sg = NULL, logger *logger = NULL);
+ engine (const supergraph *sg = nullptr, logger *logger = nullptr);
const supergraph *get_supergraph () { return m_sg; }
region_model_manager *get_model_manager () { return &m_mgr; }
known_function_manager *get_known_function_manager ()
#define ADD_SAT_CONSTRAINT(MODEL, LHS, OP, RHS) \
SELFTEST_BEGIN_STMT \
- bool sat = (MODEL).add_constraint (LHS, OP, RHS, NULL); \
+ bool sat = (MODEL).add_constraint (LHS, OP, RHS, nullptr); \
ASSERT_TRUE (sat); \
SELFTEST_END_STMT
#define ADD_UNSAT_CONSTRAINT(MODEL, LHS, OP, RHS) \
SELFTEST_BEGIN_STMT \
- bool sat = (MODEL).add_constraint (LHS, OP, RHS, NULL); \
+ bool sat = (MODEL).add_constraint (LHS, OP, RHS, nullptr); \
ASSERT_FALSE (sat); \
SELFTEST_END_STMT
}
/* An svalue that matches the pattern (BASE * FACTOR) + OFFSET
- where FACTOR or OFFSET could be the identity (represented as NULL). */
+ where FACTOR or OFFSET could be the identity (represented as nullptr). */
struct linear_op
{
{
*out = linear_op (binop_sval.get_arg0 (),
binop_sval.get_arg1 (),
- NULL);
+ nullptr);
return true;
}
else if (binop_sval.get_op () == PLUS_EXPR)
}
*out = linear_op (binop_sval.get_arg0 (),
- NULL,
+ nullptr,
binop_sval.get_arg1 ());
return true;
}
const svalue &a_sval = *a.get_symbolic_byte_offset ();
const svalue &b_sval = *b.get_symbolic_byte_offset ();
- linear_op op_a (NULL, NULL, NULL);
- linear_op op_b (NULL, NULL, NULL);
+ linear_op op_a (nullptr, nullptr, nullptr);
+ linear_op op_b (nullptr, nullptr, nullptr);
if (linear_op::from_svalue (a_sval, &op_a)
&& linear_op::from_svalue (b_sval, &op_b))
{
const svalue &a_sval = *a.get_symbolic_byte_offset ();
const svalue &b_sval = *b.get_symbolic_byte_offset ();
- linear_op op_a (NULL, NULL, NULL);
- linear_op op_b (NULL, NULL, NULL);
+ linear_op op_a (nullptr, nullptr, nullptr);
+ linear_op op_b (nullptr, nullptr, nullptr);
if (linear_op::from_svalue (a_sval, &op_a)
&& linear_op::from_svalue (b_sval, &op_b))
{
}
/* If this region is a frame_region, or a descendent of one, return it.
- Otherwise return NULL. */
+ Otherwise return nullptr. */
const frame_region *
region::maybe_get_frame_region () const
return frame_reg;
iter = iter->get_parent_region ();
}
- return NULL;
+ return nullptr;
}
/* Get the memory space of this region. */
}
/* If this region is a decl_region, return the decl.
- Otherwise return NULL. */
+ Otherwise return NULL_TREE. */
tree
region::maybe_get_decl () const
{
gcc_assert (TREE_CODE (record_type) == RECORD_TYPE);
if (bit_offset < 0)
- return NULL;
+ return nullptr;
/* Find the first field that has an offset > BIT_OFFSET,
then return the one preceding it.
{
const region *iter_region = this;
bit_offset_t accum_bit_offset = 0;
- const svalue *accum_byte_sval = NULL;
+ const svalue *accum_byte_sval = nullptr;
while (iter_region)
{
region::region (complexity c, symbol::id_t id, const region *parent, tree type)
: symbol (c, id),
m_parent (parent), m_type (type),
- m_cached_offset (NULL), m_cached_init_sval_at_main (NULL)
+ m_cached_offset (nullptr), m_cached_init_sval_at_main (nullptr)
{
gcc_assert (type == NULL_TREE || TYPE_P (type));
}
/* root_region's ctor. */
root_region::root_region (symbol::id_t id)
-: region (complexity (1, 1), id, NULL, NULL_TREE)
+: region (complexity (1, 1), id, nullptr, NULL_TREE)
{
}
int
decl_region::get_stack_depth () const
{
- if (get_parent_region () == NULL)
+ if (get_parent_region () == nullptr)
return 0;
if (const frame_region *frame_reg
= get_parent_region ()->dyn_cast_frame_region ())
/* If the underlying decl is in the global constant pool,
return an svalue representing the constant value.
- Otherwise return NULL. */
+ Otherwise return nullptr. */
const svalue *
decl_region::maybe_get_constant_value (region_model_manager *mgr) const
&& DECL_INITIAL (m_decl)
&& TREE_CODE (DECL_INITIAL (m_decl)) == CONSTRUCTOR)
return get_svalue_for_constructor (DECL_INITIAL (m_decl), mgr);
- return NULL;
+ return nullptr;
}
/* Implementation of decl_region::get_svalue_for_constructor
"main" (either based on DECL_INITIAL, or implicit initialization to
zero.
- Return NULL if there is a problem. */
+ Return nullptr if there is a problem. */
const svalue *
decl_region::get_svalue_for_initializer (region_model_manager *mgr) const
/* If we have an "extern" decl then there may be an initializer in
another TU. */
if (DECL_EXTERNAL (m_decl))
- return NULL;
+ return nullptr;
if (empty_p ())
- return NULL;
+ return nullptr;
/* Implicit initialization to zero; use a compound_svalue for it.
Doing so requires that we have a concrete binding for this region,
const binding_key *binding
= binding_key::make (mgr->get_store_manager (), this);
if (binding->symbolic_p ())
- return NULL;
+ return nullptr;
/* If we don't care about tracking the content of this region, then
it's unused, and the value doesn't matter. */
if (!tracked_p ())
- return NULL;
+ return nullptr;
binding_cluster c (this);
c.zero_fill_region (mgr->get_store_manager (), this);
/* LTO can write out error_mark_node as the DECL_INITIAL for simple scalar
values (to avoid writing out an extra section). */
if (init == error_mark_node)
- return NULL;
+ return nullptr;
if (TREE_CODE (init) == CONSTRUCTOR)
return get_svalue_for_constructor (init, mgr);
/* Reuse the get_rvalue logic from region_model. */
region_model m (mgr);
- return m.get_rvalue (path_var (init, 0), NULL);
+ return m.get_rvalue (path_var (init, 0), nullptr);
}
/* Subroutine of symnode_requires_tracking_p; return true if REF
if (ref->use != IPA_REF_ADDR)
return true;
- if (ref->stmt == NULL)
+ if (ref->stmt == nullptr)
return true;
switch (ref->stmt->code)
case GIMPLE_CALL:
{
cgraph_node *caller_cnode = dyn_cast <cgraph_node *> (ref->referring);
- if (caller_cnode == NULL)
+ if (caller_cnode == nullptr)
return true;
cgraph_edge *edge = caller_cnode->get_edge (ref->stmt);
if (!edge)
return true;
- if (edge->callee == NULL)
+ if (edge->callee == nullptr)
return true; /* e.g. call through function ptr. */
if (edge->callee->definition)
return true;
if (symnode->externally_visible)
return true;
tree context_fndecl = DECL_CONTEXT (symnode->decl);
- if (context_fndecl == NULL)
+ if (context_fndecl == nullptr)
return true;
if (TREE_CODE (context_fndecl) != FUNCTION_DECL)
return true;
virtual enum region_kind get_kind () const = 0;
virtual const frame_region *
- dyn_cast_frame_region () const { return NULL; }
+ dyn_cast_frame_region () const { return nullptr; }
virtual const function_region *
- dyn_cast_function_region () const { return NULL; }
+ dyn_cast_function_region () const { return nullptr; }
virtual const symbolic_region *
- dyn_cast_symbolic_region () const { return NULL; }
+ dyn_cast_symbolic_region () const { return nullptr; }
virtual const decl_region *
- dyn_cast_decl_region () const { return NULL; }
+ dyn_cast_decl_region () const { return nullptr; }
virtual const field_region *
- dyn_cast_field_region () const { return NULL; }
+ dyn_cast_field_region () const { return nullptr; }
virtual const element_region *
- dyn_cast_element_region () const { return NULL; }
+ dyn_cast_element_region () const { return nullptr; }
virtual const offset_region *
- dyn_cast_offset_region () const { return NULL; }
+ dyn_cast_offset_region () const { return nullptr; }
virtual const sized_region *
- dyn_cast_sized_region () const { return NULL; }
+ dyn_cast_sized_region () const { return nullptr; }
virtual const cast_region *
- dyn_cast_cast_region () const { return NULL; }
+ dyn_cast_cast_region () const { return nullptr; }
virtual const string_region *
- dyn_cast_string_region () const { return NULL; }
+ dyn_cast_string_region () const { return nullptr; }
virtual const bit_range_region *
- dyn_cast_bit_range_region () const { return NULL; }
+ dyn_cast_bit_range_region () const { return nullptr; }
virtual const var_arg_region *
- dyn_cast_var_arg_region () const { return NULL; }
+ dyn_cast_var_arg_region () const { return nullptr; }
virtual void accept (visitor *v) const;
key_t (const frame_region *calling_frame, const function &fun)
: m_calling_frame (calling_frame), m_fun (&fun)
{
- /* calling_frame can be NULL. */
+ /* calling_frame can be nullptr. */
}
hashval_t hash () const
}
void mark_deleted () { m_fun = reinterpret_cast<function *> (1); }
- void mark_empty () { m_fun = NULL; }
+ void mark_empty () { m_fun = nullptr; }
bool is_deleted () const
{
return m_fun == reinterpret_cast<function *> (1);
}
- bool is_empty () const { return m_fun == NULL; }
+ bool is_empty () const { return m_fun == nullptr; }
const frame_region *m_calling_frame;
const function *m_fun;
}
void mark_deleted () { m_sval_ptr = reinterpret_cast<const svalue *> (1); }
- void mark_empty () { m_sval_ptr = NULL; }
+ void mark_empty () { m_sval_ptr = nullptr; }
bool is_deleted () const
{
return m_sval_ptr == reinterpret_cast<const svalue *> (1);
}
- bool is_empty () const { return m_sval_ptr == NULL; }
+ bool is_empty () const { return m_sval_ptr == nullptr; }
const region *m_parent;
const svalue *m_sval_ptr;
decl_region (symbol::id_t id, const region *parent, tree decl)
: region (complexity (parent), id, parent, TREE_TYPE (decl)), m_decl (decl),
m_tracked (calc_tracked_p (decl)),
- m_ctor_svalue (NULL)
+ m_ctor_svalue (nullptr)
{}
enum region_kind get_kind () const final override { return RK_DECL; }
}
void mark_deleted () { m_index = reinterpret_cast<const svalue *> (1); }
- void mark_empty () { m_index = NULL; }
+ void mark_empty () { m_index = nullptr; }
bool is_deleted () const
{
return m_index == reinterpret_cast<const svalue *> (1);
}
- bool is_empty () const { return m_index == NULL; }
+ bool is_empty () const { return m_index == nullptr; }
const region *m_parent;
tree m_element_type;
}
void mark_deleted () { m_byte_offset = reinterpret_cast<const svalue *> (1); }
- void mark_empty () { m_byte_offset = NULL; }
+ void mark_empty () { m_byte_offset = nullptr; }
bool is_deleted () const
{
return m_byte_offset == reinterpret_cast<const svalue *> (1);
}
- bool is_empty () const { return m_byte_offset == NULL; }
+ bool is_empty () const { return m_byte_offset == nullptr; }
const region *m_parent;
tree m_element_type;
}
void mark_deleted () { m_byte_size_sval = reinterpret_cast<const svalue *> (1); }
- void mark_empty () { m_byte_size_sval = NULL; }
+ void mark_empty () { m_byte_size_sval = nullptr; }
bool is_deleted () const
{
return m_byte_size_sval == reinterpret_cast<const svalue *> (1);
}
- bool is_empty () const { return m_byte_size_sval == NULL; }
+ bool is_empty () const { return m_byte_size_sval == nullptr; }
const region *m_parent;
tree m_element_type;
}
void mark_deleted () { m_parent = reinterpret_cast<const region *> (1); }
- void mark_empty () { m_parent = NULL; }
+ void mark_empty () { m_parent = nullptr; }
bool is_deleted () const
{
return m_parent == reinterpret_cast<const region *> (1);
}
- bool is_empty () const { return m_parent == NULL; }
+ bool is_empty () const { return m_parent == nullptr; }
const region *m_parent;
tree m_type;
{
m_parent = reinterpret_cast<const frame_region *> (1);
}
- void mark_empty () { m_parent = NULL; }
+ void mark_empty () { m_parent = nullptr; }
bool is_deleted () const
{
return m_parent == reinterpret_cast<const frame_region *> (1);
}
- bool is_empty () const { return m_parent == NULL; }
+ bool is_empty () const { return m_parent == nullptr; }
const frame_region *m_parent;
unsigned m_idx;
/* State for a file descriptor that we do not want to track anymore . */
state_t m_stop;
- /* Stashed constant values from the frontend. These could be NULL. */
+ /* Stashed constant values from the frontend. These could be NULL_TREE. */
tree m_O_ACCMODE;
tree m_O_RDONLY;
tree m_O_WRONLY;
const svalue *fd_sval,
const supernode *node,
state_t old_state,
- bool *complained = NULL) const;
+ bool *complained = nullptr) const;
bool check_for_new_socket_fd (const call_details &cd,
bool successful,
sm_context &sm_ctxt,
fd_param_diagnostic (const fd_state_machine &sm, tree arg, tree callee_fndecl)
: fd_diagnostic (sm, arg), m_callee_fndecl (callee_fndecl),
- m_attr_name (NULL), m_arg_idx (-1)
+ m_attr_name (nullptr), m_arg_idx (-1)
{
}
return m_unchecked_read_only;
else
gcc_unreachable ();
- return NULL;
+ return nullptr;
}
void
const svalue *fd_sval,
const extrinsic_state &ext_state) const
{
- smap->set_state (model, fd_sval, m_valid_read_write, NULL, ext_state);
+ smap->set_state (model, fd_sval, m_valid_read_write, nullptr, ext_state);
}
bool
if (successful)
{
- state_t next_state = NULL;
+ state_t next_state = nullptr;
if (old_state == m_new_stream_socket)
next_state = m_bound_stream_socket;
else if (old_state == m_new_datagram_socket)
if (successful)
{
model->update_for_zero_return (cd, true);
- state_t next_state = NULL;
+ state_t next_state = nullptr;
if (old_state == m_new_stream_socket)
next_state = m_connected_stream_socket;
else if (old_state == m_new_datagram_socket)
{
sm_state_map *smap;
const fd_state_machine *fd_sm;
- if (!get_fd_state (ctxt, &smap, &fd_sm, NULL, NULL))
+ if (!get_fd_state (ctxt, &smap, &fd_sm, nullptr, nullptr))
return;
const extrinsic_state *ext_state = ctxt->get_ext_state ();
if (!ext_state)
sm_state_map *smap;
const fd_state_machine *fd_sm;
std::unique_ptr<sm_context> sm_ctxt;
- if (!get_fd_state (ctxt, &smap, &fd_sm, NULL, &sm_ctxt))
+ if (!get_fd_state (ctxt, &smap, &fd_sm, nullptr, &sm_ctxt))
{
cd.set_any_lhs_with_defaults ();
return true;
sm_state_map *smap;
const fd_state_machine *fd_sm;
std::unique_ptr<sm_context> sm_ctxt;
- if (!get_fd_state (ctxt, &smap, &fd_sm, NULL, &sm_ctxt))
+ if (!get_fd_state (ctxt, &smap, &fd_sm, nullptr, &sm_ctxt))
{
cd.set_any_lhs_with_defaults ();
return true;
sm_state_map *smap;
const fd_state_machine *fd_sm;
std::unique_ptr<sm_context> sm_ctxt;
- if (!get_fd_state (ctxt, &smap, &fd_sm, NULL, &sm_ctxt))
+ if (!get_fd_state (ctxt, &smap, &fd_sm, nullptr, &sm_ctxt))
{
cd.set_any_lhs_with_defaults ();
return true;
sm_state_map *smap;
const fd_state_machine *fd_sm;
std::unique_ptr<sm_context> sm_ctxt;
- if (!get_fd_state (ctxt, &smap, &fd_sm, NULL, &sm_ctxt))
+ if (!get_fd_state (ctxt, &smap, &fd_sm, nullptr, &sm_ctxt))
{
cd.set_any_lhs_with_defaults ();
return true;
sm_state_map *smap;
const fd_state_machine *fd_sm;
std::unique_ptr<sm_context> sm_ctxt;
- if (!get_fd_state (ctxt, &smap, &fd_sm, NULL, &sm_ctxt))
+ if (!get_fd_state (ctxt, &smap, &fd_sm, nullptr, &sm_ctxt))
{
cd.set_any_lhs_with_defaults ();
return true;
sm_state_map *smap;
const fd_state_machine *fd_sm;
std::unique_ptr<sm_context> sm_ctxt;
- if (!get_fd_state (ctxt, &smap, &fd_sm, NULL, &sm_ctxt))
+ if (!get_fd_state (ctxt, &smap, &fd_sm, nullptr, &sm_ctxt))
return true;
const extrinsic_state *ext_state = ctxt->get_ext_state ();
if (!ext_state)
assumed_non_null_state (const char *name, unsigned id,
const frame_region *frame)
: allocation_state (name, id, RS_ASSUMED_NON_NULL,
- NULL, NULL),
+ nullptr, nullptr),
m_frame (frame)
{
gcc_assert (m_frame);
static inline hashval_t hash (const key_type &k)
{
- gcc_assert (k != NULL);
+ gcc_assert (k != nullptr);
gcc_assert (k != reinterpret_cast<key_type> (1));
hashval_t result = 0;
template <typename T>
static inline void mark_empty (T &entry)
{
- entry.m_key = NULL;
+ entry.m_key = nullptr;
}
template <typename T>
static inline bool is_deleted (const T &entry)
template <typename T>
static inline bool is_empty (const T &entry)
{
- return entry.m_key == NULL;
+ return entry.m_key == nullptr;
}
static const bool empty_zero_p = false;
};
enum wording wording)
: m_name (name),
m_wording (wording),
- m_freed (sm->add_state ("freed", RS_FREED, NULL, this))
+ m_freed (sm->add_state ("freed", RS_FREED, nullptr, this))
{
}
deallocator_set::deallocator_set (malloc_state_machine *sm,
enum wording wording)
: m_wording (wording),
- m_unchecked (sm->add_state ("unchecked", RS_UNCHECKED, this, NULL)),
- m_nonnull (sm->add_state ("nonnull", RS_NONNULL, this, NULL))
+ m_unchecked (sm->add_state ("unchecked", RS_UNCHECKED, this, nullptr)),
+ m_nonnull (sm->add_state ("nonnull", RS_NONNULL, this, nullptr))
{
}
{
if (m_deallocator_vec.length () == 1)
return m_deallocator_vec[0];
- return NULL;
+ return nullptr;
}
void
pp_character (pp, '}');
}
-/* Return STATE cast to the custom state subclass, or NULL for the start state.
+/* Return STATE cast to the custom state subclass, or nullptr for the
+ start state.
Everything should be an allocation_state apart from the start state. */
static const allocation_state *
dyn_cast_allocation_state (state_machine::state_t state)
{
if (state->get_id () == 0)
- return NULL;
+ return nullptr;
return static_cast <const allocation_state *> (state);
}
public:
deref_before_check (const malloc_state_machine &sm, tree arg)
: malloc_diagnostic (sm, arg),
- m_deref_enode (NULL),
- m_deref_expr (NULL),
- m_check_enode (NULL)
+ m_deref_enode (nullptr),
+ m_deref_expr (nullptr),
+ m_check_enode (nullptr)
{
gcc_assert (arg);
}
m_realloc (this, "realloc", WORDING_REALLOCATED)
{
gcc_assert (m_start->get_id () == 0);
- m_null = add_state ("null", RS_FREED, NULL, NULL);
- m_non_heap = add_state ("non-heap", RS_NON_HEAP, NULL, NULL);
- m_stop = add_state ("stop", RS_STOP, NULL, NULL);
+ m_null = add_state ("null", RS_FREED, nullptr, nullptr);
+ m_non_heap = add_state ("non-heap", RS_NON_HEAP, nullptr, nullptr);
+ m_stop = add_state ("stop", RS_STOP, nullptr, nullptr);
}
malloc_state_machine::~malloc_state_machine ()
return a custom_deallocator_set for them, consolidating them
to ensure uniqueness of the sets.
- Return NULL if it has no such attributes. */
+ Return nullptr if it has no such attributes. */
const custom_deallocator_set *
malloc_state_machine::
/* Early rejection of decls without attributes. */
tree attrs = DECL_ATTRIBUTES (allocator_fndecl);
if (!attrs)
- return NULL;
+ return nullptr;
/* Otherwise, call maybe_create_custom_deallocator_set,
memoizing the result. */
custom_deallocator_set for them, consolidating them
to ensure uniqueness of the sets.
- Return NULL if it has no such attributes.
+ Return nullptr if it has no such attributes.
Subroutine of get_or_create_custom_deallocator_set which
memoizes the result. */
/* If there weren't any deallocators, bail. */
if (deallocator_vec.length () == 0)
- return NULL;
+ return nullptr;
/* Consolidate, so that we reuse existing deallocator_set
instances. */
tree null_ptr_cst = build_int_cst (TREE_TYPE (ptr), 0);
tristate known_non_null
- = old_model->eval_condition (ptr, NE_EXPR, null_ptr_cst, NULL);
+ = old_model->eval_condition (ptr, NE_EXPR, null_ptr_cst, nullptr);
if (known_non_null.is_unknown ())
{
/* Cast away const-ness for cache-like operations. */
const deallocator *d) const
{
tree diag_arg = sm_ctxt.get_diagnostic_tree (arg);
- const region *freed_reg = NULL;
+ const region *freed_reg = nullptr;
if (const program_state *old_state = sm_ctxt.get_old_program_state ())
{
const region_model *old_model = old_state->m_region_model;
- const svalue *ptr_sval = old_model->get_rvalue (arg, NULL);
- freed_reg = old_model->deref_rvalue (ptr_sval, arg, NULL);
+ const svalue *ptr_sval = old_model->get_rvalue (arg, nullptr);
+ freed_reg = old_model->deref_rvalue (ptr_sval, arg, nullptr);
}
sm_ctxt.warn (node, &call, arg,
std::make_unique<free_of_non_heap>
return m_start;
if (state_a == m_start && assumed_non_null_p (state_b))
return m_start;
- return NULL;
+ return nullptr;
}
/* Return true if calls to FNDECL are known to not affect this sm-state. */
{
smap->set_state (model, old_ptr_sval,
m_free.m_deallocator.m_freed,
- NULL, ext_state);
+ nullptr, ext_state);
smap->set_state (model, new_ptr_sval,
m_free.m_nonnull,
- NULL, ext_state);
+ nullptr, ext_state);
}
/* Hook for get_or_create_region_for_heap_alloc for the case when we want
const svalue *new_ptr_sval,
const extrinsic_state &ext_state) const
{
- smap->set_state (model, new_ptr_sval, m_free.m_nonnull, NULL, ext_state);
+ smap->set_state (model, new_ptr_sval, m_free.m_nonnull, nullptr, ext_state);
}
void
enum tree_code op,
const svalue *rhs) const
{
- if (stmt == NULL)
+ if (stmt == nullptr)
return;
tree rhs_cst = rhs->maybe_get_constant ();
if (id_equal ("exit", DECL_NAME (m_unsafe_fndecl)))
return "_exit";
- return NULL;
+ return nullptr;
}
};
state_entering_handler,
src_enode);
if (dst_enode)
- eg->add_edge (src_enode, dst_enode, NULL, /*state_change (),*/
+ eg->add_edge (src_enode, dst_enode, nullptr, /*state_change (),*/
true, /* assume does work */
std::make_unique<signal_delivery_edge_info_t> ());
}
macro when we're describing them. */
return linemap_resolve_location (line_table, loc,
LRK_SPELLING_LOCATION,
- NULL);
+ nullptr);
else
return pending_diagnostic::fixup_location (loc, primary);
}
case BIT_AND_EXPR:
case RSHIFT_EXPR:
- return NULL;
+ return nullptr;
}
}
break;
}
- return NULL;
+ return nullptr;
}
/* Return true iff FNDECL should be considered to be an assertion failure
tree expr) const
{
const region_model *old_model = sm_ctxt.get_old_region_model ();
- const svalue *sval = old_model->get_rvalue (expr, NULL);
+ const svalue *sval = old_model->get_rvalue (expr, nullptr);
state_t state = sm_ctxt.get_state (stmt, sval);
enum bounds b;
if (get_taint (state, TREE_TYPE (expr), &b))
enum tree_code op,
const svalue *rhs) const
{
- if (stmt == NULL)
+ if (stmt == nullptr)
return;
if (lhs->get_kind () == SK_UNKNOWN
if (!INTEGRAL_TYPE_P (TREE_TYPE (divisor_expr)))
return;
- const svalue *divisor_sval = old_model->get_rvalue (divisor_expr, NULL);
+ const svalue *divisor_sval = old_model->get_rvalue (divisor_expr, nullptr);
state_t state = sm_ctxt.get_state (assign, divisor_sval);
enum bounds b;
if (!ext_state)
return;
- smap->set_state (this, sval, taint_sm.m_tainted, NULL, *ext_state);
+ smap->set_state (this, sval, taint_sm.m_tainted, nullptr, *ext_state);
}
/* Return true if SVAL could possibly be attacker-controlled. */
if (const program_state *old_state = get_old_program_state ())
return old_state->m_region_model;
else
- return NULL;
+ return nullptr;
}
/* Create instances of the various state machines, each using LOGGER,
const svalue *,
const extrinsic_state &) const
{
- return NULL;
+ return nullptr;
}
virtual bool
}
/* Attempt to get a state for the merger of STATE_A and STATE_B,
- or return NULL if merging shouldn't occur, so that differences
+ or return nullptr if merging shouldn't occur, so that differences
between sm-state will lead to separate exploded nodes.
Most state machines will only merge equal states, but can
state_t state_b ATTRIBUTE_UNUSED) const
{
/* By default, non-equal sm states should inhibit merger of enodes. */
- return NULL;
+ return nullptr;
}
void validate (state_t s) const;
virtual path_context *get_path_context () const
{
- return NULL;
+ return nullptr;
}
/* Are we handling an external function with unknown side effects? */
if (snode->entry_p ())
{
add_to_worklist
- (function_point::before_supernode (snode, NULL),
+ (function_point::before_supernode (snode, nullptr),
worklist, logger);
}
}
We can't just check for equality; consider the case of
"s.field = EXPR;" where the stmt writes to the only field
of "s", and there's no padding. */
- const region *lhs_reg = model.get_lvalue (lhs, NULL);
- const region *decl_reg = model.get_lvalue (decl, NULL);
+ const region *lhs_reg = model.get_lvalue (lhs, nullptr);
+ const region *decl_reg = model.get_lvalue (decl, nullptr);
if (same_binding_p (lhs_reg, decl_reg,
model.get_manager ()->get_store_manager ()))
return true;
const supernode &n,
bool within_table) const
{
- if (m_map == NULL)
+ if (m_map == nullptr)
return false;
if (within_table)
Determine which points to dump. */
auto_vec<function_point> points;
if (n.entry_p () || n.m_returning_call)
- points.safe_push (function_point::before_supernode (&n, NULL));
+ points.safe_push (function_point::before_supernode (&n, nullptr));
else
for (auto inedge : n.m_preds)
points.safe_push (function_point::before_supernode (&n, inedge));
if (within_row)
return;
- if (m_map == NULL)
+ if (m_map == nullptr)
return;
if (stmt->code == GIMPLE_PHI)
= const_cast <decl_map_t&> (m_decl_map).get (decl))
return *slot;
else
- return NULL;
+ return nullptr;
}
state_purge_per_decl &
const svalue *sval = (*iter).second;
const svalue **other_slot
= const_cast <map_t &> (other.m_map).get (key);
- if (other_slot == NULL)
+ if (other_slot == nullptr)
return false;
if (sval != *other_slot)
return false;
{
/* Reuse the get_rvalue logic from region_model. */
region_model m (mgr);
- return m.get_rvalue (path_var (val, 0), NULL);
+ return m.get_rvalue (path_var (val, 0), nullptr);
}
/* Bind values from CONSTRUCTOR to this map, relative to
void
binding_cluster::clobber_region (store_manager *mgr, const region *reg)
{
- remove_overlapping_bindings (mgr, reg, NULL, NULL);
+ remove_overlapping_bindings (mgr, reg, nullptr, nullptr);
}
/* Remove any bindings for REG within this cluster. */
const region *reg) const
{
if (reg->empty_p ())
- return NULL;
+ return nullptr;
const binding_key *reg_binding = binding_key::make (mgr, reg);
const svalue *sval = m_map.get (reg_binding);
if (sval)
return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
parent_sval, reg);
}
- return NULL;
+ return nullptr;
}
/* Get any value bound for REG within this cluster. */
return compound_sval;
/* Otherwise, the initial value, or uninitialized. */
- return NULL;
+ return nullptr;
}
/* Attempt to get a compound_svalue for the bindings within the cluster
For example, REG could be one element within an array of structs.
- Return the resulting compound_svalue, or NULL if there's a problem. */
+ Return the resulting compound_svalue, or nullptr if there's a problem. */
const svalue *
binding_cluster::maybe_get_compound_binding (store_manager *mgr,
region_offset cluster_offset
= m_base_region->get_offset (mgr->get_svalue_manager ());
if (cluster_offset.symbolic_p ())
- return NULL;
+ return nullptr;
region_offset reg_offset = reg->get_offset (mgr->get_svalue_manager ());
if (reg_offset.symbolic_p ())
- return NULL;
+ return nullptr;
if (reg->empty_p ())
- return NULL;
+ return nullptr;
region_model_manager *sval_mgr = mgr->get_svalue_manager ();
bit_size_t reg_bit_size;
if (!reg->get_bit_size (&reg_bit_size))
- return NULL;
+ return nullptr;
bit_range reg_range (reg_offset.get_bit_offset (),
reg_bit_size);
it overlaps with offset_concrete_key. */
default_map.remove_overlapping_bindings (mgr,
offset_concrete_key,
- NULL, NULL, false);
+ nullptr, nullptr, false);
}
else if (bound_range.contains_p (reg_range, &subrange))
{
it overlaps with overlap_concrete_key. */
default_map.remove_overlapping_bindings (mgr,
overlap_concrete_key,
- NULL, NULL, false);
+ nullptr, nullptr, false);
}
}
else
/* Can't handle symbolic bindings. */
- return NULL;
+ return nullptr;
}
if (result_map.elements () == 0)
- return NULL;
+ return nullptr;
/* Merge any bindings from default_map into result_map. */
for (auto iter : default_map)
/* At least one of CLUSTER_A and CLUSTER_B are non-NULL, but either
could be NULL. Handle these cases. */
- if (cluster_a == NULL)
+ if (cluster_a == nullptr)
{
- gcc_assert (cluster_b != NULL);
+ gcc_assert (cluster_b != nullptr);
gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
out_cluster->make_unknown_relative_to (cluster_b, out_store, mgr);
return true;
}
- if (cluster_b == NULL)
+ if (cluster_b == nullptr)
{
- gcc_assert (cluster_a != NULL);
+ gcc_assert (cluster_a != nullptr);
gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
out_cluster->make_unknown_relative_to (cluster_a, out_store, mgr);
return true;
}
/* The "both inputs are non-NULL" case. */
- gcc_assert (cluster_a != NULL && cluster_b != NULL);
+ gcc_assert (cluster_a != nullptr && cluster_b != nullptr);
gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
}
}
-/* Get any svalue bound to KEY, or NULL. */
+/* Get any svalue bound to KEY, or nullptr. */
const svalue *
binding_cluster::get_any_value (const binding_key *key) const
const svalue *
binding_cluster::maybe_get_simple_value (store_manager *mgr) const
{
- /* Fail gracefully if MGR is NULL to make it easier to dump store
+ /* Fail gracefully if MGR is nullptr to make it easier to dump store
instances in the debugger. */
- if (mgr == NULL)
- return NULL;
+ if (mgr == nullptr)
+ return nullptr;
if (m_map.elements () != 1)
- return NULL;
+ return nullptr;
if (m_base_region->empty_p ())
- return NULL;
+ return nullptr;
const binding_key *key = binding_key::make (mgr, m_base_region);
return get_any_value (key);
binding_cluster *c = (*iter).second;
binding_cluster **other_slot
= const_cast <cluster_map_t &> (other.m_cluster_map).get (reg);
- if (other_slot == NULL)
+ if (other_slot == nullptr)
return false;
if (*c != **other_slot)
return false;
/* Dump a representation of this store to PP, using SIMPLE to control how
svalues and regions are printed.
- MGR is used for simplifying dumps if non-NULL, but can also be NULL
+ MGR is used for simplifying dumps if non-NULL, but can also be nullptr
(to make it easier to use from the debugger). */
void
store::dump (bool simple) const
{
tree_dump_pretty_printer pp (stderr);
- dump_to_pp (&pp, simple, true, NULL);
+ dump_to_pp (&pp, simple, true, nullptr);
pp_newline (&pp);
}
return store_widget;
}
-/* Get any svalue bound to REG, or NULL. */
+/* Get any svalue bound to REG, or nullptr. */
const svalue *
store::get_any_binding (store_manager *mgr, const region *reg) const
binding_cluster **cluster_slot
= const_cast <cluster_map_t &> (m_cluster_map).get (base_reg);
if (!cluster_slot)
- return NULL;
+ return nullptr;
return (*cluster_slot)->get_any_binding (mgr, reg);
}
{
/* Reject attempting to bind values into a symbolic region
for an unknown ptr; merely invalidate values below. */
- lhs_cluster = NULL;
+ lhs_cluster = nullptr;
/* The LHS of the write is *UNKNOWN. If the RHS is a pointer,
then treat the region being pointed to as having escaped. */
{
/* Reject attempting to bind values into an untracked region;
merely invalidate values below. */
- lhs_cluster = NULL;
+ lhs_cluster = nullptr;
}
/* Bindings to a cluster can affect other clusters if a symbolic
const region *iter_base_reg = (*iter).first;
binding_cluster *iter_cluster = (*iter).second;
if (iter_base_reg != lhs_base_reg
- && (lhs_cluster == NULL
+ && (lhs_cluster == nullptr
|| lhs_cluster->symbolic_p ()
|| iter_cluster->symbolic_p ()))
{
purge_cluster (iter);
}
-/* Get the cluster for BASE_REG, or NULL (const version). */
+/* Get the cluster for BASE_REG, or nullptr (const version). */
const binding_cluster *
store::get_cluster (const region *base_reg) const
= const_cast <cluster_map_t &> (m_cluster_map).get (base_reg))
return *slot;
else
- return NULL;
+ return nullptr;
}
-/* Get the cluster for BASE_REG, or NULL (non-const version). */
+/* Get the cluster for BASE_REG, or nullptr (non-const version). */
binding_cluster *
store::get_cluster (const region *base_reg)
if (binding_cluster **slot = m_cluster_map.get (base_reg))
return *slot;
else
- return NULL;
+ return nullptr;
}
/* Get the cluster for BASE_REG, creating it if doesn't already exist. */
delete cluster;
return;
}
- /* Pass NULL for the maybe_live_values here, as we don't want to
+ /* Pass nullptr for the maybe_live_values here, as we don't want to
record the old svalues as being maybe-bound. */
- cluster->remove_overlapping_bindings (mgr, reg, uncertainty, NULL);
+ cluster->remove_overlapping_bindings (mgr, reg, uncertainty, nullptr);
}
}
caller_sval =
reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
set_value (mgr, caller_dest_reg,
- caller_sval, NULL /* uncertainty_t * */);
+ caller_sval, nullptr /* uncertainty_t * */);
}
break;
caller_sval =
reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
set_value (mgr, caller_dest_reg,
- caller_sval, NULL /* uncertainty_t * */);
+ caller_sval, nullptr /* uncertainty_t * */);
}
break;
static void
test_binding_key_overlap ()
{
- store_manager mgr (NULL);
+ store_manager mgr (nullptr);
/* Various 8-bit bindings. */
const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
static int cmp (const binding_key *, const binding_key *);
virtual const concrete_binding *dyn_cast_concrete_binding () const
- { return NULL; }
+ { return nullptr; }
virtual const symbolic_binding *dyn_cast_symbolic_binding () const
- { return NULL; }
+ { return nullptr; }
};
/* A concrete range of bits. */
static int cmp_ptr_ptr (const void *, const void *);
void mark_deleted () { m_region = reinterpret_cast<const region *> (1); }
- void mark_empty () { m_region = NULL; }
+ void mark_empty () { m_region = nullptr; }
bool is_deleted () const
{ return m_region == reinterpret_cast<const region *> (1); }
- bool is_empty () const { return m_region == NULL; }
+ bool is_empty () const { return m_region == nullptr; }
private:
const region *m_region;
if (slot)
return *slot;
else
- return NULL;
+ return nullptr;
}
bool put (const binding_key *k, const svalue *v)
{
{
cgraph_node *ultimate_node = edge->callee->ultimate_alias_target ();
if (!ultimate_node)
- return NULL;
+ return nullptr;
return ultimate_node->get_fun ();
}
{
const gcall *call = dyn_cast<const gcall *> (stmt);
if (!call)
- return NULL;
+ return nullptr;
cgraph_edge *edge
= cgraph_node::get (fun->decl)->get_edge (const_cast <gimple *> (stmt));
if (!edge)
- return NULL;
+ return nullptr;
if (!edge->callee)
- return NULL; /* e.g. for a function pointer. */
+ return nullptr; /* e.g. for a function pointer. */
if (!get_ultimate_function_for_cgraph_edge (edge))
- return NULL;
+ return nullptr;
return edge;
}
FOR_ALL_BB_FN (bb, fun)
{
/* The initial supernode for the BB gets the phi nodes (if any). */
- supernode *node_for_stmts = add_node (fun, bb, NULL, phi_nodes (bb));
+ supernode *node_for_stmts
+ = add_node (fun, bb, nullptr, phi_nodes (bb));
m_bb_to_initial_node.put (bb, node_for_stmts);
for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi);
gsi_next (&gpi))
m_stmt_to_node_t.put (stmt, node_for_stmts);
m_stmt_uids.make_uid_unique (stmt);
if (cgraph_edge *edge = supergraph_call_edge (fun, stmt))
- {
- m_cgraph_edge_to_caller_prev_node.put(edge, node_for_stmts);
- node_for_stmts = add_node (fun, bb, as_a <gcall *> (stmt),
- NULL);
- m_cgraph_edge_to_caller_next_node.put (edge, node_for_stmts);
- }
+ {
+ m_cgraph_edge_to_caller_prev_node.put(edge, node_for_stmts);
+ node_for_stmts = add_node (fun, bb, as_a <gcall *> (stmt),
+ nullptr);
+ m_cgraph_edge_to_caller_next_node.put (edge, node_for_stmts);
+ }
else
{
// maybe call is via a function pointer
if (!edge || !edge->callee)
{
supernode *old_node_for_stmts = node_for_stmts;
- node_for_stmts = add_node (fun, bb, call, NULL);
+ node_for_stmts = add_node (fun, bb, call, nullptr);
superedge *sedge
= new callgraph_superedge (old_node_for_stmts,
node_for_stmts,
SUPEREDGE_INTRAPROCEDURAL_CALL,
- NULL);
+ nullptr);
add_edge (sedge);
}
}
}
/* If this is an intraprocedural superedge, return the associated
- CFG edge. Otherwise, return NULL. */
+ CFG edge. Otherwise, return nullptr. */
::edge
superedge::get_any_cfg_edge () const
{
if (const cfg_superedge *sub = dyn_cast_cfg_superedge ())
return sub->get_cfg_edge ();
- return NULL;
+ return nullptr;
}
/* If this is an interprocedural superedge, return the associated
- cgraph_edge *. Otherwise, return NULL. */
+ cgraph_edge *. Otherwise, return nullptr. */
cgraph_edge *
superedge::get_any_callgraph_edge () const
{
if (const callgraph_superedge *sub = dyn_cast_callgraph_superedge ())
return sub->m_cedge;
- return NULL;
+ return nullptr;
}
/* Build a description of this superedge (e.g. "true" for the true
i.ptr = gimple_seq_first (*pseq);
i.seq = pseq;
- i.bb = i.ptr ? gimple_bb (i.ptr) : NULL;
+ i.bb = i.ptr ? gimple_bb (i.ptr) : nullptr;
return i;
}
gimple *get_last_stmt () const
{
if (m_stmts.length () == 0)
- return NULL;
+ return nullptr;
return m_stmts[m_stmts.length () - 1];
}
gcall *get_final_call () const
{
gimple *stmt = get_last_stmt ();
- if (stmt == NULL)
- return NULL;
+ if (stmt == nullptr)
+ return nullptr;
return dyn_cast<gcall *> (stmt);
}
enum edge_kind get_kind () const { return m_kind; }
- virtual cfg_superedge *dyn_cast_cfg_superedge () { return NULL; }
- virtual const cfg_superedge *dyn_cast_cfg_superedge () const { return NULL; }
- virtual const switch_cfg_superedge *dyn_cast_switch_cfg_superedge () const { return NULL; }
+ virtual cfg_superedge *dyn_cast_cfg_superedge () { return nullptr; }
+ virtual const cfg_superedge *dyn_cast_cfg_superedge () const { return nullptr; }
+ virtual const switch_cfg_superedge *dyn_cast_switch_cfg_superedge () const { return nullptr; }
virtual const eh_dispatch_cfg_superedge *dyn_cast_eh_dispatch_cfg_superedge () const { return nullptr; }
virtual const eh_dispatch_try_cfg_superedge *dyn_cast_eh_dispatch_try_cfg_superedge () const { return nullptr; }
virtual const eh_dispatch_allowed_cfg_superedge *dyn_cast_eh_dispatch_allowed_cfg_superedge () const { return nullptr; }
- virtual callgraph_superedge *dyn_cast_callgraph_superedge () { return NULL; }
- virtual const callgraph_superedge *dyn_cast_callgraph_superedge () const { return NULL; }
- virtual call_superedge *dyn_cast_call_superedge () { return NULL; }
- virtual const call_superedge *dyn_cast_call_superedge () const { return NULL; }
- virtual return_superedge *dyn_cast_return_superedge () { return NULL; }
- virtual const return_superedge *dyn_cast_return_superedge () const { return NULL; }
+ virtual callgraph_superedge *dyn_cast_callgraph_superedge () { return nullptr; }
+ virtual const callgraph_superedge *dyn_cast_callgraph_superedge () const { return nullptr; }
+ virtual call_superedge *dyn_cast_call_superedge () { return nullptr; }
+ virtual const call_superedge *dyn_cast_call_superedge () const { return nullptr; }
+ virtual return_superedge *dyn_cast_return_superedge () { return nullptr; }
+ virtual const return_superedge *dyn_cast_return_superedge () const { return nullptr; }
::edge get_any_cfg_edge () const;
cgraph_edge *get_any_callgraph_edge () const;
inline bool
is_a_helper <const switch_cfg_superedge *>::test (const superedge *sedge)
{
- return sedge->dyn_cast_switch_cfg_superedge () != NULL;
+ return sedge->dyn_cast_switch_cfg_superedge () != nullptr;
}
namespace ana {
inline bool
is_a_helper <const eh_dispatch_cfg_superedge *>::test (const superedge *sedge)
{
- return sedge->dyn_cast_eh_dispatch_cfg_superedge () != NULL;
+ return sedge->dyn_cast_eh_dispatch_cfg_superedge () != nullptr;
}
namespace ana {
inline bool
is_a_helper <const eh_dispatch_try_cfg_superedge *>::test (const superedge *sedge)
{
- return sedge->dyn_cast_eh_dispatch_try_cfg_superedge () != NULL;
+ return sedge->dyn_cast_eh_dispatch_try_cfg_superedge () != nullptr;
}
namespace ana {
inline bool
is_a_helper <const eh_dispatch_allowed_cfg_superedge *>::test (const superedge *sedge)
{
- return sedge->dyn_cast_eh_dispatch_allowed_cfg_superedge () != NULL;
+ return sedge->dyn_cast_eh_dispatch_allowed_cfg_superedge () != nullptr;
}
namespace ana {
}
/* If this svalue is a region_svalue, return the region it points to.
- Otherwise return NULL. */
+ Otherwise return nullptr. */
const region *
svalue::maybe_get_region () const
if (const region_svalue *region_sval = dyn_cast_region_svalue ())
return region_sval->get_pointee ();
else
- return NULL;
+ return nullptr;
}
/* If this svalue is a cast (i.e a unaryop NOP_EXPR or VIEW_CONVERT_EXPR),
return the underlying svalue.
- Otherwise return NULL. */
+ Otherwise return nullptr. */
const svalue *
svalue::maybe_undo_cast () const
if (op == NOP_EXPR || op == VIEW_CONVERT_EXPR)
return unaryop_sval->get_arg ();
}
- return NULL;
+ return nullptr;
}
/* If this svalue is an unmergeable decorator around another svalue, return
}
/* Attempt to merge THIS with OTHER, returning the merged svalue.
- Return NULL if not mergeable. */
+ Return nullptr if not mergeable. */
const svalue *
svalue::can_merge_p (const svalue *other,
model_merger *merger) const
{
if (!(get_type () && other->get_type ()))
- return NULL;
+ return nullptr;
if (!types_compatible_p (get_type (), other->get_type ()))
- return NULL;
+ return nullptr;
/* Reject attempts to merge unmergeable svalues. */
if ((get_kind () == SK_UNMERGEABLE)
|| (other->get_kind () == SK_UNMERGEABLE))
- return NULL;
+ return nullptr;
/* Reject attempts to merge poisoned svalues with other svalues
(either non-poisoned, or other kinds of poison), so that e.g.
we identify paths in which a variable is conditionally uninitialized. */
if (get_kind () == SK_POISONED
|| other->get_kind () == SK_POISONED)
- return NULL;
+ return nullptr;
/* Reject attempts to merge NULL pointers with not-NULL-pointers. */
if (POINTER_TYPE_P (get_type ()))
if (zerop (cst1))
null1 = true;
if (null0 != null1)
- return NULL;
+ return nullptr;
}
/* Reject merging svalues that have non-purgable sm-state,
to avoid falsely reporting memory leaks by merging them
with something else. */
if (!merger->mergeable_svalue_p (this))
- return NULL;
+ return nullptr;
if (!merger->mergeable_svalue_p (other))
- return NULL;
+ return nullptr;
/* Widening. */
/* Merge: (new_cst, existing_cst) -> widen (existing, new). */
/* Determine if this svalue is either within LIVE_SVALUES, or is implicitly
live with respect to LIVE_SVALUES and MODEL.
- LIVE_SVALUES can be NULL, in which case determine if this svalue is
+ LIVE_SVALUES can be nullptr, in which case determine if this svalue is
intrinsically live. */
bool
region_model_manager *) const
{
/* By default, don't fold. */
- return NULL;
+ return nullptr;
}
/* Base implementation of svalue::all_zeroes_p.
}
/* If this svalue is a pointer, attempt to determine the base region it points
- to. Return NULL on any problems. */
+ to. Return nullptr on any problems. */
const region *
svalue::maybe_get_deref_base_region () const
switch (iter->get_kind ())
{
default:
- return NULL;
+ return nullptr;
case SK_REGION:
{
continue;
default:
- return NULL;
+ return nullptr;
}
- return NULL;
+ return nullptr;
}
}
}
}
/* Otherwise, don't fold. */
- return NULL;
+ return nullptr;
}
/* Implementation of svalue::all_zeroes_p for constant_svalue. */
a popped stack frame. */
if (model->region_exists_p (m_reg))
{
- const svalue *reg_sval = model->get_store_value (m_reg, NULL);
+ const svalue *reg_sval = model->get_store_value (m_reg, nullptr);
if (reg_sval == this)
return true;
}
live in the external caller. */
if (initial_value_of_param_p ())
if (const frame_region *frame_reg = m_reg->maybe_get_frame_region ())
- if (frame_reg->get_calling_frame () == NULL)
+ if (frame_reg->get_calling_frame () == nullptr)
return true;
return false;
break;
}
/* Otherwise, don't fold. */
- return NULL;
+ return nullptr;
}
/* class binop_svalue : public svalue. */
}
}
- return NULL;
+ return nullptr;
}
/* class bits_within_svalue : public svalue. */
}
else
/* If we have any symbolic keys we can't get it as bits. */
- return NULL;
+ return nullptr;
}
return mgr->get_or_create_compound_svalue (type, result_map);
}
const char *prefix = nullptr) const;
virtual const region_svalue *
- dyn_cast_region_svalue () const { return NULL; }
+ dyn_cast_region_svalue () const { return nullptr; }
virtual const constant_svalue *
- dyn_cast_constant_svalue () const { return NULL; }
+ dyn_cast_constant_svalue () const { return nullptr; }
virtual const poisoned_svalue *
- dyn_cast_poisoned_svalue () const { return NULL; }
+ dyn_cast_poisoned_svalue () const { return nullptr; }
virtual const setjmp_svalue *
- dyn_cast_setjmp_svalue () const { return NULL; }
+ dyn_cast_setjmp_svalue () const { return nullptr; }
virtual const initial_svalue *
- dyn_cast_initial_svalue () const { return NULL; }
+ dyn_cast_initial_svalue () const { return nullptr; }
virtual const unaryop_svalue *
- dyn_cast_unaryop_svalue () const { return NULL; }
+ dyn_cast_unaryop_svalue () const { return nullptr; }
virtual const binop_svalue *
- dyn_cast_binop_svalue () const { return NULL; }
+ dyn_cast_binop_svalue () const { return nullptr; }
virtual const sub_svalue *
- dyn_cast_sub_svalue () const { return NULL; }
+ dyn_cast_sub_svalue () const { return nullptr; }
virtual const repeated_svalue *
- dyn_cast_repeated_svalue () const { return NULL; }
+ dyn_cast_repeated_svalue () const { return nullptr; }
virtual const bits_within_svalue *
- dyn_cast_bits_within_svalue () const { return NULL; }
+ dyn_cast_bits_within_svalue () const { return nullptr; }
virtual const unmergeable_svalue *
- dyn_cast_unmergeable_svalue () const { return NULL; }
+ dyn_cast_unmergeable_svalue () const { return nullptr; }
virtual const widening_svalue *
- dyn_cast_widening_svalue () const { return NULL; }
+ dyn_cast_widening_svalue () const { return nullptr; }
virtual const compound_svalue *
- dyn_cast_compound_svalue () const { return NULL; }
+ dyn_cast_compound_svalue () const { return nullptr; }
virtual const conjured_svalue *
- dyn_cast_conjured_svalue () const { return NULL; }
+ dyn_cast_conjured_svalue () const { return nullptr; }
virtual const asm_output_svalue *
- dyn_cast_asm_output_svalue () const { return NULL; }
+ dyn_cast_asm_output_svalue () const { return nullptr; }
virtual const const_fn_result_svalue *
- dyn_cast_const_fn_result_svalue () const { return NULL; }
+ dyn_cast_const_fn_result_svalue () const { return nullptr; }
tree maybe_get_constant () const;
const region *maybe_get_region () const;
: svalue (complexity (reg), id, type),
m_reg (reg)
{
- gcc_assert (m_reg != NULL);
+ gcc_assert (m_reg != nullptr);
}
enum svalue_kind get_kind () const final override { return SK_REGION; }
initial_svalue (symbol::id_t id, tree type, const region *reg)
: svalue (complexity (reg), id, type), m_reg (reg)
{
- gcc_assert (m_reg != NULL);
+ gcc_assert (m_reg != nullptr);
}
enum svalue_kind get_kind () const final override { return SK_INITIAL; }
&& m_idx == other.m_idx);
}
- /* Use m_stmt to mark empty/deleted, as m_type can be NULL for
+ /* Use m_stmt to mark empty/deleted, as m_type can be NULL_TREE for
legitimate instances. */
void mark_deleted () { m_stmt = reinterpret_cast<const gimple *> (1); }
- void mark_empty () { m_stmt = NULL; }
+ void mark_empty () { m_stmt = nullptr; }
bool is_deleted () const
{
return m_stmt == reinterpret_cast<const gimple *> (1);
}
- bool is_empty () const { return m_stmt == NULL; }
+ bool is_empty () const { return m_stmt == nullptr; }
tree m_type;
const gimple *m_stmt;
: svalue (complexity (id_reg), id, type),
m_stmt (stmt), m_id_reg (id_reg), m_idx (idx)
{
- gcc_assert (m_stmt != NULL);
+ gcc_assert (m_stmt != nullptr);
}
enum svalue_kind get_kind () const final override { return SK_CONJURED; }
return true;
}
- /* Use m_asm_string to mark empty/deleted, as m_type can be NULL for
+ /* Use m_asm_string to mark empty/deleted, as m_type can be NULL_TREE for
legitimate instances. */
void mark_deleted () { m_asm_string = reinterpret_cast<const char *> (1); }
- void mark_empty () { m_asm_string = NULL; }
+ void mark_empty () { m_asm_string = nullptr; }
bool is_deleted () const
{
return m_asm_string == reinterpret_cast<const char *> (1);
}
- bool is_empty () const { return m_asm_string == NULL; }
+ bool is_empty () const { return m_asm_string == nullptr; }
tree m_type;
const char *m_asm_string;
/* Use m_fndecl to mark empty/deleted. */
void mark_deleted () { m_fndecl = reinterpret_cast<tree> (1); }
- void mark_empty () { m_fndecl = NULL; }
+ void mark_empty () { m_fndecl = NULL_TREE; }
bool is_deleted () const
{
return m_fndecl == reinterpret_cast<tree> (1);
}
- bool is_empty () const { return m_fndecl == NULL; }
+ bool is_empty () const { return m_fndecl == NULL_TREE; }
tree m_type;
tree m_fndecl;
if (const program_state *new_state = sm_ctxt.get_new_program_state ())
{
const region_model *new_model = new_state->m_region_model;
- const svalue *ptr_sval = new_model->get_rvalue (ap, NULL);
- const region *reg = new_model->deref_rvalue (ptr_sval, ap, NULL);
- const svalue *impl_sval = new_model->get_store_value (reg, NULL);
+ const svalue *ptr_sval = new_model->get_rvalue (ap, nullptr);
+ const region *reg = new_model->deref_rvalue (ptr_sval, ap, nullptr);
+ const svalue *impl_sval = new_model->get_store_value (reg, nullptr);
if (const svalue *cast = impl_sval->maybe_undo_cast ())
impl_sval = cast;
return impl_sval;
}
}
- return NULL;
+ return nullptr;
}
/* Abstract class for diagnostics relating to va_list_state_machine. */
return "va_end";
}
}
- return NULL;
+ return nullptr;
}
const va_list_state_machine &m_sm;
const svalue *ap_sval, tree ap_tree,
const program_state *final_state)
: va_list_sm_diagnostic (sm, ap_sval, ap_tree),
- m_start_event_fnname (NULL),
+ m_start_event_fnname (nullptr),
m_final_state ()
{
if (final_state)
/* Get the svalue with associated va_list_state_machine state for
ARG_IDX of CALL to va_copy, if SM_CTXT supports this,
- or NULL otherwise. */
+ or nullptr otherwise. */
static const svalue *
get_stateful_va_copy_arg (sm_context &sm_ctxt,
if (const program_state *new_state = sm_ctxt.get_new_program_state ())
{
const region_model *new_model = new_state->m_region_model;
- const svalue *arg = get_va_copy_arg (new_model, NULL, call, arg_idx);
+ const svalue *arg = get_va_copy_arg (new_model, nullptr, call, arg_idx);
return arg;
}
- return NULL;
+ return nullptr;
}
/* Update state machine for a "va_copy" call. */
in_va_list
= model->check_for_poison (in_va_list,
get_va_list_diag_arg (cd.get_arg_tree (1)),
- NULL,
+ nullptr,
cd.get_ctxt ());
const region *out_dst_reg
}
/* If AP_SVAL is a pointer to a var_arg_region, return that var_arg_region.
- Otherwise return NULL. */
+ Otherwise return nullptr. */
static const var_arg_region *
maybe_get_var_arg_region (const svalue *ap_sval)
{
if (const region *reg = ap_sval->maybe_get_region ())
return reg->dyn_cast_var_arg_region ();
- return NULL;
+ return nullptr;
}
/* Handler for "__builtin_va_arg". */