break;
case BIT_AND_EXPR:
if (cst1)
- if (zerop (cst1) && INTEGRAL_TYPE_P (type))
- /* "(ARG0 & 0)" -> "0". */
- return get_or_create_constant_svalue (build_int_cst (type, 0));
+ {
+ if (zerop (cst1) && INTEGRAL_TYPE_P (type))
+ /* "(ARG0 & 0)" -> "0". */
+ return get_or_create_constant_svalue (build_int_cst (type, 0));
+
+ /* Support masking out bits from a compound_svalue, as this
+ is generated when accessing bitfields. */
+ if (const compound_svalue *compound_sval
+ = arg0->dyn_cast_compound_svalue ())
+ {
+ const binding_map &map = compound_sval->get_map ();
+ unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (cst1);
+ /* If "mask" is a contiguous range of set bits, see if the
+ compound_sval has a value for those bits. */
+ bit_range bits (0, 0);
+ if (bit_range::from_mask (mask, &bits))
+ {
+ const concrete_binding *conc
+ = get_store_manager ()->get_concrete_binding (bits,
+ BK_direct);
+ if (const svalue *sval = map.get (conc))
+ {
+ /* We have a value;
+ shift it by the correct number of bits. */
+ const svalue *lhs = get_or_create_cast (type, sval);
+ HOST_WIDE_INT bit_offset
+ = bits.get_start_bit_offset ().to_shwi ();
+ tree shift_amt = build_int_cst (type, bit_offset);
+ const svalue *shift_sval
+ = get_or_create_constant_svalue (shift_amt);
+ const svalue *shifted_sval
+ = get_or_create_binop (type,
+ LSHIFT_EXPR,
+ lhs, shift_sval);
+ /* Reapply the mask (needed for negative
+ signed bitfields). */
+ return get_or_create_binop (type,
+ BIT_AND_EXPR,
+ shifted_sval, arg1);
+ }
+ }
+ }
+ }
break;
case TRUTH_ANDIF_EXPR:
case TRUTH_AND_EXPR:
break;
case BIT_FIELD_REF:
- return m_mgr->get_or_create_unknown_svalue (TREE_TYPE (pv.m_tree));
+ {
+ tree expr = pv.m_tree;
+ tree op0 = TREE_OPERAND (expr, 0);
+ const region *reg = get_lvalue (op0, ctxt);
+ tree num_bits = TREE_OPERAND (expr, 1);
+ tree first_bit_offset = TREE_OPERAND (expr, 2);
+ gcc_assert (TREE_CODE (num_bits) == INTEGER_CST);
+ gcc_assert (TREE_CODE (first_bit_offset) == INTEGER_CST);
+ bit_range bits (TREE_INT_CST_LOW (first_bit_offset),
+ TREE_INT_CST_LOW (num_bits));
+ return get_rvalue_for_bits (TREE_TYPE (expr), reg, bits);
+ }
case SSA_NAME:
case VAR_DECL:
return m_mgr->get_symbolic_region (ptr_sval);
}
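
A worked illustration of the BIT_AND_EXPR fold above (an illustrative sketch with made-up values, not part of the patch): masked loads of a whole storage unit arise, for instance, when fold's optimize_bit_field_compare turns a bitfield comparison into "unit & mask".  If arg0 is a compound_svalue whose map binds bit_range (1, 1) to the constant 1 (a one-bit field at bit 1 that was set to 1) and cst1 is 0x2, then bit_range::from_mask (0x2) yields bit_range (1, 1), the map lookup finds the constant, and the fold rebuilds the result as (1 << 1) & 0x2 == 2, the same value the masked load would read from memory.

  /* Sketch only: the arithmetic the fold performs, on plain integers;
     the names mirror the variables above but this is not analyzer code.  */
  static unsigned int
  folded_bit_and (unsigned int field_value, unsigned int start_bit,
                  unsigned int mask)
  {
    /* Corresponds to get_or_create_cast, then LSHIFT_EXPR by the
       binding's start bit, then BIT_AND_EXPR with the original mask.  */
    return (field_value << start_bit) & mask;
  }

Here folded_bit_and (1, 1, 0x2) == 2; the final mask matters once the bound value is a negative signed bitfield (see the new test file below).
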
+/* Attempt to get BITS within any value of REG, as TYPE.
+ In particular, extract values from compound_svalues for the case
+ where there's a concrete binding at BITS.
+ Return an unknown svalue if we can't handle the given case. */
+
+const svalue *
+region_model::get_rvalue_for_bits (tree type,
+ const region *reg,
+ const bit_range &bits)
+{
+ const svalue *sval = get_store_value (reg);
+ if (const compound_svalue *compound_sval = sval->dyn_cast_compound_svalue ())
+ {
+ const binding_map &map = compound_sval->get_map ();
+ binding_map result_map;
+ for (auto iter : map)
+ {
+ const binding_key *key = iter.first;
+ if (const concrete_binding *conc_key
+ = key->dyn_cast_concrete_binding ())
+ {
+ /* Ignore concrete bindings outside BITS. */
+ if (!conc_key->get_bit_range ().intersects_p (bits))
+ continue;
+ if ((conc_key->get_start_bit_offset ()
+ < bits.get_start_bit_offset ())
+ || (conc_key->get_next_bit_offset ()
+ > bits.get_next_bit_offset ()))
+ {
+ /* If we have any concrete keys that aren't fully within BITS,
+ then bail out. */
+ return m_mgr->get_or_create_unknown_svalue (type);
+ }
+ const concrete_binding *offset_conc_key
+ = m_mgr->get_store_manager ()->get_concrete_binding
+ (conc_key->get_start_bit_offset ()
+ - bits.get_start_bit_offset (),
+ conc_key->get_size_in_bits (),
+ conc_key->get_kind ());
+ const svalue *sval = iter.second;
+ result_map.put (offset_conc_key, sval);
+ }
+ else
+ /* If we have any symbolic keys, we can't extract the value as bits.  */
+ return m_mgr->get_or_create_unknown_svalue (type);
+ }
+ return m_mgr->get_or_create_compound_svalue (type, result_map);
+ }
+
+ return m_mgr->get_or_create_unknown_svalue (type);
+}
+
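
To make the key rebasing in get_rvalue_for_bits concrete, here is an illustrative sketch on plain integers with made-up offsets (not analyzer code): extracting BITS == bit_range (16, 8) from a compound value that binds bit_range (16, 1) and bit_range (17, 1) yields a compound value of the requested type whose keys are rebased to bit_range (0, 1) and bit_range (1, 1); a key that only partly overlaps BITS, or any symbolic key, makes the whole extraction fall back to an unknown svalue.

  /* Sketch only: the skip / bail-out / rebase logic above, modelled on
     plain integers.  Returns 1 on success, 0 if we would give up.  */

  struct sketch_key { unsigned long start; unsigned long size; };

  static int
  rebase_keys (const struct sketch_key *keys, unsigned num_keys,
               unsigned long bits_start, unsigned long bits_size,
               struct sketch_key *out, unsigned *out_num)
  {
    unsigned count = 0;
    for (unsigned i = 0; i < num_keys; i++)
      {
        unsigned long key_start = keys[i].start;
        unsigned long key_next = keys[i].start + keys[i].size;
        /* Ignore keys entirely outside BITS.  */
        if (key_next <= bits_start || key_start >= bits_start + bits_size)
          continue;
        /* A key only partly within BITS: give up (unknown svalue).  */
        if (key_start < bits_start || key_next > bits_start + bits_size)
          return 0;
        out[count].start = key_start - bits_start;
        out[count].size = keys[i].size;
        count++;
      }
    *out_num = count;
    return 1;
  }

With keys (16, 1) and (17, 1) and BITS == (16, 8) this yields (0, 1) and (1, 1), mirroring what result_map receives above.
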
/* A subclass of pending_diagnostic for complaining about writes to
constant regions of memory. */
const region *deref_rvalue (const svalue *ptr_sval, tree ptr_tree,
region_model_context *ctxt);
+ const svalue *get_rvalue_for_bits (tree type,
+ const region *reg,
+ const bit_range &bits);
+
void set_value (const region *lhs_reg, const svalue *rhs_sval,
region_model_context *ctxt);
void set_value (tree lhs, tree rhs, region_model_context *ctxt);
return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits);
}
+/* If MASK is a contiguous range of set bits, write them
+ to *OUT and return true.
+ Otherwise return false. */
+
+bool
+bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
+{
+ unsigned iter_bit_idx = 0;
+ unsigned HOST_WIDE_INT iter_bit_mask = 1;
+
+ /* Find the first contiguous run of set bits in MASK. */
+
+ /* Find first set bit in MASK. */
+ while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
+ {
+ if (mask & iter_bit_mask)
+ break;
+ iter_bit_idx++;
+ iter_bit_mask <<= 1;
+ }
+ if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
+ /* MASK is zero. */
+ return false;
+
+ unsigned first_set_iter_bit_idx = iter_bit_idx;
+ unsigned num_set_bits = 1;
+ iter_bit_idx++;
+ iter_bit_mask <<= 1;
+
+ /* Find next unset bit in MASK. */
+ while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
+ {
+ if (!(mask & iter_bit_mask))
+ break;
+ num_set_bits++;
+ iter_bit_idx++;
+ iter_bit_mask <<= 1;
+ }
+ if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
+ {
+ *out = bit_range (first_set_iter_bit_idx, num_set_bits);
+ return true;
+ }
+
+ /* We now have the first contiguous run of set bits in MASK.
+ Fail if any other bits are set. */
+ while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
+ {
+ if (mask & iter_bit_mask)
+ return false;
+ iter_bit_idx++;
+ iter_bit_mask <<= 1;
+ }
+
+ *out = bit_range (first_set_iter_bit_idx, num_set_bits);
+ return true;
+}
+
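
As a side note on bit_range::from_mask (a sketch, not something the patch uses): the same contiguity test can be written without the explicit scan, by filling the lowest run of set bits and everything below it and then checking that no set bit of MASK survives above that block; the start and size would then come from helpers such as ctz_hwi and popcount_hwi.

  /* Sketch only: loop-free contiguity test equivalent to the scan in
     bit_range::from_mask.  */
  static bool
  mask_is_one_contiguous_run (unsigned HOST_WIDE_INT mask)
  {
    if (mask == 0)
      return false;
    /* mask | (mask - 1) fills the lowest run and all bits below it;
       adding 1 gives a single bit just above that block, so any overlap
       with MASK means a second, separate run of set bits exists.  */
    unsigned HOST_WIDE_INT filled = mask | (mask - 1);
    return ((filled + 1) & mask) == 0;
  }
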
/* class concrete_binding : public binding_key. */
/* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */
namespace selftest {
+/* Verify that bit_range::intersects_p works as expected. */
+
+static void
+test_bit_range_intersects_p ()
+{
+ bit_range b0 (0, 1);
+ bit_range b1 (1, 1);
+ bit_range b2 (2, 1);
+ bit_range b3 (3, 1);
+ bit_range b4 (4, 1);
+ bit_range b5 (5, 1);
+ bit_range b6 (6, 1);
+ bit_range b7 (7, 1);
+ bit_range b1_to_6 (1, 6);
+ bit_range b0_to_7 (0, 8);
+ bit_range b3_to_5 (3, 3);
+ bit_range b6_to_7 (6, 2);
+
+ /* self-intersection is true. */
+ ASSERT_TRUE (b0.intersects_p (b0));
+ ASSERT_TRUE (b7.intersects_p (b7));
+ ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
+ ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));
+
+ ASSERT_FALSE (b0.intersects_p (b1));
+ ASSERT_FALSE (b1.intersects_p (b0));
+ ASSERT_FALSE (b0.intersects_p (b7));
+ ASSERT_FALSE (b7.intersects_p (b0));
+
+ ASSERT_TRUE (b0_to_7.intersects_p (b0));
+ ASSERT_TRUE (b0_to_7.intersects_p (b7));
+ ASSERT_TRUE (b0.intersects_p (b0_to_7));
+ ASSERT_TRUE (b7.intersects_p (b0_to_7));
+
+ ASSERT_FALSE (b0.intersects_p (b1_to_6));
+ ASSERT_FALSE (b1_to_6.intersects_p (b0));
+ ASSERT_TRUE (b1.intersects_p (b1_to_6));
+ ASSERT_TRUE (b1_to_6.intersects_p (b1));
+ ASSERT_TRUE (b1_to_6.intersects_p (b6));
+ ASSERT_FALSE (b1_to_6.intersects_p (b7));
+
+ ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
+ ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));
+
+ ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
+ ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
+}
+
+/* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ. */
+
+static void
+assert_bit_range_from_mask_eq (const location &loc,
+ unsigned HOST_WIDE_INT mask,
+ const bit_range &expected)
+{
+ bit_range actual (0, 0);
+ bool ok = bit_range::from_mask (mask, &actual);
+ ASSERT_TRUE_AT (loc, ok);
+ ASSERT_EQ_AT (loc, actual, expected);
+}
+
+/* Assert that bit_range::from_mask (MASK) returns true, and writes
+ out EXPECTED_BIT_RANGE. */
+
+#define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
+ SELFTEST_BEGIN_STMT \
+ assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK, \
+ EXPECTED_BIT_RANGE); \
+ SELFTEST_END_STMT
+
+/* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK. */
+
+static void
+assert_no_bit_range_from_mask_eq (const location &loc,
+ unsigned HOST_WIDE_INT mask)
+{
+ bit_range actual (0, 0);
+ bool ok = bit_range::from_mask (mask, &actual);
+ ASSERT_FALSE_AT (loc, ok);
+}
+
+/* Assert that bit_range::from_mask (MASK) returns false. */
+
+#define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
+ SELFTEST_BEGIN_STMT \
+ assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK); \
+ SELFTEST_END_STMT
+
+/* Verify that bit_range::from_mask works as expected. */
+
+static void
+test_bit_range_from_mask ()
+{
+ /* Should fail on zero. */
+ ASSERT_NO_BIT_RANGE_FROM_MASK (0);
+
+ /* Verify 1-bit masks. */
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));
+
+ /* Verify N-bit masks starting at bit 0. */
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));
+
+ /* Various other tests. */
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));
+
+ /* Multiple ranges of set bits should fail. */
+ ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
+ ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
+}
+
/* Implementation detail of ASSERT_OVERLAP. */
static void
void
analyzer_store_cc_tests ()
{
+ test_bit_range_intersects_p ();
+ test_bit_range_from_mask ();
test_binding_key_overlap ();
}
&& m_size_in_bits == other.m_size_in_bits);
}
+ bool intersects_p (const bit_range &other) const
+ {
+ return (get_start_bit_offset () < other.get_next_bit_offset ()
+ && other.get_start_bit_offset () < get_next_bit_offset ());
+ }
+
static int cmp (const bit_range &br1, const bit_range &br2);
+ static bool from_mask (unsigned HOST_WIDE_INT mask, bit_range *out);
+
bit_offset_t m_start_bit_offset;
bit_size_t m_size_in_bits;
};
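
A small usage note on the new intersects_p member (illustrative only, not from the patch): the comparison treats ranges as half-open intervals, so ranges that merely touch do not intersect.

  /* Sketch only: adjacency is not intersection.  */
  static void
  intersects_p_examples ()
  {
    bit_range lo (0, 4);  /* bits 0..3, i.e. [0, 4) */
    bit_range hi (4, 4);  /* bits 4..7, i.e. [4, 8) */
    gcc_assert (!lo.intersects_p (hi));
    gcc_assert (lo.intersects_p (bit_range (3, 2)));  /* bits 3..4 */
  }
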
const concrete_binding *dyn_cast_concrete_binding () const FINAL OVERRIDE
{ return this; }
+ const bit_range &get_bit_range () const { return m_bit_range; }
+
bit_offset_t get_start_bit_offset () const
{
return m_bit_range.m_start_bit_offset;
get_concrete_binding (bit_offset_t start_bit_offset,
bit_offset_t size_in_bits,
enum binding_kind kind);
+ const concrete_binding *
+ get_concrete_binding (const bit_range &bits,
+ enum binding_kind kind)
+ {
+ return get_concrete_binding (bits.get_start_bit_offset (),
+ bits.m_size_in_bits,
+ kind);
+ }
const symbolic_binding *
get_symbolic_binding (const region *region,
enum binding_kind kind);
--- /dev/null
+#include "analyzer-decls.h"
+
+typedef unsigned char u8;
+typedef unsigned __INT16_TYPE__ u16;
+typedef unsigned __INT32_TYPE__ u32;
+
+struct st1
+{
+ u16 nonzero_offset;
+ unsigned int f0 : 1;
+ unsigned int f1 : 1;
+ unsigned int f2 : 1;
+ unsigned int f3 : 1;
+ unsigned int f4 : 1;
+ unsigned int f5 : 1;
+ unsigned int f6 : 1;
+ unsigned int f7 : 1;
+};
+
+void test_1 (void)
+{
+ struct st1 s;
+ s.f0 = 0;
+ __analyzer_eval (s.f0 == 0); /* { dg-warning "TRUE" } */
+ s.f0 = 1;
+ __analyzer_eval (s.f0 == 1); /* { dg-warning "TRUE" } */
+
+ s.f1 = 0;
+ __analyzer_eval (s.f1 == 0); /* { dg-warning "TRUE" } */
+ s.f1 = 1;
+ __analyzer_eval (s.f1 == 1); /* { dg-warning "TRUE" } */
+
+ /* etc */
+
+ s.f6 = 0;
+ __analyzer_eval (s.f6 == 0); /* { dg-warning "TRUE" } */
+ s.f6 = 1;
+ __analyzer_eval (s.f6 == 1); /* { dg-warning "TRUE" } */
+
+ s.f7 = 0;
+ __analyzer_eval (s.f7 == 0); /* { dg-warning "TRUE" } */
+ s.f7 = 1;
+ __analyzer_eval (s.f7 == 1); /* { dg-warning "TRUE" } */
+};
+
+void test_2 (_Bool v0, _Bool v1, _Bool v2, _Bool v3,
+ _Bool v4, _Bool v5, _Bool v6, _Bool v7)
+{
+ struct st1 s;
+ s.f0 = v0;
+ s.f1 = v1;
+ s.f2 = v2;
+ s.f3 = v3;
+ s.f4 = v4;
+ s.f5 = v5;
+ s.f6 = v6;
+ s.f7 = v7;
+
+ __analyzer_eval (s.f0 == v0); /* { dg-warning "TRUE" } */
+ __analyzer_eval (s.f1 == v1); /* { dg-warning "TRUE" } */
+ __analyzer_eval (s.f2 == v2); /* { dg-warning "TRUE" } */
+ __analyzer_eval (s.f3 == v3); /* { dg-warning "TRUE" } */
+ __analyzer_eval (s.f4 == v4); /* { dg-warning "TRUE" } */
+ __analyzer_eval (s.f5 == v5); /* { dg-warning "TRUE" } */
+ __analyzer_eval (s.f6 == v6); /* { dg-warning "TRUE" } */
+ __analyzer_eval (s.f7 == v7); /* { dg-warning "TRUE" } */
+};
+
+struct st3
+{
+ unsigned int f01 : 2;
+ unsigned int f23 : 2;
+ unsigned int f45 : 2;
+ unsigned int f67 : 2;
+};
+
+void test_3 (void)
+{
+ struct st3 s;
+ s.f01 = 0;
+ __analyzer_eval (s.f01 == 0); /* { dg-warning "TRUE" } */
+ s.f01 = 1;
+ __analyzer_eval (s.f01 == 1); /* { dg-warning "TRUE" } */
+ s.f01 = 2;
+ __analyzer_eval (s.f01 == 2); /* { dg-warning "TRUE" } */
+ s.f01 = 3;
+ __analyzer_eval (s.f01 == 3); /* { dg-warning "TRUE" } */
+
+ /* etc */
+
+ s.f67 = 0;
+ __analyzer_eval (s.f67 == 0); /* { dg-warning "TRUE" } */
+ s.f67 = 1;
+ __analyzer_eval (s.f67 == 1); /* { dg-warning "TRUE" } */
+ s.f67 = 2;
+ __analyzer_eval (s.f67 == 2); /* { dg-warning "TRUE" } */
+ s.f67 = 3;
+ __analyzer_eval (s.f67 == 3); /* { dg-warning "TRUE" } */
+};
+
+/* A signed bitfield. */
+
+struct st4
+{
+ signed int f012 : 3;
+ signed int f345 : 3;
+};
+
+void test_4 (void)
+{
+ struct st4 s;
+ s.f345 = -4;
+ __analyzer_eval (s.f345 == -4); /* { dg-warning "TRUE" } */
+ s.f345 = -3;
+ __analyzer_eval (s.f345 == -3); /* { dg-warning "TRUE" } */
+ s.f345 = -2;
+ __analyzer_eval (s.f345 == -2); /* { dg-warning "TRUE" } */
+ s.f345 = -1;
+ __analyzer_eval (s.f345 == -1); /* { dg-warning "TRUE" } */
+ s.f345 = 0;
+ __analyzer_eval (s.f345 == 0); /* { dg-warning "TRUE" } */
+ s.f345 = 1;
+ __analyzer_eval (s.f345 == 1); /* { dg-warning "TRUE" } */
+ s.f345 = 2;
+ __analyzer_eval (s.f345 == 2); /* { dg-warning "TRUE" } */
+ s.f345 = 3;
+ __analyzer_eval (s.f345 == 3); /* { dg-warning "TRUE" } */
+};
+
+/* A zero-width bitfield, forcing f1 to start at the next allocation unit.  */
+
+struct st5
+{
+ unsigned f0 : 5;
+ unsigned :0;
+ unsigned f1 : 16;
+};
+
+void test_5 (void)
+{
+ struct st5 s;
+ s.f1 = 0xcafe;
+ __analyzer_eval (s.f1 == 0xcafe); /* { dg-warning "TRUE" } */
+}
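
test_4 above is the case the final BIT_AND_EXPR of the fold exists for, per its comment about negative signed bitfields. A worked sketch (assumed layout, not part of the patch): with f345 == -4, a 3-bit signed field assumed to sit at bit offset 3, the bound svalue is the constant -4, so the bare shift gives (-4) << 3 == -32, whose high bits are all ones; re-applying the field's mask 0x38 recovers 0x20, i.e. the pattern 0b100 in bits 3..5, which is what the masked load actually reads.

  /* Sketch only: why the fold masks again after shifting a signed
     bitfield's value.  The offsets are assumptions for illustration.  */
  static unsigned int
  signed_field_as_unit_bits (void)
  {
    int field_value = -4;          /* e.g. st4.f345 */
    unsigned int start_bit = 3;    /* assumed bit offset of f345 */
    unsigned int mask = 0x38;      /* bits 3..5 */
    unsigned int shifted = (unsigned int) field_value << start_bit;
    return shifted & mask;         /* 0x20 */
  }
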
struct sbits
{
- int b0 : 1;
- int b123 : 3;
- int b456 : 3;
- int b7 : 1;
+ signed int b0 : 1;
+ signed int b123 : 3;
+ signed int b456 : 3;
+ signed int b7 : 1;
};
void test_44 (void)
{
struct sbits bits;
- bits.b0 = 1;
- __analyzer_eval (bits.b0 == 1); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "FALSE" "status quo" { target *-*-* } .-1 } */
- // TODO(xfail): ^^^^
+ bits.b0 = -1;
+ __analyzer_eval (bits.b0 == -1); /* { dg-warning "TRUE" } */
- bits.b456 = 5;
- __analyzer_eval (bits.b456 == 5); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "FALSE" "status quo" { target *-*-* } .-1 } */
- // TODO(xfail): ^^^^
+ bits.b456 = -4;
+ __analyzer_eval (bits.b456 == -4); /* { dg-warning "TRUE" } */
};
struct ubits
unsigned int b7 : 1;
};
-/* FIXME: this requires BIT_FIELD_REF to work. */
-
void test_45 (void)
{
struct ubits bits;
bits.b0 = 1;
- __analyzer_eval (bits.b0 == 1); /* { dg-warning "TRUE" "desired, PR99212" { xfail { ! { cris-*-* } } } } */
- /* { dg-warning "UNKNOWN" "status quo, PR99212" { target { *-*-* } xfail { cris-*-* } } .-1 } */
- // TODO(xfail): ^^^^
+ __analyzer_eval (bits.b0 == 1); /* { dg-warning "TRUE" } */
bits.b456 = 5;
- __analyzer_eval (bits.b456 == 5); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
- /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
- // TODO(xfail): ^^^^
+ __analyzer_eval (bits.b456 == 5); /* { dg-warning "TRUE" } */
};
extern const char *char_ptr;