diff --git a/gcc/analyzer/analyzer.h b/gcc/analyzer/analyzer.h index 896b35075cc..05d47512bd7 100644 --- a/gcc/analyzer/analyzer.h +++ b/gcc/analyzer/analyzer.h @@ -75,10 +75,12 @@ class region_model; class region_model_context; class impl_region_model_context; class call_details; -struct rejected_constraint; +class rejected_constraint; class constraint_manager; class equiv_class; class reachable_regions; +class bounded_ranges; +class bounded_ranges_manager; class pending_diagnostic; class state_change_event; diff --git a/gcc/analyzer/constraint-manager.cc b/gcc/analyzer/constraint-manager.cc index f59929a75ca..dc65c8dd92b 100644 --- a/gcc/analyzer/constraint-manager.cc +++ b/gcc/analyzer/constraint-manager.cc @@ -42,12 +42,14 @@ along with GCC; see the file COPYING3. If not see #include "sbitmap.h" #include "bitmap.h" #include "tristate.h" +#include "analyzer/analyzer-logging.h" #include "analyzer/call-string.h" #include "analyzer/program-point.h" #include "analyzer/store.h" #include "analyzer/region-model.h" #include "analyzer/constraint-manager.h" #include "analyzer/analyzer-selftests.h" +#include "tree-pretty-print.h" #if ENABLE_ANALYZER @@ -65,6 +67,50 @@ compare_constants (tree lhs_const, enum tree_code op, tree rhs_const) return tristate (tristate::TS_UNKNOWN); } +/* Return true iff CST is below the maximum value for its type. */ + +static bool +can_plus_one_p (tree cst) +{ + gcc_assert (CONSTANT_CLASS_P (cst)); + return tree_int_cst_lt (cst, TYPE_MAX_VALUE (TREE_TYPE (cst))); +} + +/* Return (CST + 1). */ + +static tree +plus_one (tree cst) +{ + gcc_assert (CONSTANT_CLASS_P (cst)); + gcc_assert (can_plus_one_p (cst)); + tree result = fold_build2 (PLUS_EXPR, TREE_TYPE (cst), + cst, integer_one_node); + gcc_assert (CONSTANT_CLASS_P (result)); + return result; +} + +/* Return true iff CST is above the minimum value for its type. */ + +static bool +can_minus_one_p (tree cst) +{ + gcc_assert (CONSTANT_CLASS_P (cst)); + return tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (cst)), cst); +} + +/* Return (CST - 1). */ + +static tree +minus_one (tree cst) +{ + gcc_assert (CONSTANT_CLASS_P (cst)); + gcc_assert (can_minus_one_p (cst)); + tree result = fold_build2 (MINUS_EXPR, TREE_TYPE (cst), + cst, integer_one_node); + gcc_assert (CONSTANT_CLASS_P (result)); + return result; +} + /* struct bound. */ /* Ensure that this bound is closed by converting an open bound to a @@ -255,6 +301,678 @@ range::above_upper_bound (tree rhs_const) const m_upper_bound.m_constant).is_true (); } +/* struct bounded_range. */ + +bounded_range::bounded_range (const_tree lower, const_tree upper) +: m_lower (const_cast (lower)), + m_upper (const_cast (upper)) +{ + if (lower && upper) + { + gcc_assert (TREE_CODE (m_lower) == INTEGER_CST); + gcc_assert (TREE_CODE (m_upper) == INTEGER_CST); + /* We should have lower <= upper. */ + gcc_assert (!tree_int_cst_lt (m_upper, m_lower)); + } + else + { + /* Purely for pending on-stack values, for + writing back to. */ + gcc_assert (m_lower == NULL_TREE); + gcc_assert (m_lower == NULL_TREE); + } +} + +static void +dump_cst (pretty_printer *pp, tree cst, bool show_types) +{ + gcc_assert (cst); + if (show_types) + { + pp_character (pp, '('); + dump_generic_node (pp, TREE_TYPE (cst), 0, (dump_flags_t)0, false); + pp_character (pp, ')'); + } + dump_generic_node (pp, cst, 0, (dump_flags_t)0, false); +} + +/* Dump this object to PP. 
*/ + +void +bounded_range::dump_to_pp (pretty_printer *pp, bool show_types) const +{ + if (tree_int_cst_equal (m_lower, m_upper)) + dump_cst (pp, m_lower, show_types); + else + { + pp_character (pp, '['); + dump_cst (pp, m_lower, show_types); + pp_string (pp, ", "); + dump_cst (pp, m_upper, show_types); + pp_character (pp, ']'); + } +} + +/* Dump this object to stderr. */ + +void +bounded_range::dump (bool show_types) const +{ + pretty_printer pp; + pp_format_decoder (&pp) = default_tree_printer; + pp_show_color (&pp) = pp_show_color (global_dc->printer); + pp.buffer->stream = stderr; + dump_to_pp (&pp, show_types); + pp_newline (&pp); + pp_flush (&pp); +} + +json::object * +bounded_range::to_json () const +{ + json::object *range_obj = new json::object (); + set_json_attr (range_obj, "lower", m_lower); + set_json_attr (range_obj, "upper", m_upper); + return range_obj; +} + +/* Subroutine of bounded_range::to_json. */ + +void +bounded_range::set_json_attr (json::object *obj, const char *name, tree value) +{ + pretty_printer pp; + pp_format_decoder (&pp) = default_tree_printer; + pp_printf (&pp, "%E", value); + obj->set (name, new json::string (pp_formatted_text (&pp))); +} + + +/* Return true iff CST is within this range. */ + +bool +bounded_range::contains_p (tree cst) const +{ + /* Reject if below lower bound. */ + if (tree_int_cst_lt (cst, m_lower)) + return false; + /* Reject if above lower bound. */ + if (tree_int_cst_lt (m_upper, cst)) + return false; + return true; +} + +/* If this range intersects OTHER, return true, writing + the intersection to *OUT if OUT is non-NULL. + Return false if they do not intersect. */ + +bool +bounded_range::intersects_p (const bounded_range &other, + bounded_range *out) const +{ + const tree max_lower + = (tree_int_cst_le (m_lower, other.m_lower) + ? other.m_lower : m_lower); + gcc_assert (TREE_CODE (max_lower) == INTEGER_CST); + const tree min_upper + = (tree_int_cst_le (m_upper, other.m_upper) + ? m_upper : other.m_upper); + gcc_assert (TREE_CODE (min_upper) == INTEGER_CST); + + if (tree_int_cst_le (max_lower, min_upper)) + { + if (out) + *out = bounded_range (max_lower, min_upper); + return true; + } + else + return false; +} + +bool +bounded_range::operator== (const bounded_range &other) const +{ + return (tree_int_cst_equal (m_lower, other.m_lower) + && tree_int_cst_equal (m_upper, other.m_upper)); +} + +int +bounded_range::cmp (const bounded_range &br1, const bounded_range &br2) +{ + if (int cmp_lower = tree_int_cst_compare (br1.m_lower, + br2.m_lower)) + return cmp_lower; + return tree_int_cst_compare (br1.m_upper, br2.m_upper); +} + +/* struct bounded_ranges. */ + +/* Construct a bounded_ranges instance from a single range. */ + +bounded_ranges::bounded_ranges (const bounded_range &range) +: m_ranges (1) +{ + m_ranges.quick_push (range); + canonicalize (); + validate (); +} + +/* Construct a bounded_ranges instance from multiple ranges. */ + +bounded_ranges::bounded_ranges (const vec &ranges) +: m_ranges (ranges.length ()) +{ + m_ranges.safe_splice (ranges); + canonicalize (); + validate (); +} + +/* Construct a bounded_ranges instance for values of LHS for which + (LHS OP RHS_CONST) is true (e.g. "(LHS > 3)". 
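As a side note on intersects_p above: the overlap of two closed ranges is simply the larger of the two lower bounds paired with the smaller of the two upper bounds, and it is non-empty exactly when those still satisfy lower <= upper. A minimal standalone sketch of that check, using plain integers rather than the INTEGER_CST trees the patch operates on (illustrative only, not the patch's code):

#include <algorithm>
#include <cassert>

struct simple_range
{
  long lower;
  long upper;   /* closed interval: [lower, upper] */
};

/* Return true and write the overlap to *OUT if A and B intersect.  */
static bool
intersects (const simple_range &a, const simple_range &b, simple_range *out)
{
  long max_lower = std::max (a.lower, b.lower);
  long min_upper = std::min (a.upper, b.upper);
  if (max_lower > min_upper)
    return false;               /* disjoint */
  if (out)
    *out = simple_range {max_lower, min_upper};
  return true;
}

int
main ()
{
  simple_range out;
  assert (intersects ({0, 1}, {0, 0}, &out) && out.lower == 0 && out.upper == 0);
  assert (!intersects ({0, 0}, {64, 128}, nullptr));
  assert (intersects ({128, 255}, {64, 128}, &out) && out.lower == 128);
  return 0;
}

The three assertions mirror the br_u8_0 / br_u8_64_128 / br_u8_128_255 checks in the test_bounded_range selftest further down.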
*/ + +bounded_ranges::bounded_ranges (enum tree_code op, tree rhs_const) +: m_ranges () +{ + gcc_assert (TREE_CODE (rhs_const) == INTEGER_CST); + tree type = TREE_TYPE (rhs_const); + switch (op) + { + default: + gcc_unreachable (); + case EQ_EXPR: + m_ranges.safe_push (bounded_range (rhs_const, rhs_const)); + break; + + case GE_EXPR: + m_ranges.safe_push (bounded_range (rhs_const, TYPE_MAX_VALUE (type))); + break; + + case LE_EXPR: + m_ranges.safe_push (bounded_range (TYPE_MIN_VALUE (type), rhs_const)); + break; + + case NE_EXPR: + if (tree_int_cst_lt (TYPE_MIN_VALUE (type), rhs_const)) + m_ranges.safe_push (bounded_range (TYPE_MIN_VALUE (type), + minus_one (rhs_const))); + if (tree_int_cst_lt (rhs_const, TYPE_MAX_VALUE (type))) + m_ranges.safe_push (bounded_range (plus_one (rhs_const), + TYPE_MAX_VALUE (type))); + break; + case GT_EXPR: + if (tree_int_cst_lt (rhs_const, TYPE_MAX_VALUE (type))) + m_ranges.safe_push (bounded_range (plus_one (rhs_const), + TYPE_MAX_VALUE (type))); + break; + case LT_EXPR: + if (tree_int_cst_lt (TYPE_MIN_VALUE (type), rhs_const)) + m_ranges.safe_push (bounded_range (TYPE_MIN_VALUE (type), + minus_one (rhs_const))); + break; + } + canonicalize (); + validate (); +} + +/* Subroutine of ctors for fixing up m_ranges. + Also, initialize m_hash. */ + +void +bounded_ranges::canonicalize () +{ + /* Sort the ranges. */ + m_ranges.qsort ([](const void *p1, const void *p2) -> int + { + const bounded_range &br1 = *(const bounded_range *)p1; + const bounded_range &br2 = *(const bounded_range *)p2; + return bounded_range::cmp (br1, br2); + }); + + /* Merge ranges that are touching or overlapping. */ + for (unsigned i = 1; i < m_ranges.length (); ) + { + bounded_range *prev = &m_ranges[i - 1]; + const bounded_range *next = &m_ranges[i]; + if (prev->intersects_p (*next, NULL) + || (can_plus_one_p (prev->m_upper) + && tree_int_cst_equal (plus_one (prev->m_upper), + next->m_lower))) + { + prev->m_upper = next->m_upper; + m_ranges.ordered_remove (i); + } + else + i++; + } + + /* Initialize m_hash. */ + inchash::hash hstate (0); + for (const auto &iter : m_ranges) + { + inchash::add_expr (iter.m_lower, hstate); + inchash::add_expr (iter.m_upper, hstate); + } + m_hash = hstate.end (); +} + +/* Assert that this object is valid. */ + +void +bounded_ranges::validate () const +{ + /* Skip this in a release build. */ +#if !CHECKING_P + return; +#endif + + for (unsigned i = 1; i < m_ranges.length (); i++) + { + const bounded_range &prev = m_ranges[i - 1]; + const bounded_range &next = m_ranges[i]; + + /* Give up if we somehow have incompatible different types. */ + if (!types_compatible_p (TREE_TYPE (prev.m_upper), + TREE_TYPE (next.m_lower))) + continue; + + /* Verify sorted. */ + gcc_assert (tree_int_cst_lt (prev.m_upper, next.m_lower)); + + gcc_assert (can_plus_one_p (prev.m_upper)); + /* otherwise there's no room for "next". */ + + /* Verify no ranges touch each other. */ + gcc_assert (tree_int_cst_lt (plus_one (prev.m_upper), next.m_lower)); + } +} + +/* bounded_ranges equality operator. */ + +bool +bounded_ranges::operator== (const bounded_ranges &other) const +{ + if (m_ranges.length () != other.m_ranges.length ()) + return false; + for (unsigned i = 0; i < m_ranges.length (); i++) + { + if (m_ranges[i] != other.m_ranges[i]) + return false; + } + return true; +} + +/* Dump this object to PP. 
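The sorting and merging performed by canonicalize above keeps every bounded_ranges in a normal form: ranges sorted by lower bound, with overlapping or touching neighbours folded together. A simplified standalone sketch of that step, using plain integers and std::vector instead of trees and auto_vec (illustrative only; the merge here takes the maximum upper bound, which is the safe general form):

#include <algorithm>
#include <cassert>
#include <vector>

struct simple_range { long lower, upper; };   /* closed interval */

/* Sort RANGES and merge overlapping or adjacent neighbours.  */
static void
canonicalize (std::vector<simple_range> &ranges)
{
  std::sort (ranges.begin (), ranges.end (),
             [] (const simple_range &a, const simple_range &b)
             {
               if (a.lower != b.lower)
                 return a.lower < b.lower;
               return a.upper < b.upper;
             });
  for (size_t i = 1; i < ranges.size (); )
    {
      simple_range &prev = ranges[i - 1];
      const simple_range &next = ranges[i];
      if (next.lower <= prev.upper + 1)   /* overlapping or touching */
        {
          prev.upper = std::max (prev.upper, next.upper);
          ranges.erase (ranges.begin () + i);
        }
      else
        i++;
    }
}

int
main ()
{
  /* {2}, {0}, [2, 255] should canonicalize to {0}, [2, 255].  */
  std::vector<simple_range> v = { {2, 2}, {0, 0}, {2, 255} };
  canonicalize (v);
  assert (v.size () == 2);
  assert (v[0].lower == 0 && v[0].upper == 0);
  assert (v[1].lower == 2 && v[1].upper == 255);
  return 0;
}

The example in main corresponds to the "Construct from vector of bounded_range" selftest below, which expects {2}, {0}, [2, 255] to canonicalize to {0, [2, 255]}.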
*/ + +void +bounded_ranges::dump_to_pp (pretty_printer *pp, bool show_types) const +{ + pp_character (pp, '{'); + for (unsigned i = 0; i < m_ranges.length (); ++i) + { + if (i > 0) + pp_string (pp, ", "); + m_ranges[i].dump_to_pp (pp, show_types); + } + pp_character (pp, '}'); +} + +/* Dump this object to stderr. */ + +DEBUG_FUNCTION void +bounded_ranges::dump (bool show_types) const +{ + pretty_printer pp; + pp_format_decoder (&pp) = default_tree_printer; + pp_show_color (&pp) = pp_show_color (global_dc->printer); + pp.buffer->stream = stderr; + dump_to_pp (&pp, show_types); + pp_newline (&pp); + pp_flush (&pp); +} + +json::value * +bounded_ranges::to_json () const +{ + json::array *arr_obj = new json::array (); + + for (unsigned i = 0; i < m_ranges.length (); ++i) + arr_obj->append (m_ranges[i].to_json ()); + + return arr_obj; +} + +/* Determine whether (X OP RHS_CONST) is known to be true or false + for all X in the ranges expressed by this object. */ + +tristate +bounded_ranges::eval_condition (enum tree_code op, + tree rhs_const, + bounded_ranges_manager *mgr) const +{ + /* Convert (X OP RHS_CONST) to a bounded_ranges instance and find + the intersection of that with this object. */ + bounded_ranges other (op, rhs_const); + const bounded_ranges *intersection + = mgr->get_or_create_intersection (this, &other); + + if (intersection->m_ranges.length () > 0) + { + /* We can use pointer equality to check for equality, + due to instance consolidation. */ + if (intersection == this) + return tristate (tristate::TS_TRUE); + else + return tristate (tristate::TS_UNKNOWN); + } + else + /* No intersection. */ + return tristate (tristate::TS_FALSE); +} + +/* Return true if CST is within any of the ranges. */ + +bool +bounded_ranges::contain_p (tree cst) const +{ + gcc_assert (TREE_CODE (cst) == INTEGER_CST); + for (const auto &iter : m_ranges) + { + /* TODO: should we optimize this based on sorting? */ + if (iter.contains_p (cst)) + return true; + } + return false; +} + +int +bounded_ranges::cmp (const bounded_ranges *a, const bounded_ranges *b) +{ + if (int cmp_length = ((int)a->m_ranges.length () + - (int)b->m_ranges.length ())) + return cmp_length; + for (unsigned i = 0; i < a->m_ranges.length (); i++) + { + if (int cmp_range = bounded_range::cmp (a->m_ranges[i], b->m_ranges[i])) + return cmp_range; + } + /* They are equal. They ought to have been consolidated, so we should + have two pointers to the same object. */ + gcc_assert (a == b); + return 0; +} + +/* class bounded_ranges_manager. */ + +/* bounded_ranges_manager's dtor. */ + +bounded_ranges_manager::~bounded_ranges_manager () +{ + /* Delete the managed objects. */ + for (const auto &iter : m_map) + delete iter.second; +} + +/* Get the bounded_ranges instance for the empty set, creating it if + necessary. */ + +const bounded_ranges * +bounded_ranges_manager::get_or_create_empty () +{ + auto_vec empty_vec; + + return consolidate (new bounded_ranges (empty_vec)); +} + +/* Get the bounded_ranges instance for {CST}, creating it if necessary. */ + +const bounded_ranges * +bounded_ranges_manager::get_or_create_point (const_tree cst) +{ + gcc_assert (TREE_CODE (cst) == INTEGER_CST); + + return get_or_create_range (cst, cst); +} + +/* Get the bounded_ranges instance for {[LOWER_BOUND..UPPER_BOUND]}, + creating it if necessary. 
*/ + +const bounded_ranges * +bounded_ranges_manager::get_or_create_range (const_tree lower_bound, + const_tree upper_bound) +{ + gcc_assert (TREE_CODE (lower_bound) == INTEGER_CST); + gcc_assert (TREE_CODE (upper_bound) == INTEGER_CST); + + return consolidate + (new bounded_ranges (bounded_range (lower_bound, upper_bound))); +} + +/* Get the bounded_ranges instance for the union of OTHERS, + creating it if necessary. */ + +const bounded_ranges * +bounded_ranges_manager:: +get_or_create_union (const vec &others) +{ + auto_vec ranges; + for (const auto &r : others) + ranges.safe_splice (r->m_ranges); + return consolidate (new bounded_ranges (ranges)); +} + +/* Get the bounded_ranges instance for the intersection of A and B, + creating it if necessary. */ + +const bounded_ranges * +bounded_ranges_manager::get_or_create_intersection (const bounded_ranges *a, + const bounded_ranges *b) +{ + auto_vec ranges; + unsigned a_idx = 0; + unsigned b_idx = 0; + while (a_idx < a->m_ranges.length () + && b_idx < b->m_ranges.length ()) + { + const bounded_range &r_a = a->m_ranges[a_idx]; + const bounded_range &r_b = b->m_ranges[b_idx]; + + bounded_range intersection (NULL_TREE, NULL_TREE); + if (r_a.intersects_p (r_b, &intersection)) + { + ranges.safe_push (intersection); + } + if (tree_int_cst_lt (r_a.m_lower, r_b.m_lower)) + { + a_idx++; + } + else + { + if (tree_int_cst_lt (r_a.m_upper, r_b.m_upper)) + a_idx++; + else + b_idx++; + } + } + + return consolidate (new bounded_ranges (ranges)); +} + +/* Get the bounded_ranges instance for the inverse of OTHER relative + to TYPE, creating it if necessary. + This is for use when handling "default" in switch statements, where + OTHER represents all the other cases. */ + +const bounded_ranges * +bounded_ranges_manager::get_or_create_inverse (const bounded_ranges *other, + tree type) +{ + tree min_val = TYPE_MIN_VALUE (type); + tree max_val = TYPE_MAX_VALUE (type); + if (other->m_ranges.length () == 0) + return get_or_create_range (min_val, max_val); + auto_vec ranges; + tree first_lb = other->m_ranges[0].m_lower; + if (tree_int_cst_lt (min_val, first_lb) + && can_minus_one_p (first_lb)) + ranges.safe_push (bounded_range (min_val, + minus_one (first_lb))); + for (unsigned i = 1; i < other->m_ranges.length (); i++) + { + tree prev_ub = other->m_ranges[i - 1].m_upper; + tree iter_lb = other->m_ranges[i].m_lower; + gcc_assert (tree_int_cst_lt (prev_ub, iter_lb)); + if (can_plus_one_p (prev_ub) && can_minus_one_p (iter_lb)) + ranges.safe_push (bounded_range (plus_one (prev_ub), + minus_one (iter_lb))); + } + tree last_ub + = other->m_ranges[other->m_ranges.length () - 1].m_upper; + if (tree_int_cst_lt (last_ub, max_val) + && can_plus_one_p (last_ub)) + ranges.safe_push (bounded_range (plus_one (last_ub), max_val)); + + return consolidate (new bounded_ranges (ranges)); +} + +/* If an object equal to INST is already present, delete INST and + return the existing object. + Otherwise add INST and return it. */ + +const bounded_ranges * +bounded_ranges_manager::consolidate (bounded_ranges *inst) +{ + if (bounded_ranges **slot = m_map.get (inst)) + { + delete inst; + return *slot; + } + m_map.put (inst, inst); + return inst; +} + +/* Get the bounded_ranges instance for EDGE of SWITCH_STMT, + creating it if necessary, and caching it by edge. */ + +const bounded_ranges * +bounded_ranges_manager:: +get_or_create_ranges_for_switch (const switch_cfg_superedge *edge, + const gswitch *switch_stmt) +{ + /* Look in per-edge cache. 
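Conceptually, get_or_create_intersection above walks the two canonical (sorted, disjoint) range lists with one index each, emits the overlap of the current pair, and advances past whichever range finishes first. A simplified standalone sketch of that sweep with plain integers (illustrative only, not the patch's code):

#include <cassert>
#include <vector>

struct simple_range { long lower, upper; };   /* closed interval */

/* Intersect two canonical (sorted, disjoint) range lists.  */
static std::vector<simple_range>
intersect (const std::vector<simple_range> &a,
           const std::vector<simple_range> &b)
{
  std::vector<simple_range> result;
  size_t a_idx = 0, b_idx = 0;
  while (a_idx < a.size () && b_idx < b.size ())
    {
      const simple_range &r_a = a[a_idx];
      const simple_range &r_b = b[b_idx];
      long lo = (r_a.lower > r_b.lower) ? r_a.lower : r_b.lower;
      long hi = (r_a.upper < r_b.upper) ? r_a.upper : r_b.upper;
      if (lo <= hi)
        result.push_back (simple_range {lo, hi});
      /* Advance whichever side's current range ends first.  */
      if (r_a.upper < r_b.upper)
        a_idx++;
      else
        b_idx++;
    }
  return result;
}

int
main ()
{
  /* {[0, 128]} intersected with {[64, 255]} -> {[64, 128]}.  */
  std::vector<simple_range> out = intersect ({ {0, 128} }, { {64, 255} });
  assert (out.size () == 1 && out[0].lower == 64 && out[0].upper == 128);

  /* Disjoint points: {0} and {1} -> empty, as in the selftest.  */
  assert (intersect ({ {0, 0} }, { {1, 1} }).empty ());
  return 0;
}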
*/ + if (const bounded_ranges ** slot = m_edge_cache.get (edge)) + return *slot; + + /* Not yet in cache. */ + const bounded_ranges *all_cases_ranges + = create_ranges_for_switch (*edge, switch_stmt); + m_edge_cache.put (edge, all_cases_ranges); + return all_cases_ranges; +} + +/* Get the bounded_ranges instance for EDGE of SWITCH_STMT, + creating it if necessary, for edges for which the per-edge + cache has not yet been populated. */ + +const bounded_ranges * +bounded_ranges_manager:: +create_ranges_for_switch (const switch_cfg_superedge &edge, + const gswitch *switch_stmt) +{ + /* Get the ranges for each case label. */ + auto_vec case_ranges_vec + (gimple_switch_num_labels (switch_stmt)); + + for (tree case_label : edge.get_case_labels ()) + { + /* Get the ranges for this case label. */ + const bounded_ranges *case_ranges + = make_case_label_ranges (switch_stmt, case_label); + case_ranges_vec.quick_push (case_ranges); + } + + /* Combine all the ranges for each case label into a single collection + of ranges. */ + const bounded_ranges *all_cases_ranges + = get_or_create_union (case_ranges_vec); + return all_cases_ranges; +} + +/* Get the bounded_ranges instance for CASE_LABEL within + SWITCH_STMT. */ + +const bounded_ranges * +bounded_ranges_manager:: +make_case_label_ranges (const gswitch *switch_stmt, + tree case_label) +{ + gcc_assert (TREE_CODE (case_label) == CASE_LABEL_EXPR); + tree lower_bound = CASE_LOW (case_label); + tree upper_bound = CASE_HIGH (case_label); + if (lower_bound) + { + if (upper_bound) + /* Range. */ + return get_or_create_range (lower_bound, upper_bound); + else + /* Single-value. */ + return get_or_create_point (lower_bound); + } + else + { + /* The default case. + Add exclusions based on the other cases. */ + auto_vec other_case_ranges + (gimple_switch_num_labels (switch_stmt)); + for (unsigned other_idx = 1; + other_idx < gimple_switch_num_labels (switch_stmt); + other_idx++) + { + tree other_label = gimple_switch_label (switch_stmt, + other_idx); + const bounded_ranges *other_ranges + = make_case_label_ranges (switch_stmt, other_label); + other_case_ranges.quick_push (other_ranges); + } + const bounded_ranges *other_cases_ranges + = get_or_create_union (other_case_ranges); + tree type = TREE_TYPE (gimple_switch_index (switch_stmt)); + return get_or_create_inverse (other_cases_ranges, type); + } +} + +/* Dump the number of objects of each class that were managed by this + manager to LOGGER. + If SHOW_OBJS is true, also dump the objects themselves. */ + +void +bounded_ranges_manager::log_stats (logger *logger, bool show_objs) const +{ + LOG_SCOPE (logger); + logger->log (" # %s: %li", "ranges", m_map.elements ()); + if (!show_objs) + return; + + auto_vec vec_objs (m_map.elements ()); + for (const auto &iter : m_map) + vec_objs.quick_push (iter.second); + vec_objs.qsort + ([](const void *p1, const void *p2) -> int + { + const bounded_ranges *br1 = *(const bounded_ranges * const *)p1; + const bounded_ranges *br2 = *(const bounded_ranges * const *)p2; + return bounded_ranges::cmp (br1, br2); + }); + + for (const auto &iter : vec_objs) + { + logger->start_log_line (); + pretty_printer *pp = logger->get_printer (); + pp_string (pp, " "); + iter->dump_to_pp (pp, true); + logger->end_log_line (); + } +} + /* class equiv_class. */ /* equiv_class's default ctor. */ @@ -576,6 +1294,49 @@ constraint::implied_by (const constraint &other, return false; } +/* class bounded_ranges_constraint. 
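Taken together, make_case_label_ranges, get_or_create_union and get_or_create_inverse mean that a "default:" edge gets the complement of every other label's values. An illustrative source fragment (not part of the patch; handle_letter and handle_other are placeholder names; case ranges are a GNU extension) annotated with the value sets the corresponding switch edges would carry, matching the ASCII-letters selftest further down:

extern void handle_letter (unsigned char c);
extern void handle_other (unsigned char c);

void
classify (unsigned char c)
{
  switch (c)
    {
    case 'A' ... 'Z':   /* this label: {[65, 90]} */
    case 'a' ... 'z':   /* union for the shared edge: {[65, 90], [97, 122]} */
      handle_letter (c);
      break;
    default:            /* inverse of that union:
                           {[0, 64], [91, 96], [123, 255]} */
      handle_other (c);
      break;
    }
}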
*/ + +void +bounded_ranges_constraint::print (pretty_printer *pp, + const constraint_manager &cm) const +{ + m_ec_id.print (pp); + pp_string (pp, ": "); + m_ec_id.get_obj (cm).print (pp); + pp_string (pp, ": "); + m_ranges->dump_to_pp (pp, true); +} + +json::object * +bounded_ranges_constraint::to_json () const +{ + json::object *con_obj = new json::object (); + + con_obj->set ("ec", new json::integer_number (m_ec_id.as_int ())); + con_obj->set ("ranges", m_ranges->to_json ()); + + return con_obj; +} + +bool +bounded_ranges_constraint:: +operator== (const bounded_ranges_constraint &other) const +{ + if (m_ec_id != other.m_ec_id) + return false; + + /* We can compare by pointer, since the bounded_ranges_manager + consolidates instances. */ + return m_ranges == other.m_ranges; +} + +void +bounded_ranges_constraint::add_to_hash (inchash::hash *hstate) const +{ + hstate->add_int (m_ec_id.m_idx); + hstate->merge_hash (m_ranges->get_hash ()); +} + /* class equiv_class_id. */ /* Get the underlying equiv_class for this ID from CM. */ @@ -612,6 +1373,7 @@ equiv_class_id::print (pretty_printer *pp) const constraint_manager::constraint_manager (const constraint_manager &other) : m_equiv_classes (other.m_equiv_classes.length ()), m_constraints (other.m_constraints.length ()), + m_bounded_ranges_constraints (other.m_bounded_ranges_constraints.length ()), m_mgr (other.m_mgr) { int i; @@ -621,6 +1383,8 @@ constraint_manager::constraint_manager (const constraint_manager &other) constraint *c; FOR_EACH_VEC_ELT (other.m_constraints, i, c) m_constraints.quick_push (*c); + for (const auto &iter : other.m_bounded_ranges_constraints) + m_bounded_ranges_constraints.quick_push (iter); } /* constraint_manager's assignment operator. */ @@ -630,6 +1394,7 @@ constraint_manager::operator= (const constraint_manager &other) { gcc_assert (m_equiv_classes.length () == 0); gcc_assert (m_constraints.length () == 0); + gcc_assert (m_bounded_ranges_constraints.length () == 0); int i; equiv_class *ec; @@ -640,6 +1405,8 @@ constraint_manager::operator= (const constraint_manager &other) m_constraints.reserve (other.m_constraints.length ()); FOR_EACH_VEC_ELT (other.m_constraints, i, c) m_constraints.quick_push (*c); + for (const auto &iter : other.m_bounded_ranges_constraints) + m_bounded_ranges_constraints.quick_push (iter); return *this; } @@ -658,6 +1425,8 @@ constraint_manager::hash () const hstate.merge_hash (ec->hash ()); FOR_EACH_VEC_ELT (m_constraints, i, c) hstate.merge_hash (c->hash ()); + for (const auto &iter : m_bounded_ranges_constraints) + iter.add_to_hash (&hstate); return hstate.end (); } @@ -670,6 +1439,9 @@ constraint_manager::operator== (const constraint_manager &other) const return false; if (m_constraints.length () != other.m_constraints.length ()) return false; + if (m_bounded_ranges_constraints.length () + != other.m_bounded_ranges_constraints.length ()) + return false; int i; equiv_class *ec; @@ -684,6 +1456,13 @@ constraint_manager::operator== (const constraint_manager &other) const if (!(*c == other.m_constraints[i])) return false; + for (unsigned i = 0; i < m_bounded_ranges_constraints.length (); i++) + { + if (m_bounded_ranges_constraints[i] + != other.m_bounded_ranges_constraints[i]) + return false; + } + return true; } @@ -711,6 +1490,18 @@ constraint_manager::print (pretty_printer *pp) const pp_string (pp, " && "); c->print (pp, *this); } + if (m_bounded_ranges_constraints.length ()) + { + pp_string (pp, " | "); + i = 0; + for (const auto &iter : m_bounded_ranges_constraints) + { + if (i > 0) + 
pp_string (pp, " && "); + iter.print (pp, *this); + i++; + } + } pp_printf (pp, "}"); } @@ -762,6 +1553,30 @@ constraint_manager::dump_to_pp (pretty_printer *pp, bool multiline) const } if (!multiline) pp_string (pp, "}"); + if (m_bounded_ranges_constraints.length ()) + { + if (multiline) + pp_string (pp, " "); + pp_string (pp, "ranges:"); + if (multiline) + pp_newline (pp); + else + pp_string (pp, "{"); + i = 0; + for (const auto &iter : m_bounded_ranges_constraints) + { + if (multiline) + pp_string (pp, " "); + else if (i > 0) + pp_string (pp, " && "); + iter.print (pp, *this); + if (multiline) + pp_newline (pp); + i++; + } + if (!multiline) + pp_string (pp, "}"); + } } /* Dump a multiline representation of this constraint_manager to FP. */ @@ -818,6 +1633,14 @@ constraint_manager::to_json () const cm_obj->set ("constraints", con_arr); } + /* m_bounded_ranges_constraints. */ + { + json::array *con_arr = new json::array (); + for (const auto &c : m_bounded_ranges_constraints) + con_arr->append (c.to_json ()); + cm_obj->set ("bounded_ranges_constraints", con_arr); + } + return cm_obj; } @@ -936,6 +1759,8 @@ constraint_manager::add_unknown_constraint (equiv_class_id lhs_ec_id, if (final_ec != old_ec) m_equiv_classes[rhs_ec_id.m_idx] = final_ec; delete old_ec; + if (lhs_ec_id == final_ec_id) + lhs_ec_id = rhs_ec_id; /* Update the constraints. */ constraint *c; @@ -955,6 +1780,14 @@ constraint_manager::add_unknown_constraint (equiv_class_id lhs_ec_id, if (c->m_rhs == final_ec_id) c->m_rhs = rhs_ec_id; } + bounded_ranges_constraint *brc; + FOR_EACH_VEC_ELT (m_bounded_ranges_constraints, i, brc) + { + if (brc->m_ec_id == rhs_ec_id) + brc->m_ec_id = lhs_ec_id; + if (brc->m_ec_id == final_ec_id) + brc->m_ec_id = rhs_ec_id; + } /* We may now have self-comparisons due to the merger; these constraints should be removed. */ @@ -1008,6 +1841,8 @@ constraint_manager::add_constraint_internal (equiv_class_id lhs_id, /* Add the constraint. */ m_constraints.safe_push (new_c); + /* We don't yet update m_bounded_ranges_constraints here yet. */ + if (!flag_analyzer_transitivity) return; @@ -1141,6 +1976,80 @@ constraint_manager::add_constraint_internal (equiv_class_id lhs_id, } } +/* Attempt to add the constraint that SVAL is within RANGES to this + constraint_manager. + + Return true if the constraint was successfully added (or is already + known to be true). + Return false if the constraint contradicts existing knowledge. */ + +bool +constraint_manager::add_bounded_ranges (const svalue *sval, + const bounded_ranges *ranges) +{ + sval = sval->unwrap_any_unmergeable (); + + /* Nothing can be known about unknown/poisoned values. */ + if (!sval->can_have_associated_state_p ()) + /* Not a contradiction. */ + return true; + + /* If SVAL is a constant, then we can look at RANGES directly. */ + if (tree cst = sval->maybe_get_constant ()) + { + /* If the ranges contain CST, then it's a successful no-op; + otherwise it's a contradiction. */ + return ranges->contain_p (cst); + } + + equiv_class_id ec_id = get_or_add_equiv_class (sval); + + /* If the EC has a constant, it's either true or false. */ + const equiv_class &ec = ec_id.get_obj (*this); + if (tree ec_cst = ec.get_any_constant ()) + { + if (ranges->contain_p (ec_cst)) + /* We already have SVAL == EC_CST, within RANGES, so + we can discard RANGES and succeed. */ + return true; + else + /* We already have SVAL == EC_CST, not within RANGES, so + we can reject RANGES as a contradiction. */ + return false; + } + + /* We have at most one per ec_id. 
*/ + /* Iterate through each range in RANGES. */ + for (auto iter : m_bounded_ranges_constraints) + { + if (iter.m_ec_id == ec_id) + { + /* Update with intersection, or fail if empty. */ + bounded_ranges_manager *mgr = get_range_manager (); + const bounded_ranges *intersection + = mgr->get_or_create_intersection (iter.m_ranges, ranges); + if (intersection->empty_p ()) + { + /* No intersection; fail. */ + return false; + } + else + { + /* Update with intersection; succeed. */ + iter.m_ranges = intersection; + validate (); + return true; + } + } + } + m_bounded_ranges_constraints.safe_push + (bounded_ranges_constraint (ec_id, ranges)); + + validate (); + + return true; +} + /* Look for SVAL within the equivalence classes of this constraint_manager; if found, return true, writing the id to *OUT if OUT is non-NULL, otherwise return false. */ @@ -1279,6 +2188,8 @@ constraint_manager::eval_condition (equiv_class_id lhs_ec, } } + /* We don't use m_bounded_ranges_constraints here yet. */ + return tristate (tristate::TS_UNKNOWN); } @@ -1404,6 +2315,12 @@ constraint_manager::eval_condition (equiv_class_id lhs_ec, } } } + + bounded_ranges_manager *mgr = get_range_manager (); + for (const auto &iter : m_bounded_ranges_constraints) + if (iter.m_ec_id == lhs_ec) + return iter.m_ranges->eval_condition (op, rhs_const, mgr); + /* Look at existing bounds on LHS_EC. */ range lhs_bounds = get_ec_bounds (lhs_ec); return lhs_bounds.eval_condition (op, rhs_const); @@ -1552,6 +2469,29 @@ constraint_manager::purge (const PurgeCriteria &p, purge_stats *stats) con_idx++; } } + + /* Update bounded_ranges_constraint instances. */ + for (unsigned r_idx = 0; + r_idx < m_bounded_ranges_constraints.length (); ) + { + bounded_ranges_constraint *brc + = &m_bounded_ranges_constraints[r_idx]; + + /* Remove if it refers to the deleted EC. */ + if (brc->m_ec_id == ec_idx) + { + m_bounded_ranges_constraints.ordered_remove (r_idx); + if (stats) + stats->m_num_bounded_ranges_constraints++; + } + else + { + /* Renumber any EC ids that refer to ECs that have + had their idx changed. */ + brc->m_ec_id.update_for_removal (ec_idx); + r_idx++; + } + } } else ec_idx++; @@ -1610,6 +2550,17 @@ constraint_manager::purge (const PurgeCriteria &p, purge_stats *stats) c->m_lhs.update_for_removal (ec_idx); c->m_rhs.update_for_removal (ec_idx); } + + /* Likewise for m_bounded_ranges_constraints. */ + for (unsigned r_idx = 0; + r_idx < m_bounded_ranges_constraints.length (); + r_idx++) + { + bounded_ranges_constraint *brc + = &m_bounded_ranges_constraints[r_idx]; + brc->m_ec_id.update_for_removal (ec_idx); + } + continue; } } @@ -1751,6 +2702,9 @@ constraint_manager::canonicalize () used_ecs.add (m_equiv_classes[c->m_rhs.as_int ()]); } + for (const auto &iter : m_bounded_ranges_constraints) + used_ecs.add (m_equiv_classes[iter.m_ec_id.as_int ()]); + /* Purge unused ECs: those that aren't used by constraints and that effectively have only one svalue (either in m_constant or in m_vars). */ @@ -1791,6 +2745,9 @@ constraint_manager::canonicalize () ec_id_map.update (&c->m_rhs); } + for (auto &iter : m_bounded_ranges_constraints) + ec_id_map.update (&iter.m_ec_id); + /* Finally, sort the constraints. 
*/ m_constraints.qsort (constraint_cmp); } @@ -1835,6 +2792,32 @@ public: } } + void on_ranges (const svalue *lhs_sval, + const bounded_ranges *ranges) FINAL OVERRIDE + { + for (const auto &iter : m_cm_b->m_bounded_ranges_constraints) + { + const equiv_class &ec_rhs = iter.m_ec_id.get_obj (*m_cm_b); + for (unsigned i = 0; i < ec_rhs.m_vars.length (); i++) + { + const svalue *rhs_sval = ec_rhs.m_vars[i]; + if (lhs_sval == rhs_sval) + { + /* Union of the two ranges. */ + auto_vec pair (2); + pair.quick_push (ranges); + pair.quick_push (iter.m_ranges); + bounded_ranges_manager *ranges_mgr + = m_cm_b->get_range_manager (); + const bounded_ranges *union_ + = ranges_mgr->get_or_create_union (pair); + bool sat = m_out->add_bounded_ranges (lhs_sval, union_); + gcc_assert (sat); + } + } + } + } + private: const constraint_manager *m_cm_b; constraint_manager *m_out; @@ -1908,6 +2891,16 @@ constraint_manager::for_each_fact (fact_visitor *visitor) const visitor->on_fact (ec_lhs.m_vars[i], code, ec_rhs.m_vars[j]); } } + + for (const auto &iter : m_bounded_ranges_constraints) + { + const equiv_class &ec_lhs = iter.m_ec_id.get_obj (*this); + for (unsigned i = 0; i < ec_lhs.m_vars.length (); i++) + { + const svalue *lhs_sval = ec_lhs.m_vars[i]; + visitor->on_ranges (lhs_sval, iter.m_ranges); + } + } } /* Assert that this object is valid. */ @@ -1945,10 +2938,22 @@ constraint_manager::validate () const FOR_EACH_VEC_ELT (m_constraints, i, c) { gcc_assert (!c->m_lhs.null_p ()); - gcc_assert (c->m_lhs.as_int () <= (int)m_equiv_classes.length ()); + gcc_assert (c->m_lhs.as_int () < (int)m_equiv_classes.length ()); gcc_assert (!c->m_rhs.null_p ()); - gcc_assert (c->m_rhs.as_int () <= (int)m_equiv_classes.length ()); + gcc_assert (c->m_rhs.as_int () < (int)m_equiv_classes.length ()); } + + for (const auto &iter : m_bounded_ranges_constraints) + { + gcc_assert (!iter.m_ec_id.null_p ()); + gcc_assert (iter.m_ec_id.as_int () < (int)m_equiv_classes.length ()); + } +} + +bounded_ranges_manager * +constraint_manager::get_range_manager () const +{ + return m_mgr->get_range_manager (); } #if CHECKING_P @@ -2696,6 +3701,318 @@ test_many_constants () } } +/* Implementation detail of ASSERT_DUMP_BOUNDED_RANGES_EQ. */ + +static void +assert_dump_bounded_range_eq (const location &loc, + const bounded_range &range, + const char *expected) +{ + auto_fix_quotes sentinel; + pretty_printer pp; + pp_format_decoder (&pp) = default_tree_printer; + range.dump_to_pp (&pp, false); + ASSERT_STREQ_AT (loc, pp_formatted_text (&pp), expected); +} + +/* Assert that BR.dump (false) is EXPECTED. */ + +#define ASSERT_DUMP_BOUNDED_RANGE_EQ(BR, EXPECTED) \ + SELFTEST_BEGIN_STMT \ + assert_dump_bounded_range_eq ((SELFTEST_LOCATION), (BR), (EXPECTED)); \ + SELFTEST_END_STMT + +/* Verify that bounded_range works as expected. 
*/ + +static void +test_bounded_range () +{ + tree u8_0 = build_int_cst (unsigned_char_type_node, 0); + tree u8_1 = build_int_cst (unsigned_char_type_node, 1); + tree u8_64 = build_int_cst (unsigned_char_type_node, 64); + tree u8_128 = build_int_cst (unsigned_char_type_node, 128); + tree u8_255 = build_int_cst (unsigned_char_type_node, 255); + + tree s8_0 = build_int_cst (signed_char_type_node, 0); + tree s8_1 = build_int_cst (signed_char_type_node, 1); + tree s8_2 = build_int_cst (signed_char_type_node, 2); + + bounded_range br_u8_0 (u8_0, u8_0); + ASSERT_DUMP_BOUNDED_RANGE_EQ (br_u8_0, "0"); + ASSERT_TRUE (br_u8_0.contains_p (u8_0)); + ASSERT_FALSE (br_u8_0.contains_p (u8_1)); + ASSERT_TRUE (br_u8_0.contains_p (s8_0)); + ASSERT_FALSE (br_u8_0.contains_p (s8_1)); + + bounded_range br_u8_0_1 (u8_0, u8_1); + ASSERT_DUMP_BOUNDED_RANGE_EQ (br_u8_0_1, "[0, 1]"); + + bounded_range tmp (NULL_TREE, NULL_TREE); + ASSERT_TRUE (br_u8_0.intersects_p (br_u8_0_1, &tmp)); + ASSERT_DUMP_BOUNDED_RANGE_EQ (tmp, "0"); + + bounded_range br_u8_64_128 (u8_64, u8_128); + ASSERT_DUMP_BOUNDED_RANGE_EQ (br_u8_64_128, "[64, 128]"); + + ASSERT_FALSE (br_u8_0.intersects_p (br_u8_64_128, NULL)); + ASSERT_FALSE (br_u8_64_128.intersects_p (br_u8_0, NULL)); + + bounded_range br_u8_128_255 (u8_128, u8_255); + ASSERT_DUMP_BOUNDED_RANGE_EQ (br_u8_128_255, "[128, 255]"); + ASSERT_TRUE (br_u8_128_255.intersects_p (br_u8_64_128, &tmp)); + ASSERT_DUMP_BOUNDED_RANGE_EQ (tmp, "128"); + + bounded_range br_s8_2 (s8_2, s8_2); + ASSERT_DUMP_BOUNDED_RANGE_EQ (br_s8_2, "2"); + bounded_range br_s8_2_u8_255 (s8_2, u8_255); + ASSERT_DUMP_BOUNDED_RANGE_EQ (br_s8_2_u8_255, "[2, 255]"); +} + +/* Implementation detail of ASSERT_DUMP_BOUNDED_RANGES_EQ. */ + +static void +assert_dump_bounded_ranges_eq (const location &loc, + const bounded_ranges *ranges, + const char *expected) +{ + auto_fix_quotes sentinel; + pretty_printer pp; + pp_format_decoder (&pp) = default_tree_printer; + ranges->dump_to_pp (&pp, false); + ASSERT_STREQ_AT (loc, pp_formatted_text (&pp), expected); +} + +/* Implementation detail of ASSERT_DUMP_BOUNDED_RANGES_EQ. */ + +static void +assert_dump_bounded_ranges_eq (const location &loc, + const bounded_ranges &ranges, + const char *expected) +{ + auto_fix_quotes sentinel; + pretty_printer pp; + pp_format_decoder (&pp) = default_tree_printer; + ranges.dump_to_pp (&pp, false); + ASSERT_STREQ_AT (loc, pp_formatted_text (&pp), expected); +} + +/* Assert that BRS.dump (false) is EXPECTED. */ + +#define ASSERT_DUMP_BOUNDED_RANGES_EQ(BRS, EXPECTED) \ + SELFTEST_BEGIN_STMT \ + assert_dump_bounded_ranges_eq ((SELFTEST_LOCATION), (BRS), (EXPECTED)); \ + SELFTEST_END_STMT + +/* Verify that the bounded_ranges class works as expected. 
*/ + +static void +test_bounded_ranges () +{ + bounded_ranges_manager mgr; + + tree ch0 = build_int_cst (unsigned_char_type_node, 0); + tree ch1 = build_int_cst (unsigned_char_type_node, 1); + tree ch2 = build_int_cst (unsigned_char_type_node, 2); + tree ch3 = build_int_cst (unsigned_char_type_node, 3); + tree ch128 = build_int_cst (unsigned_char_type_node, 128); + tree ch129 = build_int_cst (unsigned_char_type_node, 129); + tree ch254 = build_int_cst (unsigned_char_type_node, 254); + tree ch255 = build_int_cst (unsigned_char_type_node, 255); + + const bounded_ranges *empty = mgr.get_or_create_empty (); + ASSERT_DUMP_BOUNDED_RANGES_EQ (empty, "{}"); + + const bounded_ranges *point0 = mgr.get_or_create_point (ch0); + ASSERT_DUMP_BOUNDED_RANGES_EQ (point0, "{0}"); + + const bounded_ranges *point1 = mgr.get_or_create_point (ch1); + ASSERT_DUMP_BOUNDED_RANGES_EQ (point1, "{1}"); + + const bounded_ranges *point2 = mgr.get_or_create_point (ch2); + ASSERT_DUMP_BOUNDED_RANGES_EQ (point2, "{2}"); + + const bounded_ranges *range0_128 = mgr.get_or_create_range (ch0, ch128); + ASSERT_DUMP_BOUNDED_RANGES_EQ (range0_128, "{[0, 128]}"); + + const bounded_ranges *range0_255 = mgr.get_or_create_range (ch0, ch255); + ASSERT_DUMP_BOUNDED_RANGES_EQ (range0_255, "{[0, 255]}"); + + ASSERT_FALSE (empty->contain_p (ch0)); + ASSERT_FALSE (empty->contain_p (ch1)); + ASSERT_FALSE (empty->contain_p (ch255)); + + ASSERT_TRUE (point0->contain_p (ch0)); + ASSERT_FALSE (point0->contain_p (ch1)); + ASSERT_FALSE (point0->contain_p (ch255)); + + ASSERT_FALSE (point1->contain_p (ch0)); + ASSERT_TRUE (point1->contain_p (ch1)); + ASSERT_FALSE (point0->contain_p (ch255)); + + ASSERT_TRUE (range0_128->contain_p (ch0)); + ASSERT_TRUE (range0_128->contain_p (ch1)); + ASSERT_TRUE (range0_128->contain_p (ch128)); + ASSERT_FALSE (range0_128->contain_p (ch129)); + ASSERT_FALSE (range0_128->contain_p (ch254)); + ASSERT_FALSE (range0_128->contain_p (ch255)); + + const bounded_ranges *inv0_128 + = mgr.get_or_create_inverse (range0_128, unsigned_char_type_node); + ASSERT_DUMP_BOUNDED_RANGES_EQ (inv0_128, "{[129, 255]}"); + + const bounded_ranges *range128_129 = mgr.get_or_create_range (ch128, ch129); + ASSERT_DUMP_BOUNDED_RANGES_EQ (range128_129, "{[128, 129]}"); + + const bounded_ranges *inv128_129 + = mgr.get_or_create_inverse (range128_129, unsigned_char_type_node); + ASSERT_DUMP_BOUNDED_RANGES_EQ (inv128_129, "{[0, 127], [130, 255]}"); + + /* Intersection. */ + { + /* Intersection of disjoint ranges should be empty set. */ + const bounded_ranges *intersect0_1 + = mgr.get_or_create_intersection (point0, point1); + ASSERT_DUMP_BOUNDED_RANGES_EQ (intersect0_1, "{}"); + } + + /* Various tests of "union of ranges". */ + { + { + /* Touching points should be merged into a range. */ + auto_vec v; + v.safe_push (point0); + v.safe_push (point1); + const bounded_ranges *union_0_and_1 = mgr.get_or_create_union (v); + ASSERT_DUMP_BOUNDED_RANGES_EQ (union_0_and_1, "{[0, 1]}"); + } + + { + /* Overlapping and out-of-order. */ + auto_vec v; + v.safe_push (inv0_128); // {[129, 255]} + v.safe_push (range128_129); + const bounded_ranges *union_129_255_and_128_129 + = mgr.get_or_create_union (v); + ASSERT_DUMP_BOUNDED_RANGES_EQ (union_129_255_and_128_129, "{[128, 255]}"); + } + + { + /* Union of R and inverse(R) should be full range of type. 
*/ + auto_vec v; + v.safe_push (range128_129); + v.safe_push (inv128_129); + const bounded_ranges *union_ = mgr.get_or_create_union (v); + ASSERT_DUMP_BOUNDED_RANGES_EQ (union_, "{[0, 255]}"); + } + + /* Union with an endpoint. */ + { + const bounded_ranges *range2_to_255 + = mgr.get_or_create_range (ch2, ch255); + ASSERT_DUMP_BOUNDED_RANGES_EQ (range2_to_255, "{[2, 255]}"); + auto_vec v; + v.safe_push (point0); + v.safe_push (point2); + v.safe_push (range2_to_255); + const bounded_ranges *union_ = mgr.get_or_create_union (v); + ASSERT_DUMP_BOUNDED_RANGES_EQ (union_, "{0, [2, 255]}"); + } + + /* Construct from vector of bounded_range. */ + { + auto_vec v; + v.safe_push (bounded_range (ch2, ch2)); + v.safe_push (bounded_range (ch0, ch0)); + v.safe_push (bounded_range (ch2, ch255)); + bounded_ranges br (v); + ASSERT_DUMP_BOUNDED_RANGES_EQ (&br, "{0, [2, 255]}"); + } + } + + /* Various tests of "inverse". */ + { + { + const bounded_ranges *range_1_to_3 = mgr.get_or_create_range (ch1, ch3); + ASSERT_DUMP_BOUNDED_RANGES_EQ (range_1_to_3, "{[1, 3]}"); + const bounded_ranges *inv + = mgr.get_or_create_inverse (range_1_to_3, unsigned_char_type_node); + ASSERT_DUMP_BOUNDED_RANGES_EQ (inv, "{0, [4, 255]}"); + } + { + const bounded_ranges *range_1_to_255 + = mgr.get_or_create_range (ch1, ch255); + ASSERT_DUMP_BOUNDED_RANGES_EQ (range_1_to_255, "{[1, 255]}"); + const bounded_ranges *inv + = mgr.get_or_create_inverse (range_1_to_255, unsigned_char_type_node); + ASSERT_DUMP_BOUNDED_RANGES_EQ (inv, "{0}"); + } + { + const bounded_ranges *range_0_to_254 + = mgr.get_or_create_range (ch0, ch254); + ASSERT_DUMP_BOUNDED_RANGES_EQ (range_0_to_254, "{[0, 254]}"); + const bounded_ranges *inv + = mgr.get_or_create_inverse (range_0_to_254, unsigned_char_type_node); + ASSERT_DUMP_BOUNDED_RANGES_EQ (inv, "{255}"); + } + } + + /* "case 'a'-'z': case 'A-Z':" vs "default:", for ASCII. */ + { + tree ch65 = build_int_cst (unsigned_char_type_node, 65); + tree ch90 = build_int_cst (unsigned_char_type_node, 90); + + tree ch97 = build_int_cst (unsigned_char_type_node, 97); + tree ch122 = build_int_cst (unsigned_char_type_node, 122); + + const bounded_ranges *A_to_Z = mgr.get_or_create_range (ch65, ch90); + ASSERT_DUMP_BOUNDED_RANGES_EQ (A_to_Z, "{[65, 90]}"); + const bounded_ranges *a_to_z = mgr.get_or_create_range (ch97, ch122); + ASSERT_DUMP_BOUNDED_RANGES_EQ (a_to_z, "{[97, 122]}"); + auto_vec v; + v.safe_push (A_to_Z); + v.safe_push (a_to_z); + const bounded_ranges *label_ranges = mgr.get_or_create_union (v); + ASSERT_DUMP_BOUNDED_RANGES_EQ (label_ranges, "{[65, 90], [97, 122]}"); + const bounded_ranges *default_ranges + = mgr.get_or_create_inverse (label_ranges, unsigned_char_type_node); + ASSERT_DUMP_BOUNDED_RANGES_EQ (default_ranges, + "{[0, 64], [91, 96], [123, 255]}"); + } + + /* Verify ranges from ops. */ + ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (EQ_EXPR, ch128), + "{128}"); + ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (NE_EXPR, ch128), + "{[0, 127], [129, 255]}"); + ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (LT_EXPR, ch128), + "{[0, 127]}"); + ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (LE_EXPR, ch128), + "{[0, 128]}"); + ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (GE_EXPR, ch128), + "{[128, 255]}"); + ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (GT_EXPR, ch128), + "{[129, 255]}"); + /* Ops at endpoints of type ranges. 
*/ + ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (LE_EXPR, ch0), + "{0}"); + ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (LT_EXPR, ch0), + "{}"); + ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (NE_EXPR, ch0), + "{[1, 255]}"); + ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (GE_EXPR, ch255), + "{255}"); + ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (GT_EXPR, ch255), + "{}"); + ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (NE_EXPR, ch255), + "{[0, 254]}"); + + /* Verify that instances are consolidated by mgr. */ + ASSERT_EQ (mgr.get_or_create_point (ch0), + mgr.get_or_create_point (ch0)); + ASSERT_NE (mgr.get_or_create_point (ch0), + mgr.get_or_create_point (ch1)); +} + /* Run the selftests in this file, temporarily overriding flag_analyzer_transitivity with TRANSITIVITY. */ @@ -2715,6 +4032,8 @@ run_constraint_manager_tests (bool transitivity) test_constraint_impl (); test_equality (); test_many_constants (); + test_bounded_range (); + test_bounded_ranges (); flag_analyzer_transitivity = saved_flag_analyzer_transitivity; } diff --git a/gcc/analyzer/constraint-manager.h b/gcc/analyzer/constraint-manager.h index 2bb3215e630..0a430eae91f 100644 --- a/gcc/analyzer/constraint-manager.h +++ b/gcc/analyzer/constraint-manager.h @@ -64,6 +64,164 @@ struct range bound m_upper_bound; }; +/* A closed range of values with constant integer bounds + e.g. [3, 5] for the set {3, 4, 5}. */ + +struct bounded_range +{ + bounded_range (const_tree lower, const_tree upper); + + void dump_to_pp (pretty_printer *pp, bool show_types) const; + void dump (bool show_types) const; + + json::object *to_json () const; + + bool contains_p (tree cst) const; + + bool intersects_p (const bounded_range &other, + bounded_range *out) const; + + bool operator== (const bounded_range &other) const; + bool operator!= (const bounded_range &other) const + { + return !(*this == other); + } + + static int cmp (const bounded_range &a, const bounded_range &b); + + tree m_lower; + tree m_upper; + +private: + static void set_json_attr (json::object *obj, const char *name, tree value); +}; + +/* A collection of bounded_range instances, suitable + for representing the ranges on a case label within a switch + statement. */ + +struct bounded_ranges +{ +public: + typedef bounded_ranges key_t; + + bounded_ranges (const bounded_range &range); + bounded_ranges (const vec &ranges); + bounded_ranges (enum tree_code op, tree rhs_const); + + bool operator== (const bounded_ranges &other) const; + + hashval_t get_hash () const { return m_hash; } + + void dump_to_pp (pretty_printer *pp, bool show_types) const; + void dump (bool show_types) const; + + json::value *to_json () const; + + tristate eval_condition (enum tree_code op, + tree rhs_const, + bounded_ranges_manager *mgr) const; + + bool contain_p (tree cst) const; + bool empty_p () const { return m_ranges.length () == 0; } + + static int cmp (const bounded_ranges *a, const bounded_ranges *b); + +private: + void canonicalize (); + void validate () const; + + friend class bounded_ranges_manager; + + auto_vec m_ranges; + hashval_t m_hash; +}; + +} // namespace ana + +template <> struct default_hash_traits +: public member_function_hash_traits +{ + static const bool empty_zero_p = true; +}; + +namespace ana { + +/* An object to own and consolidate bounded_ranges instances. + This also caches the mapping from switch_cfg_superedge + bounded_ranges instances, so that get_or_create_ranges_for_switch is + memoized. 
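Because the manager hands out one canonical bounded_ranges object per distinct set of ranges, the rest of the patch can compare range sets by pointer (see bounded_ranges::eval_condition and bounded_ranges_constraint::operator== above). A minimal standalone sketch of that consolidation idea, using std::set over a simplified range type instead of GCC's hash_map and hash traits (illustrative only, not the patch's code):

#include <cassert>
#include <set>
#include <vector>

struct simple_range
{
  long lower, upper;
  bool operator< (const simple_range &o) const
  {
    if (lower != o.lower)
      return lower < o.lower;
    return upper < o.upper;
  }
};

typedef std::vector<simple_range> range_set;

/* Owner of canonical range_set instances: equal sets map to the same
   object, so equality of sets becomes pointer equality.  */
class range_set_manager
{
public:
  const range_set *consolidate (const range_set &rs)
  {
    return &*m_sets.insert (rs).first;
  }

private:
  std::set<range_set> m_sets;
};

int
main ()
{
  range_set_manager mgr;
  const range_set *a = mgr.consolidate ({ {0, 0} });
  const range_set *b = mgr.consolidate ({ {0, 0} });
  const range_set *c = mgr.consolidate ({ {1, 1} });
  assert (a == b);   /* same underlying object, like the ASSERT_EQ above */
  assert (a != c);
  return 0;
}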
 */
+
+class bounded_ranges_manager
+{
+public:
+  ~bounded_ranges_manager ();
+
+  const bounded_ranges *
+  get_or_create_ranges_for_switch (const switch_cfg_superedge *edge,
+                                   const gswitch *switch_stmt);
+
+  const bounded_ranges *get_or_create_empty ();
+  const bounded_ranges *get_or_create_point (const_tree value);
+  const bounded_ranges *get_or_create_range (const_tree lower_bound,
+                                             const_tree upper_bound);
+  const bounded_ranges *
+  get_or_create_union (const vec <const bounded_ranges *> &others);
+  const bounded_ranges *
+  get_or_create_intersection (const bounded_ranges *a,
+                              const bounded_ranges *b);
+  const bounded_ranges *
+  get_or_create_inverse (const bounded_ranges *other, tree type);
+
+  void log_stats (logger *logger, bool show_objs) const;
+
+private:
+  const bounded_ranges *
+  create_ranges_for_switch (const switch_cfg_superedge &edge,
+                            const gswitch *switch_stmt);
+
+  const bounded_ranges *
+  make_case_label_ranges (const gswitch *switch_stmt,
+                          tree case_label);
+
+  const bounded_ranges *consolidate (bounded_ranges *);
+
+  struct hash_traits_t : public typed_noop_remove <bounded_ranges *>
+  {
+    typedef bounded_ranges *key_type;
+    typedef bounded_ranges *value_type;
+
+    static inline bool
+    equal (const key_type &k1, const key_type &k2)
+    {
+      return *k1 == *k2;
+    }
+    static inline hashval_t
+    hash (const key_type &k)
+    {
+      return k->get_hash ();
+    }
+    static inline bool is_empty (key_type k) { return k == NULL; }
+    static inline void mark_empty (key_type &k) { k = NULL; }
+    static inline bool is_deleted (key_type k)
+    {
+      return k == reinterpret_cast<key_type> (1);
+    }
+
+    static const bool empty_zero_p = true;
+  };
+  struct traits_t : public simple_hashmap_traits<hash_traits_t,
+                                                 bounded_ranges *>
+  {
+  };
+  typedef hash_map<bounded_ranges *, bounded_ranges *, traits_t> map_t;
+  map_t m_map;
+
+  typedef hash_map<const switch_cfg_superedge *,
+                   const bounded_ranges *> edge_cache_t;
+  edge_cache_t m_edge_cache;
+};
+
 /* An equivalence class within a constraint manager: a set of svalues
    that are known to all be equal to each other, together with an
    optional tree constant that they are equal to.  */
@@ -190,6 +348,33 @@ class fact_visitor
   virtual void on_fact (const svalue *lhs,
                         enum tree_code,
                         const svalue *rhs) = 0;
+  virtual void on_ranges (const svalue *lhs,
+                          const bounded_ranges *ranges) = 0;
+};
+
+class bounded_ranges_constraint
+{
+public:
+  bounded_ranges_constraint (equiv_class_id ec_id,
+                             const bounded_ranges *ranges)
+  : m_ec_id (ec_id), m_ranges (ranges)
+  {
+  }
+
+  void print (pretty_printer *pp, const constraint_manager &cm) const;
+
+  json::object *to_json () const;
+
+  bool operator== (const bounded_ranges_constraint &other) const;
+  bool operator!= (const bounded_ranges_constraint &other) const
+  {
+    return !(*this == other);
+  }
+
+  void add_to_hash (inchash::hash *hstate) const;
+
+  equiv_class_id m_ec_id;
+  const bounded_ranges *m_ranges;
 };

 /* A collection of equivalence classes and constraints on them. 
@@ -248,6 +433,9 @@ public: enum tree_code op, equiv_class_id rhs_ec_id); + bool add_bounded_ranges (const svalue *sval, + const bounded_ranges *ranges); + bool get_equiv_class_by_svalue (const svalue *sval, equiv_class_id *out) const; equiv_class_id get_or_add_equiv_class (const svalue *sval); @@ -281,8 +469,11 @@ public: void validate () const; + bounded_ranges_manager *get_range_manager () const; + auto_delete_vec m_equiv_classes; auto_vec m_constraints; + auto_vec m_bounded_ranges_constraints; private: void add_constraint_internal (equiv_class_id lhs_id, diff --git a/gcc/analyzer/diagnostic-manager.cc b/gcc/analyzer/diagnostic-manager.cc index 77dda4d2768..7ffe0004356 100644 --- a/gcc/analyzer/diagnostic-manager.cc +++ b/gcc/analyzer/diagnostic-manager.cc @@ -520,8 +520,7 @@ epath_finder::process_worklist_item (feasible_worklist *worklist, gcc_assert (rc); fg->add_feasibility_problem (fnode, succ_eedge, - *rc); - delete rc; + rc); /* Give up if there have been too many infeasible edges. */ if (fg->get_num_infeasible () diff --git a/gcc/analyzer/engine.cc b/gcc/analyzer/engine.cc index e66ca4e42fd..4ee92794941 100644 --- a/gcc/analyzer/engine.cc +++ b/gcc/analyzer/engine.cc @@ -3842,7 +3842,7 @@ feasibility_problem::dump_to_pp (pretty_printer *pp) const pp_string (pp, "; rejected constraint: "); m_rc->dump_to_pp (pp); pp_string (pp, "; rmodel: "); - m_rc->m_model.dump_to_pp (pp, true, false); + m_rc->get_model ().dump_to_pp (pp, true, false); } } diff --git a/gcc/analyzer/feasible-graph.cc b/gcc/analyzer/feasible-graph.cc index 675bda9e7e5..3b8589630ed 100644 --- a/gcc/analyzer/feasible-graph.cc +++ b/gcc/analyzer/feasible-graph.cc @@ -129,7 +129,7 @@ infeasible_node::dump_dot (graphviz_out *gv, pp_string (pp, "rejected constraint:"); pp_newline (pp); - m_rc.dump_to_pp (pp); + m_rc->dump_to_pp (pp); pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true); @@ -178,12 +178,13 @@ feasible_graph::add_node (const exploded_node *enode, } /* Add an infeasible_node to this graph and an infeasible_edge connecting - to it from SRC_FNODE, capturing a failure of RC along EEDGE. */ + to it from SRC_FNODE, capturing a failure of RC along EEDGE. + Takes ownership of RC. */ void feasible_graph::add_feasibility_problem (feasible_node *src_fnode, const exploded_edge *eedge, - const rejected_constraint &rc) + rejected_constraint *rc) { infeasible_node *dst_fnode = new infeasible_node (eedge->m_dest, m_nodes.length (), rc); diff --git a/gcc/analyzer/feasible-graph.h b/gcc/analyzer/feasible-graph.h index 5a580f4b925..07696faecee 100644 --- a/gcc/analyzer/feasible-graph.h +++ b/gcc/analyzer/feasible-graph.h @@ -115,17 +115,18 @@ class infeasible_node : public base_feasible_node { public: infeasible_node (const exploded_node *inner_node, unsigned index, - const rejected_constraint &rc) + rejected_constraint *rc) : base_feasible_node (inner_node, index), m_rc (rc) { } + ~infeasible_node () { delete m_rc; } void dump_dot (graphviz_out *gv, const dump_args_t &args) const FINAL OVERRIDE; private: - rejected_constraint m_rc; + rejected_constraint *m_rc; }; /* Base class of edge within a feasible_graph. 
*/ @@ -192,7 +193,7 @@ class feasible_graph : public digraph void add_feasibility_problem (feasible_node *src_fnode, const exploded_edge *eedge, - const rejected_constraint &rc); + rejected_constraint *rc); exploded_path *make_epath (feasible_node *fnode) const; diff --git a/gcc/analyzer/region-model-manager.cc b/gcc/analyzer/region-model-manager.cc index 9e4644f2e54..1cdec1bd230 100644 --- a/gcc/analyzer/region-model-manager.cc +++ b/gcc/analyzer/region-model-manager.cc @@ -56,6 +56,7 @@ along with GCC; see the file COPYING3. If not see #include "analyzer/program-point.h" #include "analyzer/store.h" #include "analyzer/region-model.h" +#include "analyzer/constraint-manager.h" #if ENABLE_ANALYZER @@ -77,7 +78,8 @@ region_model_manager::region_model_manager () m_fndecls_map (), m_labels_map (), m_globals_region (alloc_region_id (), &m_root_region), m_globals_map (), - m_store_mgr (this) + m_store_mgr (this), + m_range_mgr (new bounded_ranges_manager ()) { } @@ -142,6 +144,8 @@ region_model_manager::~region_model_manager () for (string_map_t::iterator iter = m_string_map.begin (); iter != m_string_map.end (); ++iter) delete (*iter).second; + + delete m_range_mgr; } /* Return true if C exceeds the complexity limit for svalues. */ @@ -1574,6 +1578,7 @@ region_model_manager::log_stats (logger *logger, bool show_objs) const logger->log (" # managed dynamic regions: %i", m_managed_dynamic_regions.length ()); m_store_mgr.log_stats (logger, show_objs); + m_range_mgr->log_stats (logger, show_objs); } /* Dump the number of objects of each class that were managed by this diff --git a/gcc/analyzer/region-model.cc b/gcc/analyzer/region-model.cc index f54be14e639..787f2ed33c0 100644 --- a/gcc/analyzer/region-model.cc +++ b/gcc/analyzer/region-model.cc @@ -2773,7 +2773,7 @@ region_model::add_constraint (tree lhs, enum tree_code op, tree rhs, { bool sat = add_constraint (lhs, op, rhs, ctxt); if (!sat && out) - *out = new rejected_constraint (*this, lhs, op, rhs); + *out = new rejected_op_constraint (*this, lhs, op, rhs); return sat; } @@ -3329,56 +3329,15 @@ region_model::apply_constraints_for_gswitch (const switch_cfg_superedge &edge, region_model_context *ctxt, rejected_constraint **out) { + bounded_ranges_manager *ranges_mgr = get_range_manager (); + const bounded_ranges *all_cases_ranges + = ranges_mgr->get_or_create_ranges_for_switch (&edge, switch_stmt); tree index = gimple_switch_index (switch_stmt); - tree case_label = edge.get_case_label (); - gcc_assert (TREE_CODE (case_label) == CASE_LABEL_EXPR); - tree lower_bound = CASE_LOW (case_label); - tree upper_bound = CASE_HIGH (case_label); - if (lower_bound) - { - if (upper_bound) - { - /* Range. */ - if (!add_constraint (index, GE_EXPR, lower_bound, ctxt, out)) - return false; - return add_constraint (index, LE_EXPR, upper_bound, ctxt, out); - } - else - /* Single-value. */ - return add_constraint (index, EQ_EXPR, lower_bound, ctxt, out); - } - else - { - /* The default case. - Add exclusions based on the other cases. */ - for (unsigned other_idx = 1; - other_idx < gimple_switch_num_labels (switch_stmt); - other_idx++) - { - tree other_label = gimple_switch_label (switch_stmt, - other_idx); - tree other_lower_bound = CASE_LOW (other_label); - tree other_upper_bound = CASE_HIGH (other_label); - gcc_assert (other_lower_bound); - if (other_upper_bound) - { - /* Exclude this range-valued case. - For now, we just exclude the boundary values. - TODO: exclude the values within the region. 
*/ - if (!add_constraint (index, NE_EXPR, other_lower_bound, - ctxt, out)) - return false; - if (!add_constraint (index, NE_EXPR, other_upper_bound, - ctxt, out)) - return false; - } - else - /* Exclude this single-valued case. */ - if (!add_constraint (index, NE_EXPR, other_lower_bound, ctxt, out)) - return false; - } - return true; - } + const svalue *index_sval = get_rvalue (index, ctxt); + bool sat = m_constraints->add_bounded_ranges (index_sval, all_cases_ranges); + if (!sat && out) + *out = new rejected_ranges_constraint (*this, index, all_cases_ranges); + return sat; } /* Apply any constraints due to an exception being thrown at LAST_STMT. @@ -3860,10 +3819,10 @@ debug (const region_model &rmodel) rmodel.dump (false); } -/* struct rejected_constraint. */ +/* class rejected_op_constraint : public rejected_constraint. */ void -rejected_constraint::dump_to_pp (pretty_printer *pp) const +rejected_op_constraint::dump_to_pp (pretty_printer *pp) const { region_model m (m_model); const svalue *lhs_sval = m.get_rvalue (m_lhs, NULL); @@ -3873,6 +3832,18 @@ rejected_constraint::dump_to_pp (pretty_printer *pp) const rhs_sval->dump_to_pp (pp, true); } +/* class rejected_ranges_constraint : public rejected_constraint. */ + +void +rejected_ranges_constraint::dump_to_pp (pretty_printer *pp) const +{ + region_model m (m_model); + const svalue *sval = m.get_rvalue (m_expr, NULL); + sval->dump_to_pp (pp, true); + pp_string (pp, " in "); + m_ranges->dump_to_pp (pp, true); +} + /* class engine. */ /* Dump the managed objects by class to LOGGER, and the per-class totals. */ diff --git a/gcc/analyzer/region-model.h b/gcc/analyzer/region-model.h index a734f9f7315..f2c82b0dd80 100644 --- a/gcc/analyzer/region-model.h +++ b/gcc/analyzer/region-model.h @@ -189,6 +189,7 @@ struct purge_stats m_num_regions (0), m_num_equiv_classes (0), m_num_constraints (0), + m_num_bounded_ranges_constraints (0), m_num_client_items (0) {} @@ -196,6 +197,7 @@ struct purge_stats int m_num_regions; int m_num_equiv_classes; int m_num_constraints; + int m_num_bounded_ranges_constraints; int m_num_client_items; }; @@ -320,6 +322,7 @@ public: unsigned alloc_region_id () { return m_next_region_id++; } store_manager *get_store_manager () { return &m_store_mgr; } + bounded_ranges_manager *get_range_manager () const { return m_range_mgr; } /* Dynamically-allocated region instances. The number of these within the analysis can grow arbitrarily. @@ -456,6 +459,8 @@ private: store_manager m_store_mgr; + bounded_ranges_manager *m_range_mgr; + /* "Dynamically-allocated" region instances. The number of these within the analysis can grow arbitrarily. They are still owned by the manager. */ @@ -698,6 +703,10 @@ class region_model void unset_dynamic_extents (const region *reg); region_model_manager *get_manager () const { return m_mgr; } + bounded_ranges_manager *get_range_manager () const + { + return m_mgr->get_range_manager (); + } void unbind_region_and_descendents (const region *reg, enum poison_kind pkind); @@ -945,21 +954,54 @@ struct model_merger /* A record that can (optionally) be written out when region_model::add_constraint fails. 
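The practical effect of the rewritten apply_constraints_for_gswitch is most visible on "default:" edges: the removed code above only excluded the boundary values of each range case (hence its TODO), whereas the new code records the full complement of all the other labels. An illustrative fragment (not part of the patch; categorize and noise are made-up names; case ranges are a GNU extension):

extern void noise (void);

int
categorize (int x)
{
  switch (x)
    {
    case 3 ... 10:   /* this edge: x in {[3, 10]} */
      return 1;
    default:         /* this edge now: x in {[INT_MIN, 2], [11, INT_MAX]};
                        before the patch only x != 3 && x != 10 was recorded,
                        so e.g. x == 5 was not excluded on the default path */
      noise ();
      return 0;
    }
}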
*/ -struct rejected_constraint +class rejected_constraint { - rejected_constraint (const region_model &model, - tree lhs, enum tree_code op, tree rhs) - : m_model (model), m_lhs (lhs), m_op (op), m_rhs (rhs) +public: + virtual ~rejected_constraint () {} + virtual void dump_to_pp (pretty_printer *pp) const = 0; + + const region_model &get_model () const { return m_model; } + +protected: + rejected_constraint (const region_model &model) + : m_model (model) {} - void dump_to_pp (pretty_printer *pp) const; - region_model m_model; +}; + +class rejected_op_constraint : public rejected_constraint +{ +public: + rejected_op_constraint (const region_model &model, + tree lhs, enum tree_code op, tree rhs) + : rejected_constraint (model), + m_lhs (lhs), m_op (op), m_rhs (rhs) + {} + + void dump_to_pp (pretty_printer *pp) const FINAL OVERRIDE; + tree m_lhs; enum tree_code m_op; tree m_rhs; }; +class rejected_ranges_constraint : public rejected_constraint +{ +public: + rejected_ranges_constraint (const region_model &model, + tree expr, const bounded_ranges *ranges) + : rejected_constraint (model), + m_expr (expr), m_ranges (ranges) + {} + + void dump_to_pp (pretty_printer *pp) const FINAL OVERRIDE; + +private: + tree m_expr; + const bounded_ranges *m_ranges; +}; + /* A bundle of state. */ class engine diff --git a/gcc/analyzer/supergraph.cc b/gcc/analyzer/supergraph.cc index 66ef765f472..85acf44d045 100644 --- a/gcc/analyzer/supergraph.cc +++ b/gcc/analyzer/supergraph.cc @@ -50,6 +50,7 @@ along with GCC; see the file COPYING3. If not see #include "cgraph.h" #include "cfg.h" #include "digraph.h" +#include "tree-cfg.h" #include "analyzer/supergraph.h" #include "analyzer/analyzer-logging.h" @@ -246,7 +247,7 @@ supergraph::supergraph (logger *logger) supernode *dest_supernode = *m_bb_to_initial_node.get (dest_cfg_block); cfg_superedge *cfg_sedge - = add_cfg_edge (src_supernode, dest_supernode, cfg_edge, idx); + = add_cfg_edge (src_supernode, dest_supernode, cfg_edge); m_cfg_edge_to_cfg_superedge.put (cfg_edge, cfg_sedge); } } @@ -505,17 +506,16 @@ supergraph::add_node (function *fun, basic_block bb, gcall *returning_call, adding it to this supergraph. If the edge is for a switch statement, create a switch_cfg_superedge - subclass using IDX (the index of E within the out-edges from SRC's - underlying basic block). */ + subclass. */ cfg_superedge * -supergraph::add_cfg_edge (supernode *src, supernode *dest, ::edge e, int idx) +supergraph::add_cfg_edge (supernode *src, supernode *dest, ::edge e) { /* Special-case switch edges. */ gimple *stmt = src->get_last_stmt (); cfg_superedge *new_edge; if (stmt && stmt->code == GIMPLE_SWITCH) - new_edge = new switch_cfg_superedge (src, dest, e, idx); + new_edge = new switch_cfg_superedge (src, dest, e); else new_edge = new cfg_superedge (src, dest, e); add_edge (new_edge); @@ -1072,6 +1072,23 @@ cfg_superedge::get_phi_arg (const gphi *phi) const return gimple_phi_arg_def (phi, index); } +switch_cfg_superedge::switch_cfg_superedge (supernode *src, + supernode *dst, + ::edge e) +: cfg_superedge (src, dst, e) +{ + /* Populate m_case_labels with all cases which go to DST. */ + const gswitch *gswitch = get_switch_stmt (); + for (unsigned i = 0; i < gimple_switch_num_labels (gswitch); i++) + { + tree case_ = gimple_switch_label (gswitch, i); + basic_block bb = label_to_block (src->get_function (), + CASE_LABEL (case_)); + if (bb == dst->m_bb) + m_case_labels.safe_push (case_); + } +} + /* Implementation of superedge::dump_label_to_pp for CFG superedges for "switch" statements. 
@@ -1081,31 +1098,63 @@ void switch_cfg_superedge::dump_label_to_pp (pretty_printer *pp, bool user_facing ATTRIBUTE_UNUSED) const { - tree case_label = get_case_label (); - gcc_assert (TREE_CODE (case_label) == CASE_LABEL_EXPR); - tree lower_bound = CASE_LOW (case_label); - tree upper_bound = CASE_HIGH (case_label); - if (lower_bound) + if (user_facing) { - pp_printf (pp, "case "); - dump_generic_node (pp, lower_bound, 0, (dump_flags_t)0, false); - if (upper_bound) + for (unsigned i = 0; i < m_case_labels.length (); ++i) { - pp_printf (pp, " ... "); - dump_generic_node (pp, upper_bound, 0, (dump_flags_t)0, false); + if (i > 0) + pp_string (pp, ", "); + tree case_label = m_case_labels[i]; + gcc_assert (TREE_CODE (case_label) == CASE_LABEL_EXPR); + tree lower_bound = CASE_LOW (case_label); + tree upper_bound = CASE_HIGH (case_label); + if (lower_bound) + { + pp_printf (pp, "case "); + dump_generic_node (pp, lower_bound, 0, (dump_flags_t)0, false); + if (upper_bound) + { + pp_printf (pp, " ... "); + dump_generic_node (pp, upper_bound, 0, (dump_flags_t)0, + false); + } + pp_printf (pp, ":"); + } + else + pp_printf (pp, "default:"); } - pp_printf (pp, ":"); } else - pp_printf (pp, "default:"); -} - -/* Get the case label for this "switch" superedge. */ - -tree -switch_cfg_superedge::get_case_label () const -{ - return gimple_switch_label (get_switch_stmt (), m_idx); + { + pp_character (pp, '{'); + for (unsigned i = 0; i < m_case_labels.length (); ++i) + { + if (i > 0) + pp_string (pp, ", "); + tree case_label = m_case_labels[i]; + gcc_assert (TREE_CODE (case_label) == CASE_LABEL_EXPR); + tree lower_bound = CASE_LOW (case_label); + tree upper_bound = CASE_HIGH (case_label); + if (lower_bound) + { + if (upper_bound) + { + pp_character (pp, '['); + dump_generic_node (pp, lower_bound, 0, (dump_flags_t)0, + false); + pp_string (pp, ", "); + dump_generic_node (pp, upper_bound, 0, (dump_flags_t)0, + false); + pp_character (pp, ']'); + } + else + dump_generic_node (pp, lower_bound, 0, (dump_flags_t)0, false); + } + else + pp_printf (pp, "default"); + } + pp_character (pp, '}'); + } } /* Implementation of superedge::dump_label_to_pp for interprocedural diff --git a/gcc/analyzer/supergraph.h b/gcc/analyzer/supergraph.h index 335f5133407..09a12be483d 100644 --- a/gcc/analyzer/supergraph.h +++ b/gcc/analyzer/supergraph.h @@ -181,7 +181,7 @@ public: private: supernode *add_node (function *fun, basic_block bb, gcall *returning_call, gimple_seq phi_nodes); - cfg_superedge *add_cfg_edge (supernode *src, supernode *dest, ::edge e, int idx); + cfg_superedge *add_cfg_edge (supernode *src, supernode *dest, ::edge e); call_superedge *add_call_superedge (supernode *src, supernode *dest, cgraph_edge *cedge); return_superedge *add_return_superedge (supernode *src, supernode *dest, @@ -539,15 +539,12 @@ is_a_helper ::test (const superedge *sedge) namespace ana { /* A subclass for edges from switch statements, retaining enough - information to identify the pertinent case, and for adding labels + information to identify the pertinent cases, and for adding labels when rendering via graphviz. 
*/ class switch_cfg_superedge : public cfg_superedge { public: - switch_cfg_superedge (supernode *src, supernode *dst, ::edge e, int idx) - : cfg_superedge (src, dst, e), - m_idx (idx) - {} + switch_cfg_superedge (supernode *src, supernode *dst, ::edge e); const switch_cfg_superedge *dyn_cast_switch_cfg_superedge () const FINAL OVERRIDE @@ -563,10 +560,10 @@ class switch_cfg_superedge : public cfg_superedge { return as_a (m_src->get_last_stmt ()); } - tree get_case_label () const; + const vec &get_case_labels () const { return m_case_labels; } - private: - const int m_idx; +private: + auto_vec m_case_labels; }; } // namespace ana diff --git a/gcc/testsuite/gcc.dg/analyzer/switch.c b/gcc/testsuite/gcc.dg/analyzer/switch.c index 870b00f8c03..0b9e7e3b869 100644 --- a/gcc/testsuite/gcc.dg/analyzer/switch.c +++ b/gcc/testsuite/gcc.dg/analyzer/switch.c @@ -8,23 +8,156 @@ void test (int i) { case 0: __analyzer_eval (i == 0); /* { dg-warning "TRUE" } */ + __analyzer_eval (i != -1); /* { dg-warning "TRUE" } */ + __analyzer_eval (i != 0); /* { dg-warning "FALSE" } */ + __analyzer_eval (i != 1); /* { dg-warning "TRUE" } */ break; case 3 ... 5: + __analyzer_eval (i != 0); /* { dg-warning "TRUE" } */ + __analyzer_eval (i > 1); /* { dg-warning "TRUE" } */ + __analyzer_eval (i > 2); /* { dg-warning "TRUE" } */ + __analyzer_eval (i >= 2); /* { dg-warning "TRUE" } */ __analyzer_eval (i >= 3); /* { dg-warning "TRUE" } */ __analyzer_eval (i <= 5); /* { dg-warning "TRUE" } */ + __analyzer_eval (i < 6); /* { dg-warning "TRUE" } */ + __analyzer_eval (i <= 6); /* { dg-warning "TRUE" } */ + __analyzer_eval (i < 7); /* { dg-warning "TRUE" } */ + __analyzer_eval (i != 6); /* { dg-warning "TRUE" } */ + __analyzer_eval (i != 3); /* { dg-warning "UNKNOWN" } */ + __analyzer_eval (i != 4); /* { dg-warning "UNKNOWN" } */ + __analyzer_eval (i != 5); /* { dg-warning "UNKNOWN" } */ + __analyzer_eval (i >= 4); /* { dg-warning "UNKNOWN" } */ + __analyzer_eval (i >= 5); /* { dg-warning "UNKNOWN" } */ + __analyzer_eval (i <= 3); /* { dg-warning "UNKNOWN" } */ + __analyzer_eval (i <= 4); /* { dg-warning "UNKNOWN" } */ break; default: + __analyzer_eval (i == -1); /* { dg-warning "UNKNOWN" } */ __analyzer_eval (i == 0); /* { dg-warning "FALSE" } */ __analyzer_eval (i == 2); /* { dg-warning "UNKNOWN" } */ __analyzer_eval (i == 3); /* { dg-warning "FALSE" } */ - __analyzer_eval (i == 4); /* { dg-warning "FALSE" "desired" { xfail *-*-* } } */ - /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */ - /* TODO(xfail^^^): we're only checking against endpoints of case - ranges, not the insides. */ + __analyzer_eval (i == 4); /* { dg-warning "FALSE" } */ __analyzer_eval (i == 5); /* { dg-warning "FALSE" } */ __analyzer_eval (i == 6); /* { dg-warning "UNKNOWN" } */ + __analyzer_eval (i != 0); /* { dg-warning "TRUE" } */ + __analyzer_eval (i != 1); /* { dg-warning "UNKNOWN" } */ + __analyzer_eval (i != 3); /* { dg-warning "TRUE" } */ + __analyzer_eval (i != 4); /* { dg-warning "TRUE" } */ + __analyzer_eval (i != 5); /* { dg-warning "TRUE" } */ + __analyzer_eval (i != 6); /* { dg-warning "UNKNOWN" } */ break; } } + +/* Verify that the analyzer follows the correct paths on a + switch statement guarded by an if, using noinline to defeat + optimizations. 
*/ + +static void __attribute__((noinline)) +__analyzer_called_by_test_2 (int y) +{ + switch (y) + { + case 0: + __analyzer_dump_path (); /* { dg-bogus "path" } */ + break; + case 1: + __analyzer_dump_path (); /* { dg-message "path" } */ + break; + case 2: + __analyzer_dump_path (); /* { dg-bogus "path" } */ + break; + default: + __analyzer_dump_path (); /* { dg-bogus "path" } */ + break; + } +} + +void test_2 (int x) +{ + if (x == 1) + __analyzer_called_by_test_2 (x); +} + +void test_3 (int x, int y) +{ + if (y == 3) + switch (x) + { + case 0 ... 9: + case 20 ... 29: + if (x == y) + __analyzer_dump_path (); /* { dg-message "path" } */ + else + __analyzer_dump_path (); /* { dg-message "path" } */ + } +} + +struct s4 +{ + unsigned char level:3; + unsigned char key_id_mode:2; + unsigned char reserved:3; +}; + +void test_4 (struct s4 *p) +{ + switch (p->key_id_mode) + { + case 0: + __analyzer_dump_path (); /* { dg-message "path" } */ + break; + case 1: + __analyzer_dump_path (); /* { dg-message "path" } */ + break; + case 2: + __analyzer_dump_path (); /* { dg-message "path" } */ + break; + case 3: + __analyzer_dump_path (); /* { dg-message "path" } */ + break; + } + __analyzer_dump_path (); /* { dg-message "path" } */ +} + +int test_5 (unsigned v) +{ + switch (v) + { + case 0: + return 7; + break; + case 1: + return 23; + break; + default: + return v * 2; + } +} + +int test_6 (unsigned v) +{ + switch (v) + { + case 0: + return 3; + case -1: + return 22; + } + return -3; +} + +int g7 = -1; +int test_7 () +{ + switch (g7++) { + case 0: + return 32; + + case 100: + return 42; + } + return 0; +} diff --git a/gcc/testsuite/gcc.dg/analyzer/torture/switch-2.c b/gcc/testsuite/gcc.dg/analyzer/torture/switch-2.c new file mode 100644 index 00000000000..3da2e301b14 --- /dev/null +++ b/gcc/testsuite/gcc.dg/analyzer/torture/switch-2.c @@ -0,0 +1,42 @@ +struct s +{ + int f0; + int f1; +}; + +int test (int cmd) +{ + int err = 0; + struct s foo; + struct s bar; + + switch (cmd) + { + case 0: + foo.f0 = 0; + break; + case 1: + foo.f0 = 1; + break; + case 30 ... 50: + case 70 ... 80: + __builtin_memset (&bar, 0, sizeof (bar)); + break; + } + + switch (cmd) + { + default: + return -1; + case 0 ... 
1: + return foo.f0; + break; + case 42: + return bar.f1; + break; + case 65: + return bar.f1; + break; + } + return err; +} diff --git a/gcc/testsuite/gcc.dg/analyzer/torture/switch-3.c b/gcc/testsuite/gcc.dg/analyzer/torture/switch-3.c new file mode 100644 index 00000000000..57b8acdb292 --- /dev/null +++ b/gcc/testsuite/gcc.dg/analyzer/torture/switch-3.c @@ -0,0 +1,158 @@ +typedef unsigned int __u32; +__extension__ typedef unsigned long long __u64; + +extern unsigned long +copy_from_user(void *to, const void *from, unsigned long n); + +extern unsigned long +copy_to_user(void *to, const void *from, unsigned long n); + +struct mtrr_sentry { + __u64 base; + __u32 size; + __u32 type; +}; + +struct mtrr_gentry { + __u64 base; + __u32 size; + __u32 regnum; + __u32 type; + __u32 _pad; +}; + +#define _IOC_NRBITS 8 +#define _IOC_TYPEBITS 8 +#define _IOC_SIZEBITS 14 +#define _IOC_DIRBITS 2 + +#define _IOC_NRSHIFT 0 +#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) +#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) +#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) + +#define _IOC_WRITE 1U +#define _IOC_READ 2U + +#define _IOC(dir,type,nr,size) \ + (((dir) << _IOC_DIRSHIFT) | \ + ((type) << _IOC_TYPESHIFT) | \ + ((nr) << _IOC_NRSHIFT) | \ + ((size) << _IOC_SIZESHIFT)) + +#define _IOC_TYPECHECK(t) (sizeof(t)) + +#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) +#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) + +#define MTRR_IOCTL_BASE 'M' + +#define EFAULT 14 +#define EINVAL 22 +#define ENOTTY 25 + +#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) +#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) +#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry) +#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry) +#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry) +#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry) +#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry) +#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry) +#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry) +#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry) + +extern void check_init_u64 (__u64 v); +extern void check_init_u32 (__u32 v); + +/* Adapted/reduced from arch/x86/kernel/cpu/mtrr/if.c: mtrr_ioctl, + which is GPL-2.0 */ + +long mtrr_ioctl(unsigned int cmd, unsigned long __arg) { + int err = 0; + struct mtrr_sentry sentry; + struct mtrr_gentry gentry; + void *arg = (void *)__arg; + + __builtin_memset(&gentry, 0, sizeof(gentry)); + + switch (cmd) { + case MTRRIOC_ADD_ENTRY: + case MTRRIOC_SET_ENTRY: + case MTRRIOC_DEL_ENTRY: + case MTRRIOC_KILL_ENTRY: + case MTRRIOC_ADD_PAGE_ENTRY: + case MTRRIOC_SET_PAGE_ENTRY: + case MTRRIOC_DEL_PAGE_ENTRY: + case MTRRIOC_KILL_PAGE_ENTRY: + if (copy_from_user(&sentry, arg, sizeof(sentry))) + return -EFAULT; + break; + case MTRRIOC_GET_ENTRY: + case MTRRIOC_GET_PAGE_ENTRY: + if (copy_from_user(&gentry, arg, sizeof(gentry))) + return -EFAULT; + break; + } + + switch (cmd) { + default: + return -ENOTTY; + case MTRRIOC_ADD_ENTRY: + check_init_u64 (sentry.base); + check_init_u32 (sentry.size); + check_init_u32 (sentry.type); + break; + case MTRRIOC_SET_ENTRY: + check_init_u64 (sentry.base); + check_init_u32 (sentry.size); + check_init_u32 (sentry.type); + break; + case MTRRIOC_DEL_ENTRY: + check_init_u64 
(sentry.base); + check_init_u32 (sentry.size); + check_init_u32 (sentry.type); + break; + case MTRRIOC_KILL_ENTRY: + check_init_u64 (sentry.base); + check_init_u32 (sentry.size); + check_init_u32 (sentry.type); + break; + case MTRRIOC_GET_ENTRY: + check_init_u64 (gentry.base); + check_init_u32 (gentry.size); + check_init_u32 (gentry.regnum); + check_init_u32 (gentry.type); + check_init_u32 (gentry._pad); + break; + case MTRRIOC_ADD_PAGE_ENTRY: + check_init_u64 (sentry.base); + check_init_u32 (sentry.size); + check_init_u32 (sentry.type); + break; + case MTRRIOC_SET_PAGE_ENTRY: + check_init_u64 (sentry.base); + check_init_u32 (sentry.size); + check_init_u32 (sentry.type); + break; + case MTRRIOC_DEL_PAGE_ENTRY: + check_init_u64 (sentry.base); + check_init_u32 (sentry.size); + check_init_u32 (sentry.type); + break; + case MTRRIOC_KILL_PAGE_ENTRY: + check_init_u64 (sentry.base); + check_init_u32 (sentry.size); + check_init_u32 (sentry.type); + break; + case MTRRIOC_GET_PAGE_ENTRY: + check_init_u64 (gentry.base); + check_init_u32 (gentry.size); + check_init_u32 (gentry.regnum); + check_init_u32 (gentry.type); + check_init_u32 (gentry._pad); + break; + } + + return err; +} diff --git a/gcc/testsuite/gcc.dg/analyzer/torture/switch-4.c b/gcc/testsuite/gcc.dg/analyzer/torture/switch-4.c new file mode 100644 index 00000000000..f5cdb5cea01 --- /dev/null +++ b/gcc/testsuite/gcc.dg/analyzer/torture/switch-4.c @@ -0,0 +1,27 @@ +struct snd_ac97 { + // snip + unsigned int id; + // snip +}; + +int snd_ac97_valid_reg(struct snd_ac97 *ac97, unsigned short reg) { + + switch (ac97->id) { + case 0x53544d02: + if (reg == 0x22 || reg == 0x7a) + return 1; + __attribute__((__fallthrough__)); + case 0x414b4d00: + return 0; + } + return 1; +} + +int snd_ac97_update_bits(struct snd_ac97 *ac97, unsigned short reg) { + if (ac97->id == 0x414c4781) + { + if (!snd_ac97_valid_reg(ac97, reg)) + return -22; + } + return 0; +} diff --git a/gcc/testsuite/gcc.dg/analyzer/torture/switch-5.c b/gcc/testsuite/gcc.dg/analyzer/torture/switch-5.c new file mode 100644 index 00000000000..10b2f29c680 --- /dev/null +++ b/gcc/testsuite/gcc.dg/analyzer/torture/switch-5.c @@ -0,0 +1,68 @@ +/* { dg-additional-options "-fno-analyzer-call-summaries" } */ + +typedef unsigned char u8; +typedef signed int s32; +typedef unsigned int u32; + +enum v4l2_mpeg_video_hevc_profile { + V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN = 0, + V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE = 1, + V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10 = 2 +}; +enum v4l2_buf_type { + V4L2_BUF_TYPE_VIDEO_CAPTURE = 1, + V4L2_BUF_TYPE_VIDEO_OUTPUT = 2 +}; +struct v4l2_fmtdesc { + u32 index; + u32 type; +}; +struct v4l2_ctrl; +s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl); +struct create_channel_param { + u8 profile; +}; + +u8 +hevc_profile_to_mcu_profile(enum v4l2_mpeg_video_hevc_profile profile) { + switch (profile) { + default: + case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN: + return 1; + case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10: + return 2; + case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE: + return 3; + } +} + +int fill_create_channel_param(struct v4l2_ctrl *ctrl, + struct create_channel_param *param) { + enum v4l2_mpeg_video_hevc_profile profile; + profile = v4l2_ctrl_g_ctrl(ctrl); + param->profile = hevc_profile_to_mcu_profile(profile); + return 0; +} + +int allegro_enum_fmt_vid(struct v4l2_fmtdesc *f) { + switch (f->type) { + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + if (f->index >= 1) + return -22; + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + if (f->index >= 2) + return -22; + break; + 
default: + return -22; + } + return 0; +} + +int allegro_ioctl_streamon(struct v4l2_ctrl *ctrl, + struct create_channel_param *param) { + fill_create_channel_param(ctrl, param); + + return 0; +}
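
The testcases above exercise the key behavioural change: a switch edge is no longer constrained only by the endpoints of the other cases, but by the full set of value ranges it can carry, with the default edge getting the complement of every explicit case. The standalone sketch below is not part of the patch and is not the analyzer's bounded_ranges/bounded_ranges_manager implementation; the range/complement/contains helpers are purely illustrative. It only shows why, with whole-range exclusion, __analyzer_eval (i == 4) can report FALSE on the default path of "case 3 ... 5" in switch.c, where the old endpoint-only exclusion could not.

// Standalone sketch (not GCC code): model each switch edge as a set of
// closed integer ranges; the "default" edge is the complement of every
// explicit case.  All names here are illustrative only.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>
#include <vector>

struct range { int64_t lo, hi; };   // closed interval [lo, hi]

// Ranges covered by the default edge: everything not claimed by a case.
static std::vector<range>
complement (std::vector<range> cases)
{
  std::sort (cases.begin (), cases.end (),
	     [] (const range &a, const range &b) { return a.lo < b.lo; });
  std::vector<range> out;
  int64_t next = std::numeric_limits<int64_t>::min ();
  for (const range &r : cases)
    {
      if (r.lo > next)
	out.push_back ({next, r.lo - 1});
      next = std::max (next, r.hi + 1);   // assumes r.hi < INT64_MAX
    }
  out.push_back ({next, std::numeric_limits<int64_t>::max ()});
  return out;
}

// Can value V occur on an edge constrained to ranges RS?
static bool
contains (const std::vector<range> &rs, int64_t v)
{
  for (const range &r : rs)
    if (r.lo <= v && v <= r.hi)
      return true;
  return false;
}

int
main ()
{
  // Mirrors: switch (i) { case 0: ...; case 3 ... 5: ...; default: ... }
  std::vector<range> cases = {{0, 0}, {3, 5}};
  std::vector<range> dflt = complement (cases);

  // i == 4 is provably impossible on the default edge, matching the
  // "__analyzer_eval (i == 4)" expectation in switch.c above.
  printf ("default edge can have i == 4? %s\n",
	  contains (dflt, 4) ? "yes" : "no");   // prints "no"
  printf ("default edge can have i == 6? %s\n",
	  contains (dflt, 6) ? "yes" : "no");   // prints "yes"
  return 0;
}

The same per-edge range set also explains the switch_cfg_superedge change: since several case labels can branch to one destination block, the edge stores all of its labels and their ranges, rather than a single case index.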