analyzer: initial implementation of asm support [PR101570]

gcc/ChangeLog:
	PR analyzer/101570
	* Makefile.in (ANALYZER_OBJS): Add analyzer/region-model-asm.o.

gcc/analyzer/ChangeLog:
	PR analyzer/101570
	* analyzer.cc (maybe_reconstruct_from_def_stmt): Add GIMPLE_ASM
	case.
	* analyzer.h (class asm_output_svalue): New forward decl.
	(class reachable_regions): New forward decl.
	* complexity.cc (complexity::from_vec_svalue): New.
	* complexity.h (complexity::from_vec_svalue): New decl.
	* engine.cc (feasibility_state::maybe_update_for_edge): Handle
	asm stmts by calling on_asm_stmt.
	* region-model-asm.cc: New file.
	* region-model-manager.cc
	(region_model_manager::maybe_fold_asm_output_svalue): New.
	(region_model_manager::get_or_create_asm_output_svalue): New.
	(region_model_manager::log_stats): Log m_asm_output_values_map.
	* region-model.cc (region_model::on_stmt_pre): Handle GIMPLE_ASM.
	* region-model.h (visitor::visit_asm_output_svalue): New.
	(region_model_manager::get_or_create_asm_output_svalue): New decl.
	(region_model_manager::maybe_fold_asm_output_svalue): New decl.
	(region_model_manager::asm_output_values_map_t): New typedef.
	(region_model_manager::m_asm_output_values_map): New field.
	(region_model::on_asm_stmt): New.
	* store.cc (binding_cluster::on_asm): New.
	* store.h (binding_cluster::on_asm): New decl.
	* svalue.cc (svalue::cmp_ptr): Handle SK_ASM_OUTPUT.
	(asm_output_svalue::dump_to_pp): New.
	(asm_output_svalue::dump_input): New.
	(asm_output_svalue::input_idx_to_asm_idx): New.
	(asm_output_svalue::accept): New.
	* svalue.h (enum svalue_kind): Add SK_ASM_OUTPUT.
	(svalue::dyn_cast_asm_output_svalue): New.
	(class asm_output_svalue): New.
	(is_a_helper <const asm_output_svalue *>::test): New.
	(struct default_hash_traits<asm_output_svalue::key_t>): New.

gcc/testsuite/ChangeLog:
	PR analyzer/101570
	* gcc.dg/analyzer/asm-x86-1.c: New test.
	* gcc.dg/analyzer/asm-x86-lp64-1.c: New test.
	* gcc.dg/analyzer/asm-x86-lp64-2.c: New test.
	* gcc.dg/analyzer/pr101570.c: New test.
	* gcc.dg/analyzer/torture/asm-x86-linux-array_index_mask_nospec.c:
	New test.
	* gcc.dg/analyzer/torture/asm-x86-linux-cpuid-paravirt-1.c: New
	test.
	* gcc.dg/analyzer/torture/asm-x86-linux-cpuid-paravirt-2.c: New
	test.
	* gcc.dg/analyzer/torture/asm-x86-linux-cpuid.c: New test.
	* gcc.dg/analyzer/torture/asm-x86-linux-rdmsr-paravirt.c: New
	test.
	* gcc.dg/analyzer/torture/asm-x86-linux-rdmsr.c: New test.
	* gcc.dg/analyzer/torture/asm-x86-linux-wfx_get_ps_timeout-full.c:
	New test.
	* gcc.dg/analyzer/torture/asm-x86-linux-wfx_get_ps_timeout-reduced.c:
	New test.

Signed-off-by: David Malcolm <dmalcolm@redhat.com>
This commit is contained in:
David Malcolm 2021-08-04 18:21:21 -04:00
parent 5738a64f8b
commit ded2c2c068
26 changed files with 1855 additions and 3 deletions

View file

@ -1262,6 +1262,7 @@ ANALYZER_OBJS = \
analyzer/program-state.o \
analyzer/region.o \
analyzer/region-model.o \
analyzer/region-model-asm.o \
analyzer/region-model-impl-calls.o \
analyzer/region-model-manager.o \
analyzer/region-model-reachability.o \

View file

@ -131,6 +131,7 @@ maybe_reconstruct_from_def_stmt (tree ssa_name,
{
default:
gcc_unreachable ();
case GIMPLE_ASM:
case GIMPLE_NOP:
case GIMPLE_PHI:
/* Can't handle these. */

View file

@ -53,6 +53,7 @@ class svalue;
class widening_svalue;
class compound_svalue;
class conjured_svalue;
class asm_output_svalue;
typedef hash_set<const svalue *> svalue_set;
class region;
class frame_region;
@ -77,6 +78,7 @@ class call_details;
struct rejected_constraint;
class constraint_manager;
class equiv_class;
class reachable_regions;
class pending_diagnostic;
class state_change_event;

View file

@ -90,6 +90,22 @@ complexity::from_pair (const complexity &c1, const complexity &c2)
MAX (c1.m_max_depth, c2.m_max_depth) + 1);
}
/* Get complexity for a new node that references the svalues in VEC.  */

complexity
complexity::from_vec_svalue (const vec<const svalue *> &vec)
{
  unsigned total_nodes = 0;
  unsigned deepest = 0;
  for (unsigned i = 0; i < vec.length (); i++)
    {
      const complexity &c = vec[i]->get_complexity ();
      total_nodes += c.m_num_nodes;
      if (c.m_max_depth > deepest)
        deepest = c.m_max_depth;
    }
  /* The new node itself adds one node and one level of depth.  */
  return complexity (total_nodes + 1, deepest + 1);
}
} // namespace ana
#endif /* #if ENABLE_ANALYZER */

View file

@ -36,6 +36,7 @@ struct complexity
complexity (const region *reg);
complexity (const svalue *sval);
static complexity from_pair (const complexity &c1, const complexity &c);
static complexity from_vec_svalue (const vec<const svalue *> &vec);
/* The total number of svalues and regions in the tree of this
entity, including the entity itself. */

View file

@ -3718,6 +3718,8 @@ feasibility_state::maybe_update_for_edge (logger *logger,
if (const gassign *assign = dyn_cast <const gassign *> (stmt))
m_model.on_assignment (assign, NULL);
else if (const gasm *asm_stmt = dyn_cast <const gasm *> (stmt))
m_model.on_asm_stmt (asm_stmt, NULL);
else if (const gcall *call = dyn_cast <const gcall *> (stmt))
{
bool terminate_path;

View file

@ -0,0 +1,303 @@
/* Handling inline asm in the analyzer.
Copyright (C) 2021 Free Software Foundation, Inc.
Contributed by David Malcolm <dmalcolm@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "function.h"
#include "basic-block.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "diagnostic-core.h"
#include "pretty-print.h"
#include "tristate.h"
#include "selftest.h"
#include "json.h"
#include "analyzer/analyzer.h"
#include "analyzer/analyzer-logging.h"
#include "options.h"
#include "analyzer/call-string.h"
#include "analyzer/program-point.h"
#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/region-model-reachability.h"
#include "stmt.h"
#if ENABLE_ANALYZER
namespace ana {
/* Minimal asm support for the analyzer.
The objective of this code is to:
- minimize false positives from the analyzer on the Linux kernel
(which makes heavy use of inline asm), whilst
- avoiding having to "teach" the compiler anything about specific strings
in asm statements.
Specifically, we want to:
(a) mark asm outputs and certain other regions as having been written to,
to avoid false positives from -Wanalyzer-use-of-uninitialized-value.
(b) identify some of these stmts as "deterministic" so that we can
write consistent outputs given consistent inputs, so that we can
avoid false positives for paths in which an asm is invoked twice
with the same inputs and is expected to emit the same output.
This file implements heuristics for achieving the above. */
/* Determine if ASM_STMT is deterministic, in the sense of (b) above.
Consider this x86 function taken from the Linux kernel
(arch/x86/include/asm/barrier.h):
static inline unsigned long array_index_mask_nospec(unsigned long index,
unsigned long size)
{
unsigned long mask;
asm volatile ("cmp %1,%2; sbb %0,%0;"
:"=r" (mask)
:"g"(size),"r" (index)
:"cc");
return mask;
}
The above is a mitigation for Spectre-variant-1 attacks, for clamping
an array access to within the range of [0, size] if the CPU speculates
past the array bounds.
However, it is ultimately used to implement wdev_to_wvif:
static inline struct wfx_vif *
wdev_to_wvif(struct wfx_dev *wdev, int vif_id)
{
vif_id = array_index_nospec(vif_id, ARRAY_SIZE(wdev->vif));
if (!wdev->vif[vif_id]) {
return NULL;
}
return (struct wfx_vif *)wdev->vif[vif_id]->drv_priv;
}
which is used by:
if (wdev_to_wvif(wvif->wdev, 1))
return wdev_to_wvif(wvif->wdev, 1)->vif;
The code has been written to assume that wdev_to_wvif is deterministic,
and won't change from returning non-NULL at the "if" clause to
returning NULL at the "->vif" dereference.
By treating the above specific "asm volatile" as deterministic we avoid
a false positive from -Wanalyzer-null-dereference. */
static bool
deterministic_p (const gasm *asm_stmt)
{
  /* Something volatile that takes no inputs is assumed to be querying
     changeable external state (e.g. rdtsc), so it cannot be modelled
     as a pure function of its operands.  */
  const bool is_volatile = gimple_asm_volatile_p (asm_stmt);
  const bool has_inputs = gimple_asm_ninputs (asm_stmt) > 0;
  if (is_volatile && !has_inputs)
    return false;

  /* Otherwise, assume the outputs are purely a function of the inputs.  */
  return true;
}
/* Update this model for the asm STMT, using CTXT to report any
diagnostics.
Compare with cfgexpand.c: expand_asm_stmt. */
void
region_model::on_asm_stmt (const gasm *stmt, region_model_context *ctxt)
{
logger *logger = ctxt ? ctxt->get_logger () : NULL;
LOG_SCOPE (logger);
const unsigned noutputs = gimple_asm_noutputs (stmt);
const unsigned ninputs = gimple_asm_ninputs (stmt);
auto_vec<tree> output_tvec;
auto_vec<tree> input_tvec;
auto_vec<const char *> constraints;
/* Copy the gimple vectors into new vectors that we can manipulate. */
output_tvec.safe_grow (noutputs, true);
input_tvec.safe_grow (ninputs, true);
constraints.safe_grow (noutputs + ninputs, true);
for (unsigned i = 0; i < noutputs; ++i)
{
tree t = gimple_asm_output_op (stmt, i);
output_tvec[i] = TREE_VALUE (t);
constraints[i] = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
}
/* Input constraints follow the output constraints in the combined
constraints vec, as required by parse_input_constraint below. */
for (unsigned i = 0; i < ninputs; i++)
{
tree t = gimple_asm_input_op (stmt, i);
input_tvec[i] = TREE_VALUE (t);
constraints[i + noutputs]
= TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
}
/* Determine which regions are reachable from the inputs
to this stmt. */
reachable_regions reachable_regs (this);
int num_errors = 0;
auto_vec<const region *> output_regions (noutputs);
for (unsigned i = 0; i < noutputs; ++i)
{
tree val = output_tvec[i];
const char *constraint;
bool is_inout;
bool allows_reg;
bool allows_mem;
const region *dst_reg = get_lvalue (val, ctxt);
output_regions.quick_push (dst_reg);
reachable_regs.add (dst_reg, true);
/* Try to parse the output constraint. If that fails, there's
no point in going further. */
constraint = constraints[i];
if (!parse_output_constraint (&constraint, i, ninputs, noutputs,
&allows_mem, &allows_reg, &is_inout))
{
if (logger)
logger->log ("error parsing constraint for output %i: %qs",
i, constraint);
num_errors++;
continue;
}
if (logger)
{
logger->log ("output %i: %qs %qE"
" is_inout: %i allows_reg: %i allows_mem: %i",
i, constraint, val,
(int)is_inout, (int)allows_reg, (int)allows_mem);
logger->start_log_line ();
logger->log_partial (" region: ");
dst_reg->dump_to_pp (logger->get_printer (), true);
logger->end_log_line ();
}
}
/* Ideally should combine with inout_svals to determine the
"effective inputs" and use this for the asm_output_svalue. */
auto_vec<const svalue *> input_svals (ninputs);
for (unsigned i = 0; i < ninputs; i++)
{
tree val = input_tvec[i];
const char *constraint = constraints[i + noutputs];
bool allows_reg, allows_mem;
if (! parse_input_constraint (&constraint, i, ninputs, noutputs, 0,
constraints.address (),
&allows_mem, &allows_reg))
{
if (logger)
logger->log ("error parsing constraint for input %i: %qs",
i, constraint);
num_errors++;
continue;
}
tree src_expr = input_tvec[i];
const svalue *src_sval = get_rvalue (src_expr, ctxt);
/* Reading an uninitialized value via an input operand should be
diagnosed (e.g. -Wanalyzer-use-of-uninitialized-value). */
check_for_poison (src_sval, src_expr, ctxt);
input_svals.quick_push (src_sval);
reachable_regs.handle_sval (src_sval);
if (logger)
{
logger->log ("input %i: %qs %qE"
" allows_reg: %i allows_mem: %i",
i, constraint, val,
(int)allows_reg, (int)allows_mem);
logger->start_log_line ();
logger->log_partial (" sval: ");
src_sval->dump_to_pp (logger->get_printer (), true);
logger->end_log_line ();
}
}
/* Constraint strings should already have been validated before the
analyzer runs, so a parse failure here is treated as an internal
error. NOTE(review): assumption - confirm against front-end checks. */
if (num_errors > 0)
gcc_unreachable ();
if (logger)
{
logger->log ("reachability: ");
reachable_regs.dump_to_pp (logger->get_printer ());
logger->end_log_line ();
}
/* Given the regions that were reachable from the inputs we
want to clobber them.
This is similar to region_model::handle_unrecognized_call,
but the unknown call policies seem too aggressive (e.g. purging state
from anything that's ever escaped). Instead, clobber any clusters
that were reachable in *this* asm stmt, rather than those that
escaped, and we don't treat the values as having escaped.
We also assume that asm stmts don't affect sm-state. */
for (auto iter = reachable_regs.begin_mutable_base_regs ();
iter != reachable_regs.end_mutable_base_regs (); ++iter)
{
const region *base_reg = *iter;
if (base_reg->symbolic_for_unknown_ptr_p ())
continue;
binding_cluster *cluster = m_store.get_or_create_cluster (base_reg);
cluster->on_asm (stmt, m_mgr->get_store_manager ());
}
/* Update the outputs. */
for (unsigned output_idx = 0; output_idx < noutputs; output_idx++)
{
tree dst_expr = output_tvec[output_idx];
const region *dst_reg = output_regions[output_idx];
const svalue *sval;
/* Deterministic asms with few enough inputs get an asm_output_svalue:
a pure function of the inputs, so repeated identical asms with the
same inputs compare equal. Otherwise, conjure a fresh value and
purge any state involving it. */
if (deterministic_p (stmt)
&& input_svals.length () <= asm_output_svalue::MAX_INPUTS)
sval = m_mgr->get_or_create_asm_output_svalue (TREE_TYPE (dst_expr),
stmt,
output_idx,
input_svals);
else
{
sval = m_mgr->get_or_create_conjured_svalue (TREE_TYPE (dst_expr),
stmt,
dst_reg);
purge_state_involving (sval, ctxt);
}
set_value (dst_reg, sval, ctxt);
}
}
} // namespace ana
#endif /* #if ENABLE_ANALYZER */

View file

@ -1087,6 +1087,51 @@ region_model_manager::get_or_create_conjured_svalue (tree type,
return conjured_sval;
}
/* Subroutine of region_model_manager::get_or_create_asm_output_svalue.
   Return a folded svalue, or NULL.  */

const svalue *
region_model_manager::
maybe_fold_asm_output_svalue (tree type,
                              const vec<const svalue *> &inputs)
{
  /* If any input is the unknown svalue, there is nothing useful to
     track: fold the output to "unknown" as well.  */
  for (unsigned idx = 0; idx < inputs.length (); idx++)
    {
      const svalue *input = inputs[idx];
      if (input->get_kind () == SK_UNKNOWN)
        return get_or_create_unknown_svalue (type);
    }

  /* No folding opportunity.  */
  return NULL;
}
/* Return the svalue * of type TYPE for OUTPUT_IDX of the deterministic
asm stmt ASM_STMT, given INPUTS as inputs.
Instances are uniquified via m_asm_output_values_map; if the value
would be too complex, an unknown svalue is returned instead. */
const svalue *
region_model_manager::
get_or_create_asm_output_svalue (tree type,
const gasm *asm_stmt,
unsigned output_idx,
const vec<const svalue *> &inputs)
{
gcc_assert (inputs.length () <= asm_output_svalue::MAX_INPUTS);
if (const svalue *folded
= maybe_fold_asm_output_svalue (type, inputs))
return folded;
/* Key on the asm template string itself (rather than the stmt pointer),
so that textually identical asms with equal inputs unify, even when
they come from different stmts. */
const char *asm_string = gimple_asm_string (asm_stmt);
const unsigned noutputs = gimple_asm_noutputs (asm_stmt);
asm_output_svalue::key_t key (type, asm_string, output_idx, inputs);
if (asm_output_svalue **slot = m_asm_output_values_map.get (key))
return *slot;
asm_output_svalue *asm_output_sval
= new asm_output_svalue (type, asm_string, output_idx, noutputs, inputs);
RETURN_UNKNOWN_IF_TOO_COMPLEX (asm_output_sval);
m_asm_output_values_map.put (key, asm_output_sval);
return asm_output_sval;
}
/* Given STRING_CST, a STRING_CST, and BYTE_OFFSET_CST, a constant byte offset,
attempt to get the character at that offset, returning either
the svalue for the character constant, or NULL if unsuccessful. */
@ -1505,6 +1550,9 @@ region_model_manager::log_stats (logger *logger, bool show_objs) const
log_uniq_map (logger, show_objs, "widening_svalue", m_widening_values_map);
log_uniq_map (logger, show_objs, "compound_svalue", m_compound_values_map);
log_uniq_map (logger, show_objs, "conjured_svalue", m_conjured_values_map);
log_uniq_map (logger, show_objs, "asm_output_svalue",
m_asm_output_values_map);
logger->log ("max accepted svalue num_nodes: %i",
m_max_complexity.m_num_nodes);
logger->log ("max accepted svalue max_depth: %i",

View file

@ -980,7 +980,10 @@ region_model::on_stmt_pre (const gimple *stmt,
break;
case GIMPLE_ASM:
/* No-op for now. */
{
const gasm *asm_stmt = as_a <const gasm *> (stmt);
on_asm_stmt (asm_stmt, ctxt);
}
break;
case GIMPLE_CALL:

View file

@ -221,6 +221,7 @@ public:
virtual void visit_widening_svalue (const widening_svalue *) {}
virtual void visit_compound_svalue (const compound_svalue *) {}
virtual void visit_conjured_svalue (const conjured_svalue *) {}
virtual void visit_asm_output_svalue (const asm_output_svalue *) {}
virtual void visit_region (const region *) {}
};
@ -274,6 +275,11 @@ public:
const binding_map &map);
const svalue *get_or_create_conjured_svalue (tree type, const gimple *stmt,
const region *id_reg);
const svalue *
get_or_create_asm_output_svalue (tree type,
const gasm *asm_stmt,
unsigned output_idx,
const vec<const svalue *> &inputs);
const svalue *maybe_get_char_from_string_cst (tree string_cst,
tree byte_offset_cst);
@ -346,6 +352,8 @@ private:
const svalue *maybe_undo_optimize_bit_field_compare (tree type,
const compound_svalue *compound_sval,
tree cst, const svalue *arg1);
const svalue *maybe_fold_asm_output_svalue (tree type,
const vec<const svalue *> &inputs);
unsigned m_next_region_id;
root_region m_root_region;
@ -410,6 +418,10 @@ private:
conjured_svalue *> conjured_values_map_t;
conjured_values_map_t m_conjured_values_map;
typedef hash_map<asm_output_svalue::key_t,
asm_output_svalue *> asm_output_values_map_t;
asm_output_values_map_t m_asm_output_values_map;
bool m_check_complexity;
/* Maximum complexity of svalues that weren't rejected. */
@ -537,6 +549,7 @@ class region_model
void on_assignment (const gassign *stmt, region_model_context *ctxt);
const svalue *get_gassign_result (const gassign *assign,
region_model_context *ctxt);
void on_asm_stmt (const gasm *asm_stmt, region_model_context *ctxt);
bool on_call_pre (const gcall *stmt, region_model_context *ctxt,
bool *out_terminate_path);
void on_call_post (const gcall *stmt,

View file

@ -1796,6 +1796,23 @@ binding_cluster::on_unknown_fncall (const gcall *call,
}
}
/* Mark this cluster as having been clobbered by STMT. */
void
binding_cluster::on_asm (const gasm *stmt,
store_manager *mgr)
{
/* Discard all existing bindings for this cluster; the asm may have
overwritten any of them. */
m_map.empty ();
/* Bind it to a new "conjured" value using STMT. */
const svalue *sval
= mgr->get_svalue_manager ()->get_or_create_conjured_svalue
(m_base_region->get_type (), stmt, m_base_region);
bind (mgr, m_base_region, sval);
/* Record that this cluster was touched symbolically. */
m_touched = true;
}
/* Return true if this binding_cluster has no information
i.e. if there are no bindings, and it hasn't been marked as having
escaped, or touched symbolically. */

View file

@ -603,6 +603,7 @@ public:
void mark_as_escaped ();
void on_unknown_fncall (const gcall *call, store_manager *mgr);
void on_asm (const gasm *stmt, store_manager *mgr);
bool escaped_p () const { return m_escaped; }
bool touched_p () const { return m_touched; }

View file

@ -508,6 +508,29 @@ svalue::cmp_ptr (const svalue *sval1, const svalue *sval2)
conjured_sval2->get_id_region ());
}
break;
case SK_ASM_OUTPUT:
{
const asm_output_svalue *asm_output_sval1
= (const asm_output_svalue *)sval1;
const asm_output_svalue *asm_output_sval2
= (const asm_output_svalue *)sval2;
if (int asm_string_cmp = strcmp (asm_output_sval1->get_asm_string (),
asm_output_sval2->get_asm_string ()))
return asm_string_cmp;
if (int output_idx_cmp = ((int)asm_output_sval1->get_output_idx ()
- (int)asm_output_sval2->get_output_idx ()))
return output_idx_cmp;
if (int cmp = ((int)asm_output_sval1->get_num_inputs ()
- (int)asm_output_sval2->get_num_inputs ()))
return cmp;
for (unsigned i = 0; i < asm_output_sval1->get_num_inputs (); i++)
if (int input_cmp
= svalue::cmp_ptr (asm_output_sval1->get_input (i),
asm_output_sval2->get_input (i)))
return input_cmp;
return 0;
}
break;
}
}
@ -1794,6 +1817,72 @@ conjured_svalue::accept (visitor *v) const
m_id_reg->accept (v);
}
/* class asm_output_svalue : public svalue. */
/* Implementation of svalue::dump_to_pp vfunc for asm_output_svalue.

   Emits "ASM_OUTPUT(<asm-string>, %<out-idx>, {%<in-idx>: <sval>, ...})"
   in the simple form, or the analogous "asm_output_svalue (...)" form
   otherwise.  */

void
asm_output_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
{
  /* The two forms differ only in the leading name, so share the body
     rather than duplicating the loop.  Note the trailing space in the
     non-simple prefix, preserving the original output format.  */
  pp_printf (pp, "%s(%qs, %%%i, {",
	     simple ? "ASM_OUTPUT" : "asm_output_svalue ",
	     get_asm_string (),
	     get_output_idx ());
  for (unsigned i = 0; i < m_num_inputs; i++)
    {
      if (i > 0)
	pp_string (pp, ", ");
      /* Fix: pass I (not a hardcoded 0) so that each input is labelled
	 with its own asm operand index, rather than every input being
	 dumped with the index of the first one.  */
      dump_input (pp, i, m_input_arr[i], simple);
    }
  pp_string (pp, "})");
}
/* Subroutine of asm_output_svalue::dump_to_pp.
Print "%N: " (where N is INPUT_IDX converted to the asm stmt's
overall operand numbering) followed by a dump of SVAL. */
void
asm_output_svalue::dump_input (pretty_printer *pp,
unsigned input_idx,
const svalue *sval,
bool simple) const
{
pp_printf (pp, "%%%i: ", input_idx_to_asm_idx (input_idx));
sval->dump_to_pp (pp, simple);
}
/* Convert INPUT_IDX from an index into the array of inputs
into the index of all operands for the asm stmt. */
unsigned
asm_output_svalue::input_idx_to_asm_idx (unsigned input_idx) const
{
/* In asm operand numbering (%0, %1, ...) the outputs come first,
so the inputs are offset by the number of outputs. */
return input_idx + m_num_outputs;
}
/* Implementation of svalue::accept vfunc for asm_output_svalue. */
void
asm_output_svalue::accept (visitor *v) const
{
v->visit_asm_output_svalue (this);
/* Also visit each of the input svalues feeding this asm output. */
for (unsigned i = 0; i < m_num_inputs; i++)
m_input_arr[i]->accept (v);
}
} // namespace ana
#endif /* #if ENABLE_ANALYZER */

View file

@ -47,7 +47,8 @@ enum svalue_kind
SK_PLACEHOLDER,
SK_WIDENING,
SK_COMPOUND,
SK_CONJURED
SK_CONJURED,
SK_ASM_OUTPUT
};
/* svalue and its subclasses.
@ -74,7 +75,9 @@ enum svalue_kind
widening_svalue (SK_WIDENING): a merger of two svalues (possibly
in an iteration).
compound_svalue (SK_COMPOUND): a mapping of bit-ranges to svalues
conjured_svalue (SK_CONJURED): a value arising from a stmt. */
conjured_svalue (SK_CONJURED): a value arising from a stmt
asm_output_svalue (SK_ASM_OUTPUT): an output from a deterministic
asm stmt. */
/* An abstract base class representing a value held by a region of memory. */
@ -124,6 +127,8 @@ public:
dyn_cast_compound_svalue () const { return NULL; }
virtual const conjured_svalue *
dyn_cast_conjured_svalue () const { return NULL; }
virtual const asm_output_svalue *
dyn_cast_asm_output_svalue () const { return NULL; }
tree maybe_get_constant () const;
const region *maybe_get_region () const;
@ -1394,4 +1399,140 @@ template <> struct default_hash_traits<conjured_svalue::key_t>
static const bool empty_zero_p = true;
};
namespace ana {
/* An output from a deterministic asm stmt, where we want to identify a
particular unknown value, rather than resorting to the unknown_value
singleton.
Comparisons of variables that share the same asm_output_svalue are known
to be equal, even if we don't know what the value is. */
class asm_output_svalue : public svalue
{
public:
/* Imposing an upper limit and using a (small) array allows key_t
to avoid memory management. */
static const unsigned MAX_INPUTS = 2;
/* A support class for uniquifying instances of asm_output_svalue. */
struct key_t
{
key_t (tree type,
const char *asm_string,
unsigned output_idx,
const vec<const svalue *> &inputs)
: m_type (type), m_asm_string (asm_string), m_output_idx (output_idx),
m_num_inputs (inputs.length ())
{
gcc_assert (inputs.length () <= MAX_INPUTS);
for (unsigned i = 0; i < m_num_inputs; i++)
m_input_arr[i] = inputs[i];
}
hashval_t hash () const
{
inchash::hash hstate;
hstate.add_ptr (m_type);
/* We don't bother hashing m_asm_string; keys that differ only in
their asm string collide, and are resolved by operator==. */
hstate.add_int (m_output_idx);
for (unsigned i = 0; i < m_num_inputs; i++)
hstate.add_ptr (m_input_arr[i]);
return hstate.end ();
}
bool operator== (const key_t &other) const
{
if (!(m_type == other.m_type
&& 0 == (strcmp (m_asm_string, other.m_asm_string))
&& m_output_idx == other.m_output_idx
&& m_num_inputs == other.m_num_inputs))
return false;
for (unsigned i = 0; i < m_num_inputs; i++)
if (m_input_arr[i] != other.m_input_arr[i])
return false;
return true;
}
/* Use m_asm_string to mark empty/deleted, as m_type can be NULL for
legitimate instances. */
void mark_deleted () { m_asm_string = reinterpret_cast<const char *> (1); }
void mark_empty () { m_asm_string = NULL; }
bool is_deleted () const
{
return m_asm_string == reinterpret_cast<const char *> (1);
}
bool is_empty () const { return m_asm_string == NULL; }
tree m_type;
const char *m_asm_string;
unsigned m_output_idx;
unsigned m_num_inputs;
const svalue *m_input_arr[MAX_INPUTS];
};
/* TYPE is the output's type; ASM_STRING is the asm template text;
OUTPUT_IDX/NUM_OUTPUTS locate the output among the asm's operands;
INPUTS (at most MAX_INPUTS) are copied into m_input_arr. */
asm_output_svalue (tree type,
const char *asm_string,
unsigned output_idx,
unsigned num_outputs,
const vec<const svalue *> &inputs)
: svalue (complexity::from_vec_svalue (inputs), type),
m_asm_string (asm_string),
m_output_idx (output_idx),
m_num_outputs (num_outputs),
m_num_inputs (inputs.length ())
{
gcc_assert (inputs.length () <= MAX_INPUTS);
for (unsigned i = 0; i < m_num_inputs; i++)
m_input_arr[i] = inputs[i];
}
enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_ASM_OUTPUT; }
const asm_output_svalue *
dyn_cast_asm_output_svalue () const FINAL OVERRIDE
{
return this;
}
void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
void accept (visitor *v) const FINAL OVERRIDE;
const char *get_asm_string () const { return m_asm_string; }
unsigned get_output_idx () const { return m_output_idx; }
unsigned get_num_inputs () const { return m_num_inputs; }
const svalue *get_input (unsigned idx) const { return m_input_arr[idx]; }
private:
void dump_input (pretty_printer *pp,
unsigned input_idx,
const svalue *sval,
bool simple) const;
unsigned input_idx_to_asm_idx (unsigned input_idx) const;
const char *m_asm_string;
unsigned m_output_idx;
/* We capture this so that we can offset the input indices
to match the %0, %1, %2 in the asm_string when dumping. */
unsigned m_num_outputs;
unsigned m_num_inputs;
const svalue *m_input_arr[MAX_INPUTS];
};
} // namespace ana
/* Helper for is_a <>/dyn_cast <>: an svalue is an asm_output_svalue
iff its kind is SK_ASM_OUTPUT. */
template <>
template <>
inline bool
is_a_helper <const asm_output_svalue *>::test (const svalue *sval)
{
return sval->get_kind () == SK_ASM_OUTPUT;
}
/* Hash-map traits for asm_output_svalue::key_t, delegating to the key's
own hash/equality/marker member functions. empty_zero_p: an
all-zero key (NULL m_asm_string) is the "empty" marker, matching
key_t::mark_empty above. */
template <> struct default_hash_traits<asm_output_svalue::key_t>
: public member_function_hash_traits<asm_output_svalue::key_t>
{
static const bool empty_zero_p = true;
};
#endif /* GCC_ANALYZER_SVALUE_H */

View file

@ -0,0 +1,69 @@
/* { dg-do assemble { target x86_64-*-* } } */
#include "analyzer-decls.h"
/* Tests that the analyzer treats textually identical asms with identical
inputs as deterministic: their outputs compare equal. */
int test_out (void)
{
int dst_a, dst_b;
asm ("mov 42, %0"
: "=r" (dst_a));
asm ("mov 42, %0"
: "=r" (dst_b));
__analyzer_eval (dst_a == dst_b); /* { dg-warning "TRUE" } */
return dst_a;
}
int test_out_in (int src_a)
{
int dst_a, dst_b;
asm ("mov %1, %0"
: "=r" (dst_a)
: "r" (src_a));
asm ("mov %1, %0"
: "=r" (dst_b)
: "r" (src_a));
__analyzer_eval (dst_a == dst_b); /* { dg-warning "TRUE" } */
return dst_a;
}
int test_out_in_in (int src_a, int src_b)
{
int dst_a, dst_b;
asm ("mov %1, %0;\n"
"add %2, %0"
: "=r" (dst_a)
: "r" (src_a),
"r" (src_b));
asm ("mov %1, %0;\n"
"add %2, %0"
: "=r" (dst_b)
: "r" (src_a),
"r" (src_b));
__analyzer_eval (dst_a == dst_b); /* { dg-warning "TRUE" } */
return dst_a;
}
/* "+r" inout: after each asm the value is a function of the previous
value, so only consecutive identical states compare equal. */
void test_inout_1 (int v)
{
int saved = v;
int result_a, result_b;
asm ("dec %0"
: "+r" (v));
result_a = v;
asm ("dec %0"
: "+r" (v));
result_b = v;
__analyzer_eval (v == saved); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (v == result_a); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (v == result_b); /* { dg-warning "TRUE" } */
}
/* Reading an uninitialized variable via an "+r" inout operand should
be diagnosed. */
void test_inout_2 (void)
{
int v;
int result_a, result_b;
asm ("dec %0" /* { dg-warning "use of uninitialized value 'v'" } */
: "+r" (v));
}

View file

@ -0,0 +1,131 @@
/* { dg-do assemble { target x86_64-*-* } } */
/* { dg-require-effective-target lp64 } */
#include "analyzer-decls.h"
#include <stdint.h>
/* Smoke tests for analyzer handling of various x86-64 asm forms:
plain inputs/outputs, named operands, "asm goto", volatile
rdtsc-style asms, and file-scope asm. */
int test_1 (int src)
{
int dst;
asm ("mov %1, %0\n\t"
"add $1, %0"
: "=r" (dst)
: "r" (src));
return dst;
}
/* Named ("symbolic") operands. */
uint32_t test_2 (uint32_t Mask)
{
uint32_t Index;
asm ("bsfl %[aMask], %[aIndex]"
: [aIndex] "=r" (Index)
: [aMask] "r" (Mask)
: "cc");
return Index;
}
/* asm goto with a numbered label reference. */
int test_3a (int p1, int p2)
{
asm goto ("btl %1, %0\n\t"
"jc %l2"
: // No outputs
: "r" (p1), "r" (p2)
: "cc"
: carry);
return 0;
carry:
return 1;
}
/* asm goto with a named label reference. */
int test_3b (int p1, int p2)
{
asm goto ("btl %1, %0\n\t"
"jc %l[carry]"
: // No outputs
: "r" (p1), "r" (p2)
: "cc"
: carry);
return 0;
carry:
return 1;
}
/* A volatile asm with no inputs (rdtsc) must NOT be treated as
deterministic: two reads can differ. */
uint64_t test_4 (void)
{
uint64_t start_time, end_time;
// Get start time
asm volatile ("rdtsc\n\t" // Returns the time in EDX:EAX.
"shl $32, %%rdx\n\t" // Shift the upper bits left.
"or %%rdx, %0" // 'Or' in the lower bits.
: "=a" (start_time)
:
: "rdx");
// could do other work here
// Get end time
asm volatile ("rdtsc\n\t" // Returns the time in EDX:EAX.
"shl $32, %%rdx\n\t" // Shift the upper bits left.
"or %%rdx, %0" // 'Or' in the lower bits.
: "=a" (end_time)
:
: "rdx");
__analyzer_eval (start_time == end_time); /* { dg-warning "UNKNOWN" } */
// Get elapsed time
return end_time - start_time;
}
static uint64_t get_time (void)
{
uint64_t result;
asm volatile ("rdtsc\n\t" // Returns the time in EDX:EAX.
"shl $32, %%rdx\n\t" // Shift the upper bits left.
"or %%rdx, %0" // 'Or' in the lower bits.
: "=a" (result)
:
: "rdx");
return result;
}
/* As test_4, but with the rdtsc asm wrapped in a helper function. */
uint64_t test_4a (void)
{
uint64_t start_time, end_time;
start_time = get_time ();
// could do other work here
end_time = get_time ();
__analyzer_eval (start_time == end_time); /* { dg-warning "UNKNOWN" } */
// Get elapsed time
return end_time - start_time;
}
/* File-scope ("basic") asm defining a function directly. */
asm ("\t.pushsection .text\n"
"\t.globl add_asm\n"
"\t.type add_asm, @function\n"
"add_asm:\n"
"\tmovq %rdi, %rax\n"
"\tadd %rsi, %rax\n"
"\tret\n"
"\t.popsection\n");
/* asm goto with an "+r" inout operand. */
int test_5 (int count)
{
asm goto ("dec %0; jb %l[stop]"
: "+r" (count)
:
:
: stop);
return count;
stop:
return 0;
}

View file

@ -0,0 +1,34 @@
/* { dg-do assemble { target x86_64-*-* } } */
/* { dg-require-effective-target lp64 } */
/* Adapted from Linux x86: page_ref_dec_and_test.c (GPL-2.0). */
/* Exercises "+m" memory operands and "=@cc<cond>" condition-code
outputs inside a statement expression. NOTE(review): there are no
dg-warning directives - presumably a does-not-crash/no-false-positive
test; confirm intent. */
typedef _Bool bool;
typedef struct {
int counter;
} atomic_t;
bool
arch_atomic_dec_and_test(atomic_t *v) {
return ({
bool c;
asm volatile(".pushsection .smp_locks,\"a\"\n"
".balign 4\n"
".long 671f - .\n"
".popsection\n"
"671:"
"\n\tlock; "
"decl"
" "
"%[var]"
"\n\t/* output condition code "
"e"
"*/\n"
: [ var ] "+m"(v->counter), "=@cc"
"e"(c)
:
: "memory");
c;
});
}

View file

@ -0,0 +1,5 @@
/* Per the ChangeLog above, the new test for PR analyzer/101570:
an asm whose output operand is a component (__real) of a
_Complex value. */
void
test2 (_Complex double f)
{
__asm__ ("" : "=r" (__real f));
}

View file

@ -0,0 +1,74 @@
/* { dg-do assemble { target x86_64-*-* } } */
/* { dg-require-effective-target lp64 } */
/* { dg-skip-if "" { *-*-* } { "-fno-fat-lto-objects" } { "" } } */
#include "../analyzer-decls.h"
/* Copied from linux: arch/x86/include/asm/barrier.h (GPL-2.0) */
static inline unsigned long array_index_mask_nospec(unsigned long index,
unsigned long size)
{
unsigned long mask;
asm volatile ("cmp %1,%2; sbb %0,%0;"
:"=r" (mask)
:"g"(size),"r" (index)
:"cc");
return mask;
}
/* The analyzer ought to treat array_index_mask_nospec as being
effectively pure. */
void test_1 (unsigned long index, unsigned long size)
{
unsigned long a = array_index_mask_nospec (index, size);
unsigned long b = array_index_mask_nospec (index, size);
__analyzer_eval (a == b); /* { dg-warning "TRUE" } */
}
/* Same inputs -> equal outputs; different inputs -> unknown. */
void test_2 (unsigned long index_a, unsigned long size_a,
unsigned long index_b, unsigned long size_b)
{
unsigned long aa_1 = array_index_mask_nospec (index_a, size_a);
unsigned long ab_1 = array_index_mask_nospec (index_a, size_b);
unsigned long ba_1 = array_index_mask_nospec (index_b, size_a);
unsigned long bb_1 = array_index_mask_nospec (index_b, size_b);
unsigned long aa_2 = array_index_mask_nospec (index_a, size_a);
unsigned long ab_2 = array_index_mask_nospec (index_a, size_b);
unsigned long ba_2 = array_index_mask_nospec (index_b, size_a);
unsigned long bb_2 = array_index_mask_nospec (index_b, size_b);
__analyzer_eval (aa_1 == aa_2); /* { dg-warning "TRUE" } */
__analyzer_eval (ab_1 == ab_2); /* { dg-warning "TRUE" } */
__analyzer_eval (ba_1 == ba_2); /* { dg-warning "TRUE" } */
__analyzer_eval (bb_1 == bb_2); /* { dg-warning "TRUE" } */
__analyzer_eval (aa_1 == ab_1); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (aa_1 == ba_1); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (aa_1 == bb_1); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (ab_1 == ba_1); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (ab_1 == bb_1); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (ba_1 == bb_1); /* { dg-warning "UNKNOWN" } */
}
/* Equivalent asm strings should be treated the same, rather
than requiring the results to come from the same stmt. */
void test_3 (unsigned long index, unsigned long size)
{
unsigned long a = array_index_mask_nospec (index, size);
unsigned long b;
/* Copy of the asm from array_index_mask_nospec. */
asm volatile ("cmp %1,%2; sbb %0,%0;"
:"=r" (b)
:"g"(size),"r" (index)
:"cc");
__analyzer_eval (a == b); /* { dg-warning "TRUE" } */
}

/* View file */

/* @ -0,0 +1,81 @@ : extraction artifact (diff hunk header); a new test file begins below. */
/* { dg-do assemble { target x86_64-*-* } } */
/* { dg-require-effective-target lp64 } */
/* { dg-skip-if "" { *-*-* } { "-O0" } { "" } } */
/* { dg-skip-if "" { *-*-* } { "-fno-fat-lto-objects" } { "" } } */
/* Adapted/reduced from linux kernel (GPL-2.0). */
/* Global register variable; used below as the "+r" in/out operand.  */
register unsigned long current_stack_pointer asm("rsp");
struct pv_cpu_ops {
/* snip */
void (*cpuid)(unsigned int *eax, unsigned int *ebx, unsigned int *ecx,
unsigned int *edx);
/* snip */
};
struct paravirt_patch_template {
struct pv_cpu_ops cpu;
/* snip */
};
extern struct paravirt_patch_template pv_ops;
/* snip */
/* Hand-expanded paravirt indirect call (cf. the kernel's PVOP_VCALL4).
   The four pointers are passed as register inputs and the asm has a
   "memory" clobber, so the analyzer must assume the pointed-to values
   may have been written by the asm.  */
static void cpuid(unsigned int *eax, unsigned int *ebx, unsigned int *ecx,
unsigned int *edx) {
unsigned long __edi = __edi, __esi = __esi, __edx = __edx, __ecx = __ecx,
__eax = __eax;
asm volatile(
"771:\n\t"
"999:\n\t"
".pushsection .discard.retpoline_safe\n\t"
" "
".quad"
" "
" 999b\n\t"
".popsection\n\t"
"call *%c[paravirt_opptr];"
"\n"
"772:\n"
".pushsection .parainstructions,\"a\"\n"
" "
".balign 8"
" "
"\n"
" "
".quad"
" "
" 771b\n"
" .byte "
"%c[paravirt_typenum]"
"\n"
" .byte 772b-771b\n"
" .short "
"%c[paravirt_clobber]"
"\n"
".popsection\n"
: "=D"(__edi), "=S"(__esi), "=d"(__edx), "=c"(__ecx),
"+r"(current_stack_pointer)
: [ paravirt_typenum ] "i"(
(__builtin_offsetof(struct paravirt_patch_template, cpu.cpuid) /
sizeof(void *))),
[ paravirt_opptr ] "i"(&(pv_ops.cpu.cpuid)),
[ paravirt_clobber ] "i"(((1 << 9) - 1)), "D"((unsigned long)(eax)),
"S"((unsigned long)(ebx)), "d"((unsigned long)(ecx)),
"c"((unsigned long)(edx))
: "memory", "cc", "rax", "r8", "r9", "r10", "r11");
}
extern void check_init_int(int v);
/* All four locals must be treated as initialized after the call; the
   dg-bogus directives guard against false uninit warnings.  */
void test(unsigned int op) {
unsigned int eax, ebx, ecx, edx;
eax = op;
ecx = 0;
cpuid(&eax, &ebx, &ecx, &edx);
check_init_int(eax);
check_init_int(ebx); /* { dg-bogus "use of uninitialized value 'ebx'" } */
check_init_int(ecx);
check_init_int(edx); /* { dg-bogus "use of uninitialized value 'edx'" } */
}

/* View file */

/* @ -0,0 +1,135 @@ : extraction artifact (diff hunk header); a new test file begins below. */
/* { dg-do assemble { target x86_64-*-* } } */
/* { dg-require-effective-target lp64 } */
/* { dg-skip-if "" { *-*-* } { "-O0" } { "" } } */
/* { dg-skip-if "" { *-*-* } { "-fno-fat-lto-objects" } { "" } } */
/* Adapted/reduced from linux kernel (GPL-2.0). */
typedef __SIZE_TYPE__ size_t;
#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
#define __stringify_1(x...) #x
#define __stringify(x...) __stringify_1(x)
#define __ASM_FORM(x, ...) " " __stringify(x,##__VA_ARGS__) " "
#define __ASM_FORM_RAW(x, ...) __stringify(x,##__VA_ARGS__)
/* 64-bit selections only (this test requires lp64).  */
#define __ASM_SEL(a,b) __ASM_FORM(b)
#define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
#define __ASM_REG(reg) __ASM_SEL_RAW(e##reg, r##reg)
#define _ASM_PTR __ASM_SEL(.long, .quad)
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
#define _ASM_SP __ASM_REG(sp)
register unsigned long current_stack_pointer asm(_ASM_SP);
#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
#define ANNOTATE_RETPOLINE_SAFE \
"999:\n\t" \
".pushsection .discard.retpoline_safe\n\t" \
_ASM_PTR " 999b\n\t" \
".popsection\n\t"
/* Adapted from Linux arch/x86/include/asm/paravirt.h */
struct pv_cpu_ops {
/* snip */
void (*cpuid)(unsigned int *eax, unsigned int *ebx, unsigned int *ecx,
unsigned int *edx);
/* snip */
};
struct paravirt_patch_template {
struct pv_cpu_ops cpu;
/* snip */
};
extern struct paravirt_patch_template pv_ops;
#define PARAVIRT_PATCH(x) \
(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
#define paravirt_type(op) \
[paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
[paravirt_opptr] "i" (&(pv_ops.op))
#define paravirt_clobber(clobber) \
[paravirt_clobber] "i" (clobber)
#define CLBR_ANY ((1 << 9) - 1)
#define _paravirt_alt(insn_string, type, clobber) \
"771:\n\t" insn_string "\n" "772:\n" \
".pushsection .parainstructions,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR " 771b\n" \
" .byte " type "\n" \
" .byte 772b-771b\n" \
" .short " clobber "\n" \
".popsection\n"
#define paravirt_alt(insn_string) \
_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
#define PARAVIRT_CALL \
ANNOTATE_RETPOLINE_SAFE \
"call *%c[paravirt_opptr];"
#define PVOP_CALL_ARGS \
unsigned long __edi = __edi, __esi = __esi, \
__edx = __edx, __ecx = __ecx, __eax = __eax;
#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
"=S" (__esi), "=d" (__edx), \
"=c" (__ecx)
/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
#define PVOP_TEST_NULL(op) ((void)pv_ops.op)
/* The asm has a "memory" clobber, so the analyzer must assume any
   pointed-to argument may be written by the call.  */
#define ____PVOP_CALL(ret, op, clbr, call_clbr, extra_clbr, ...) \
({ \
PVOP_CALL_ARGS; \
PVOP_TEST_NULL(op); \
asm volatile(paravirt_alt(PARAVIRT_CALL) \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
: "memory", "cc" extra_clbr); \
ret; \
})
#define __PVOP_VCALL(op, ...) \
(void)____PVOP_CALL(, op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
VEXTRA_CLOBBERS, ##__VA_ARGS__)
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
static void cpuid(unsigned int *eax, unsigned int *ebx, unsigned int *ecx,
unsigned int *edx)
{
PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}
extern void check_init_int(int v);
/* All four locals must be treated as initialized after the call.  */
void test(unsigned int op) {
unsigned int eax, ebx, ecx, edx;
eax = op;
ecx = 0;
cpuid(&eax, &ebx, &ecx, &edx);
check_init_int(eax);
check_init_int(ebx); /* { dg-bogus "use of uninitialized value 'ebx'" } */
check_init_int(ecx);
check_init_int(edx); /* { dg-bogus "use of uninitialized value 'edx'" } */
}

/* View file */

/* @ -0,0 +1,46 @@ : extraction artifact (diff hunk header); a new test file begins below. */
/* { dg-do assemble { target x86_64-*-* } } */
/* { dg-require-effective-target lp64 } */
#include "../analyzer-decls.h"
/* Fixed-width types built from compiler-provided macros, so the test
   does not depend on <stdint.h>.  */
typedef unsigned __INT32_TYPE__ u32;
typedef unsigned __INT64_TYPE__ u64;
/* External "sink" functions used to observe whether a value is
   initialized; declared but never defined (the test only assembles).  */
extern void check_init_u32 (u32 v);
extern void check_init_u64 (u64 v); /* was "(u32 v)": parameter type now matches the name; decl is unused below so this is inert. */
/* Adapted from linux kernel: arch/x86/include/asm/processor.h (GPL-2.0). */
/* cpuid wrapper: the outputs are the pointed-to lvalues themselves
   ("=a" (*eax) etc.), with "0"/"2" matching-constraint inputs tying
   *eax/*ecx to the first and third outputs, so all four pointed-to
   values are written by the asm.  */
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
/* ecx is often an input as well as an output. */
asm volatile("cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
: "0" (*eax), "2" (*ecx)
: "memory");
}
static inline void cpuid(unsigned int op,
unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
*eax = op;
*ecx = 0;
native_cpuid(eax, ebx, ecx, edx);
}
/* The analyzer must treat all four outputs as initialized after the
   call (no uninit warnings expected at the check_init_u32 calls).  */
void test_1 (void)
{
u32 eax, ebx, ecx, edx;
cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); /* from "amd_get_topology". */
/* Verify that they are now initialized. */
check_init_u32 (eax);
check_init_u32 (ebx);
check_init_u32 (ecx);
check_init_u32 (edx);
}

/* View file */

/* @ -0,0 +1,210 @@ : extraction artifact (diff hunk header); a new test file begins below. */
/* Adapted from Linux: arch/x86/include/asm/paravirt.h */
/* { dg-do assemble { target x86_64-*-* } } */
/* { dg-skip-if "" { *-*-* } { "-fno-fat-lto-objects" } { "" } } */
/* { dg-skip-if "" { *-*-* } { "-O0" } { "" } } */
/* Adapted/reduced from linux kernel (GPL-2.0). */
#include "../analyzer-decls.h"
typedef unsigned char u8;
typedef unsigned __INT32_TYPE__ u32;
typedef unsigned __INT64_TYPE__ u64;
typedef __SIZE_TYPE__ size_t;
#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
#define __stringify_1(x...) #x
#define __stringify(x...) __stringify_1(x)
# define __ASM_FORM(x, ...) " " __stringify(x,##__VA_ARGS__) " "
# define __ASM_FORM_RAW(x, ...) __stringify(x,##__VA_ARGS__)
#ifndef __x86_64__
/* 32 bit */
# define __ASM_SEL(a,b) __ASM_FORM(a)
# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
#else
/* 64 bit */
# define __ASM_SEL(a,b) __ASM_FORM(b)
# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
#endif
#define __ASM_REG(reg) __ASM_SEL_RAW(e##reg, r##reg)
#define _ASM_PTR __ASM_SEL(.long, .quad)
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
#define _ASM_SP __ASM_REG(sp)
register unsigned long current_stack_pointer asm(_ASM_SP);
#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
#define ANNOTATE_RETPOLINE_SAFE \
"999:\n\t" \
".pushsection .discard.retpoline_safe\n\t" \
_ASM_PTR " 999b\n\t" \
".popsection\n\t"
/* Adapted from Linux arch/x86/include/asm/paravirt.h */
/* snip */
/* ./arch/x86/include/asm/paravirt.h I think; was:
PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
*/
#ifndef __x86_64__
#define CLBR_ANY ((1 << 4) - 1)
#else
#define CLBR_ANY ((1 << 9) - 1)
#endif /* X86_64 */
struct pv_cpu_ops {
/* snip */
u64 (*read_msr_safe)(unsigned int msr, int *err);
/* snip */
};
struct paravirt_patch_template {
struct pv_cpu_ops cpu;
/* snip */
};
extern struct paravirt_patch_template pv_ops;
#define PARAVIRT_PATCH(x) \
(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
#define paravirt_type(op) \
[paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
[paravirt_opptr] "i" (&(pv_ops.op))
#define paravirt_clobber(clobber) \
[paravirt_clobber] "i" (clobber)
/*
* Generate some code, and mark it as patchable by the
* apply_paravirt() alternate instruction patcher.
*/
#define _paravirt_alt(insn_string, type, clobber) \
"771:\n\t" insn_string "\n" "772:\n" \
".pushsection .parainstructions,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR " 771b\n" \
" .byte " type "\n" \
" .byte 772b-771b\n" \
" .short " clobber "\n" \
".popsection\n"
/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string) \
_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
#define PARAVIRT_CALL \
ANNOTATE_RETPOLINE_SAFE \
"call *%c[paravirt_opptr];"
#ifndef __x86_64__
/* 32-bit. */
#define PVOP_CALL_ARGS \
unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;
#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
"=c" (__ecx)
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
#define EXTRA_CLOBBERS
#else
/* 64-bit. */
/* [re]ax isn't an arg, but the return val */
#define PVOP_CALL_ARGS \
unsigned long __edi = __edi, __esi = __esi, \
__edx = __edx, __ecx = __ecx, __eax = __eax;
#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
"=S" (__esi), "=d" (__edx), \
"=c" (__ecx)
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
#endif /* CONFIG_X86_32 */
#define PVOP_TEST_NULL(op) ((void)pv_ops.op)
#define PVOP_RETVAL(rettype) \
({ unsigned long __mask = ~0UL; \
switch (sizeof(rettype)) { \
case 1: __mask = 0xffUL; break; \
case 2: __mask = 0xffffUL; break; \
case 4: __mask = 0xffffffffUL; break; \
default: break; \
} \
__mask & __eax; \
})
#define ____PVOP_CALL(ret, op, clbr, call_clbr, extra_clbr, ...) \
({ \
PVOP_CALL_ARGS; \
PVOP_TEST_NULL(op); \
asm volatile(paravirt_alt(PARAVIRT_CALL) \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
: "memory", "cc" extra_clbr); \
ret; \
})
#define __PVOP_CALL(rettype, op, ...) \
____PVOP_CALL(PVOP_RETVAL(rettype), op, CLBR_ANY, \
PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, ##__VA_ARGS__)
#define PVOP_CALL2(rettype, op, arg1, arg2) \
__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
/* The expanded asm has a "memory" clobber, so *err must be treated as
   possibly written; the "=a" output provides the (unknown) result.  */
static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}
#define rdmsr_safe(msr, a, b) \
({ \
int _err; \
u64 _l = paravirt_read_msr_safe(msr, &_err); \
(*a) = (u32)_l; \
(*b) = _l >> 32; \
_err; \
})
void check_init_int(int);
void check_init_u32(u32);
/* err, eax and edx must all be treated as initialized after the
   rdmsr_safe call (no uninit warnings expected).  */
void test(void)
{
int err;
u32 eax, edx;
err = rdmsr_safe(0, &eax, &edx);
check_init_int(err);
check_init_u32(eax);
check_init_u32(edx);
}

/* View file */

/* @ -0,0 +1,33 @@ : extraction artifact (diff hunk header); a new test file begins below. */
/* { dg-do assemble { target x86_64-*-* } } */
/* { dg-skip-if "" { *-*-* } { "-fno-fat-lto-objects" } { "" } } */
#include "../analyzer-decls.h"
/* Adapted from Linux: arch/x86/include/asm/msr.h (GPL-2.0) */
#ifdef __x86_64__
#define DECLARE_ARGS(val, low, high) unsigned long low, high
#define EAX_EDX_VAL(val, low, high) ((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high) "=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high) unsigned long long val
#define EAX_EDX_VAL(val, low, high) (val)
#define EAX_EDX_RET(val, low, high) "=A" (val)
#endif
/* rdmsr wrapper: the result comes from the asm outputs, so the
   analyzer should model it as an unknown-but-initialized value.  */
static unsigned long long __rdmsr(unsigned int msr)
{
DECLARE_ARGS(val, low, high);
asm volatile("1: rdmsr\n"
"2:\n"
: EAX_EDX_RET(val, low, high) : "c" (msr));
return EAX_EDX_VAL(val, low, high);
}
/* Each result is unknown to the analyzer, hence UNKNOWN (but no
   uninitialized-value complaint).  */
void test (void)
{
__analyzer_eval (__rdmsr (0)); /* { dg-warning "UNKNOWN" } */
__analyzer_eval (__rdmsr (1)); /* { dg-warning "UNKNOWN" } */
}

/* View file */

/* @ -0,0 +1,319 @@ : extraction artifact (diff hunk header); a new test file begins below. */
/* { dg-do assemble { target x86_64-*-* } } */
/* { dg-require-effective-target lp64 } */
/* { dg-additional-options "-fsanitize=bounds -fno-analyzer-call-summaries" } */
/* { dg-skip-if "" { *-*-* } { "-O0" } { "" } } */
/* Reduced from linux kernel: drivers/staging/wfx/sta.c (GPL-2.0)
on x86_64 with "allyesconfig"
This test is deliberately not fully reduced, as an integration test
that the analyzer doesn't emit bogus "dereference of NULL" warnings
on the repeated wdev_to_wvif calls. */
#define NULL ((void *)0)
/* Types. */
typedef unsigned char __u8;
typedef unsigned short __u16;
__extension__ typedef unsigned long long __u64;
typedef __u8 u8;
typedef __u16 u16;
typedef __u64 u64;
enum { false = 0, true = 1 };
typedef _Bool bool;
struct device;
typedef struct {
int counter;
} atomic_t;
struct static_key {
atomic_t enabled;
union {
unsigned long type;
struct jump_entry *entries;
struct static_key_mod *next;
};
};
struct static_key_true {
struct static_key key;
};
struct static_key_false {
struct static_key key;
};
struct _ddebug {
const char *modname;
const char *function;
const char *filename;
const char *format;
unsigned int lineno : 18;
unsigned int flags : 8;
union {
struct static_key_true dd_key_true;
struct static_key_false dd_key_false;
} key;
} __attribute__((aligned(8)));
enum nl80211_iftype {
/* [...snip...] */
NL80211_IFTYPE_AP,
/* [...snip...] */
NUM_NL80211_IFTYPES,
NL80211_IFTYPE_MAX = NUM_NL80211_IFTYPES - 1
};
struct ieee80211_channel {
/* [...snip...] */
u16 hw_value;
/* [...snip...] */
};
struct cfg80211_chan_def {
struct ieee80211_channel *chan;
/* [...snip...] */
};
struct ieee80211_bss_conf {
/* [...snip...] */
bool assoc, ibss_joined;
/* [...snip...] */
struct cfg80211_chan_def chandef;
/* [...snip...] */
bool ps;
/* [...snip...] */
};
struct ieee80211_conf {
/* [...snip...] */
int power_level, dynamic_ps_timeout;
/* [...snip...] */
};
struct ieee80211_vif {
enum nl80211_iftype type;
struct ieee80211_bss_conf bss_conf;
/* [...snip...] */
u8 drv_priv[] __attribute__((__aligned__(sizeof(void *))));
};
struct ieee80211_hw {
struct ieee80211_conf conf;
/* [...snip...] */
};
struct wfx_dev {
/* [...snip...] */
struct device *dev;
struct ieee80211_hw *hw;
struct ieee80211_vif *vif[2];
/* [...snip...] */
int force_ps_timeout;
};
struct wfx_vif {
struct wfx_dev *wdev;
struct ieee80211_vif *vif;
/* [...snip...] */
};
/* Function decls. */
extern __attribute__((__format__(printf, 1, 2))) void
__warn_printk(const char *fmt, ...);
extern bool ____wrong_branch_error(void);
extern __attribute__((__format__(printf, 3, 4))) void
__dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev,
const char *fmt, ...);
bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor);
/* Function defns. */
/* Asm with a single "=r" output and only a "cc" clobber: the analyzer
   should model repeated calls with the same inputs as yielding the
   same value (effectively pure).  */
static inline unsigned long array_index_mask_nospec(unsigned long index,
unsigned long size) {
unsigned long mask;
asm volatile("cmp %1,%2; sbb %0,%0;"
: "=r"(mask)
: "g"(size), "r"(index)
: "cc");
return mask;
}
/* Reduced jump-label primitive: asm goto with no outputs; either falls
   through (returns false) or jumps to l_yes (returns true).  */
static inline __attribute__((__always_inline__)) bool
arch_static_branch(struct static_key *key, bool branch) {
asm goto("1:"
"jmp %l[l_yes] # objtool NOPs this \n\t"
".pushsection __jump_table, \"aw\" \n\t"
" "
".balign 8"
" "
"\n\t"
".long 1b - . \n\t"
".long %l[l_yes] - . \n\t"
" "
".quad"
" "
"%c0 + %c1 - .\n\t"
".popsection \n\t"
:
: "i"(key), "i"(2 | branch)
:
: l_yes);
asm("");
return false;
l_yes:
return true;
}
/* As above, but the unconditional-jump variant.  */
static inline __attribute__((__always_inline__)) bool
arch_static_branch_jump(struct static_key *const key, const bool branch) {
asm goto("1:"
"jmp %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\" \n\t"
" "
".balign 8"
" "
"\n\t"
".long 1b - . \n\t"
".long %l[l_yes] - . \n\t"
" "
".quad"
" "
"%c0 + %c1 - .\n\t"
".popsection \n\t"
:
: "i"(key), "i"(branch)
:
: l_yes);
asm("");
return false;
l_yes:
return true;
}
/* Reduced wdev_to_wvif: bounds-checks vif_id (via the nospec mask) and
   returns NULL for out-of-range or unallocated slots.  What the test
   needs is that two calls with identical arguments return the same
   value, so "if (wdev_to_wvif(...))" followed by a repeated call with
   the same arguments cannot yield NULL.  */
static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id) {
if (vif_id >=
(sizeof(wdev->vif) / sizeof((wdev->vif)[0]) + ((int)(sizeof(struct {
int : (-!!(__builtin_types_compatible_p(typeof((wdev->vif)),
typeof(&(wdev->vif)[0]))));
}))))) {
static struct _ddebug __attribute__((__aligned__(8)))
__attribute__((__section__("__dyndbg"))) __UNIQUE_ID_ddebug1678 = {
.modname = "wfx",
.function = __func__,
.filename = "drivers/staging/wfx/wfx.h",
.format = ("requesting non-existent vif: %d\n"),
.lineno = 97,
.flags = 0,
.key.dd_key_false = ((struct static_key_false){
.key = {.enabled = {0}, {.entries = (void *)0UL}},
})};
if (({
bool branch;
if (__builtin_types_compatible_p(
typeof(*&__UNIQUE_ID_ddebug1678.key.dd_key_false),
struct static_key_true))
branch = arch_static_branch_jump(
&(&__UNIQUE_ID_ddebug1678.key.dd_key_false)->key, false);
else if (__builtin_types_compatible_p(
typeof(*&__UNIQUE_ID_ddebug1678.key.dd_key_false),
struct static_key_false))
branch = arch_static_branch(
&(&__UNIQUE_ID_ddebug1678.key.dd_key_false)->key, false);
else
branch = ____wrong_branch_error();
__builtin_expect(!!(branch), 0);
}))
__dynamic_dev_dbg(&__UNIQUE_ID_ddebug1678, wdev->dev,
"requesting non-existent vif: %d\n", vif_id);
return NULL;
}
typeof(vif_id) _i = (vif_id);
typeof((sizeof(wdev->vif) / sizeof((wdev->vif)[0]) + ((int)(sizeof(struct {
int : (-!!(__builtin_types_compatible_p(typeof((wdev->vif)),
typeof(&(wdev->vif)[0]))));
}))))) _s =
((sizeof(wdev->vif) / sizeof((wdev->vif)[0]) + ((int)(sizeof(struct {
int : (-!!(__builtin_types_compatible_p(typeof((wdev->vif)),
typeof(&(wdev->vif)[0]))));
})))));
unsigned long _mask = array_index_mask_nospec(_i, _s);
vif_id = (typeof(_i))(_i & _mask);
if (!wdev->vif[vif_id]) {
static struct _ddebug __attribute__((__aligned__(8)))
__attribute__((__section__("__dyndbg"))) __UNIQUE_ID_ddebug1681 = {
.modname = "wfx",
.function = __func__,
.filename = "drivers/staging/wfx/wfx.h",
.format = ("requesting non-allocated vif: %d\n"),
.lineno = 102,
.flags = 0,
.key.dd_key_false = ((struct static_key_false){
.key = {.enabled = {0}, {.entries = (void *)0UL}},
})};
if (({
bool branch;
if (__builtin_types_compatible_p(
typeof(*&__UNIQUE_ID_ddebug1681.key.dd_key_false),
struct static_key_true))
branch = arch_static_branch_jump(
&(&__UNIQUE_ID_ddebug1681.key.dd_key_false)->key, false);
else if (__builtin_types_compatible_p(
typeof(*&__UNIQUE_ID_ddebug1681.key.dd_key_false),
struct static_key_false))
branch = arch_static_branch(
&(&__UNIQUE_ID_ddebug1681.key.dd_key_false)->key, false);
else
branch = ____wrong_branch_error();
__builtin_expect(!!(branch), 0);
}))
__dynamic_dev_dbg(&__UNIQUE_ID_ddebug1681, wdev->dev,
"requesting non-allocated vif: %d\n", vif_id);
return NULL;
}
return (struct wfx_vif *)wdev->vif[vif_id]->drv_priv;
}
/* The "if (wdev_to_wvif(...)) ... wdev_to_wvif(...)->..." pattern must
   not trigger -Wanalyzer-null-dereference: the second identical call
   returns the same (non-NULL) value as the first.  */
int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps) {
struct ieee80211_channel *chan0 = NULL, *chan1 = NULL;
struct ieee80211_conf *conf = &wvif->wdev->hw->conf;
if (wdev_to_wvif(wvif->wdev, 0))
chan0 = wdev_to_wvif(wvif->wdev, 0)->vif->bss_conf.chandef.chan; /* { dg-bogus "dereference of NULL" } */
if (wdev_to_wvif(wvif->wdev, 1))
chan1 = wdev_to_wvif(wvif->wdev, 1)->vif->bss_conf.chandef.chan; /* { dg-bogus "dereference of NULL" } */
if (chan0 && chan1 && chan0->hw_value != chan1->hw_value &&
wvif->vif->type != NL80211_IFTYPE_AP) {
if (enable_ps)
*enable_ps = true;
if (wvif->wdev->force_ps_timeout > -1)
return wvif->wdev->force_ps_timeout;
else if (wfx_api_older_than(wvif->wdev, 3, 2))
return 0;
else
return 30;
}
if (enable_ps)
*enable_ps = wvif->vif->bss_conf.ps;
if (wvif->wdev->force_ps_timeout > -1)
return wvif->wdev->force_ps_timeout;
else if (wvif->vif->bss_conf.assoc && wvif->vif->bss_conf.ps)
return conf->dynamic_ps_timeout;
else
return -1;
}

/* View file */

/* @ -0,0 +1,77 @@ : extraction artifact (diff hunk header); a new test file begins below. */
/* { dg-do assemble { target x86_64-*-* } } */
/* { dg-require-effective-target lp64 } */
/* Reproducer for false positive from -Wanalyzer-null-dereference seen
in Linux kernel (drivers/staging/wfx/sta.c; GPL-2.0) due to
the analyzer not grokking that array_index_mask_nospec is
effectively pure, and thus not realizing that array_index_nospec
is also pure, leading to wdev_to_wvif not being treated as pure,
and thus able to return non-NULL and then NULL. */
typedef unsigned char u8;
#define NULL ((void *)0)
/* Types. */
struct ieee80211_vif {
int placeholder;
/* snip */
u8 drv_priv[];
};
struct wfx_dev {
/* snip */
struct ieee80211_vif *vif[2];
/* snip */
};
struct wfx_vif {
struct wfx_dev *wdev;
struct ieee80211_vif *vif;
/* snip */
};
/* Copied from arch/x86/include/asm/barrier.h */
/* Single "=r" output and only a "cc" clobber: effectively pure, so
   repeated calls with identical inputs must compare equal.  */
static inline unsigned long array_index_mask_nospec(unsigned long index,
unsigned long size)
{
unsigned long mask;
asm volatile ("cmp %1,%2; sbb %0,%0;"
:"=r" (mask)
:"g"(size),"r" (index)
:"cc");
return mask;
}
/* Simplified from include/linux/kernel.h */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
/* Simplified from include/linux/nospec.h */
#define array_index_nospec(index, size) \
({ \
typeof(index) _i = (index); \
typeof(size) _s = (size); \
unsigned long _mask = array_index_mask_nospec(_i, _s); \
/* snip */ \
(typeof(_i)) (_i & _mask); \
})
/* Simplified from drivers/staging/wfx/wfx.h */
static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id) {
vif_id = array_index_nospec(vif_id, ARRAY_SIZE(wdev->vif));
if (!wdev->vif[vif_id]) {
return NULL;
}
return (struct wfx_vif *)wdev->vif[vif_id]->drv_priv;
}
/* The second wdev_to_wvif call must be seen to return the same
   (non-NULL) value as the guarded first call, so no NULL-dereference
   warning is expected.  */
struct ieee80211_vif *test (struct wfx_vif *wvif) {
if (wdev_to_wvif(wvif->wdev, 1))
return wdev_to_wvif(wvif->wdev, 1)->vif; /* { dg-bogus "dereference of NULL" } */
else
return NULL;
}