analyzer: add region::tracked_p to optimize state objects [PR104954]

PR analyzer/104954 tracks that -fanalyzer was taking a very long time
on a particular source file in the Linux kernel:
  drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c

One issue occurs with the repeated use of dynamic debug lines e.g. via
the DC_LOG_BANDWIDTH_CALCS macro, such as in print_bw_calcs_dceip in
drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h:

  DC_LOG_BANDWIDTH_CALCS("#####################################################################");
  DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_dceip");
  DC_LOG_BANDWIDTH_CALCS("#####################################################################");

  [...snip dozens of lines...]

  DC_LOG_BANDWIDTH_CALCS("[bw_fixed] dmif_request_buffer_size: %d",
                         bw_fixed_to_int(dceip->dmif_request_buffer_size));

When this is configured to use __dynamic_pr_debug, each of these becomes
code like:

  do {
    static struct _ddebug __attribute__((__aligned__(8)))
    __attribute__((__section__("__dyndbg"))) __UNIQUE_ID_ddebug277 = {
      [...snip...]
    };
    if (arch_static_branch(&__UNIQUE_ID_ddebug277.key, false))
      __dynamic_pr_debug(&__UNIQUE_ID_ddebug277, [...the message...]);
  } while (0);

The analyzer was naively seeing each call to __dynamic_pr_debug, noting
that the __UNIQUE_ID_nnnn object escapes.  At the Nth such call, N
__UNIQUE_ID_nnnn objects have escaped, and thus N need clobbering, so we
have O(N^2) clobbering of escaped objects overall,
leading to huge amounts of pointless work: print_bw_calcs_data has 225
uses of DC_LOG_BANDWIDTH_CALCS, many of which are in loops.

This patch adds a way to identify declarations that aren't interesting
to the analyzer, so that we don't attempt to create binding_clusters
for them (i.e. we don't store any state for them in our state objects).
This is implemented by adding a new region::tracked_p, implemented for
declarations by walking the existing IPA data the first time the
analyzer sees a declaration, setting it to false for global vars that
have no loads/stores/aliases, and "sufficiently safe" address-of
ipa-refs.

The patch gives a large speedup of -fanalyzer on the above kernel
source file:
                           Before  After
Total cc1 wallclock time:    180s    36s
analyzer wallclock time:     162s    17s
% spent in analyzer:          90%    47%

gcc/analyzer/ChangeLog:
	PR analyzer/104954
	* analyzer.opt (-fdump-analyzer-untracked): New option.
	* engine.cc (impl_run_checkers): Handle it.
	* region-model-asm.cc (region_model::on_asm_stmt): Don't attempt
	to clobber regions with !tracked_p ().
	* region-model-manager.cc (dump_untracked_region): New.
	(region_model_manager::dump_untracked_regions): New.
	(frame_region::dump_untracked_regions): New.
	* region-model.h (region_model_manager::dump_untracked_regions):
	New decl.
	* region.cc (ipa_ref_requires_tracking): New.
	(symnode_requires_tracking_p): New.
	(decl_region::calc_tracked_p): New.
	* region.h (region::tracked_p): New vfunc.
	(frame_region::dump_untracked_regions): New decl.
	(class decl_region): Note that this is also used for SSA names.
	(decl_region::decl_region): Initialize m_tracked.
	(decl_region::tracked_p): New.
	(decl_region::calc_tracked_p): New decl.
	(decl_region::m_tracked): New.
	* store.cc (store::get_or_create_cluster): Assert that we
	don't try to create clusters for base regions that aren't
	trackable.
	(store::mark_as_escaped): Don't mark base regions that we're not
	tracking.

gcc/ChangeLog:
	PR analyzer/104954
	* doc/invoke.texi (Static Analyzer Options): Add
	-fdump-analyzer-untracked.

gcc/testsuite/ChangeLog:
	PR analyzer/104954
	* gcc.dg/analyzer/asm-x86-dyndbg-1.c: New test.
	* gcc.dg/analyzer/asm-x86-dyndbg-2.c: New test.
	* gcc.dg/analyzer/many-unused-locals.c: New test.
	* gcc.dg/analyzer/untracked-1.c: New test.
	* gcc.dg/analyzer/unused-local-1.c: New test.

Signed-off-by: David Malcolm <dmalcolm@redhat.com>
This commit is contained in:
David Malcolm 2022-03-24 20:58:10 -04:00
parent 319ba7e241
commit 5f6197d7c1
14 changed files with 565 additions and 4 deletions

View file

@ -250,4 +250,8 @@ fdump-analyzer-supergraph
Common RejectNegative Var(flag_dump_analyzer_supergraph)
Dump the analyzer supergraph to a SRCFILE.supergraph.dot file.
fdump-analyzer-untracked
Common RejectNegative Var(flag_dump_analyzer_untracked)
Emit custom warnings with internal details intended for analyzer developers.
; This comment is to ensure we retain the blank line above.

View file

@ -5811,6 +5811,9 @@ impl_run_checkers (logger *logger)
if (flag_dump_analyzer_json)
dump_analyzer_json (sg, eg);
if (flag_dump_analyzer_untracked)
eng.get_model_manager ()->dump_untracked_regions ();
delete purge_map;
}

View file

@ -267,7 +267,8 @@ region_model::on_asm_stmt (const gasm *stmt, region_model_context *ctxt)
iter != reachable_regs.end_mutable_base_regs (); ++iter)
{
const region *base_reg = *iter;
if (base_reg->symbolic_for_unknown_ptr_p ())
if (base_reg->symbolic_for_unknown_ptr_p ()
|| !base_reg->tracked_p ())
continue;
binding_cluster *cluster = m_store.get_or_create_cluster (base_reg);

View file

@ -1740,6 +1740,47 @@ store_manager::log_stats (logger *logger, bool show_objs) const
m_symbolic_binding_key_mgr);
}
/* Emit a warning showing DECL_REG->tracked_p () for use in DejaGnu tests
(using -fdump-analyzer-untracked). */
static void
dump_untracked_region (const decl_region *decl_reg)
{
tree decl = decl_reg->get_decl ();
if (TREE_CODE (decl) != VAR_DECL)
return;
warning_at (DECL_SOURCE_LOCATION (decl), 0,
"track %qD: %s",
decl, (decl_reg->tracked_p () ? "yes" : "no"));
}
/* Implementation of -fdump-analyzer-untracked.  */

void
region_model_manager::dump_untracked_regions () const
{
  /* Report on each global decl we've created a region for.  */
  for (const auto &kv : m_globals_map)
    dump_untracked_region (kv.second);

  /* Each frame region reports on its own locals.  */
  for (const auto &kv : m_frame_regions)
    kv.second->dump_untracked_regions ();
}
/* Report the tracked-ness of each local within this frame
   (for -fdump-analyzer-untracked).  */

void
frame_region::dump_untracked_regions () const
{
  for (const auto &kv : m_locals)
    dump_untracked_region (kv.second);
}
} // namespace ana
#endif /* #if ENABLE_ANALYZER */

View file

@ -349,6 +349,8 @@ public:
logger *get_logger () const { return m_logger; }
void dump_untracked_regions () const;
private:
bool too_complex_p (const complexity &c) const;
bool reject_if_too_complex (svalue *sval);

View file

@ -1167,6 +1167,94 @@ decl_region::get_svalue_for_initializer (region_model_manager *mgr) const
return m.get_rvalue (path_var (init, 0), NULL);
}
/* Subroutine of symnode_requires_tracking_p; return true if REF
within CONTEXT_FNDECL might imply that we should be tracking the
value of a decl. */
static bool
ipa_ref_requires_tracking (const ipa_ref *ref, tree context_fndecl)
{
/* If we have a load/store/alias of the symbol, then we'll track
the decl's value. */
if (ref->use != IPA_REF_ADDR)
return true;
/* Be conservative if the statement making the ref isn't available.  */
if (ref->stmt == NULL)
return true;
switch (ref->stmt->code)
{
/* Any other kind of statement taking the address: be conservative
   and track the decl.  */
default:
return true;
case GIMPLE_CALL:
{
cgraph_node *context_cnode = cgraph_node::get (context_fndecl);
cgraph_edge *edge = context_cnode->get_edge (ref->stmt);
/* No call-graph edge for this stmt: be conservative.  */
if (!edge)
return true;
if (edge->callee == NULL)
return true; /* e.g. call through function ptr. */
/* A callee with a body in this TU could read/write the decl.  */
if (edge->callee->definition)
return true;
/* If we get here, then this ref is a pointer passed to
a function we don't have the definition for. */
return false;
}
break;
case GIMPLE_ASM:
{
const gasm *asm_stmt = as_a <const gasm *> (ref->stmt);
/* asm with outputs or clobbers could update the decl.  */
if (gimple_asm_noutputs (asm_stmt) > 0)
return true;
if (gimple_asm_nclobbers (asm_stmt) > 0)
return true;
/* If we get here, then this ref is the decl being passed
by pointer to asm with no outputs. */
return false;
}
break;
}
}
/* Determine if the decl for SYMNODE should have binding_clusters
in our state objects; return false if it is safe not to track it,
as an optimization. */
static bool
symnode_requires_tracking_p (symtab_node *symnode)
{
gcc_assert (symnode);
/* Other TUs could access an externally-visible symbol in ways
   we can't see.  */
if (symnode->externally_visible)
return true;
/* Only function-local decls are candidates for being untracked.  */
tree context_fndecl = DECL_CONTEXT (symnode->decl);
if (context_fndecl == NULL)
return true;
if (TREE_CODE (context_fndecl) != FUNCTION_DECL)
return true;
/* Track the decl if any IPA ref to the symbol requires it.  */
for (auto ref : symnode->ref_list.referring)
if (ipa_ref_requires_tracking (ref, context_fndecl))
return true;
/* If we get here, then we don't have uses of this decl that require
tracking; we never read from it or write to it explicitly. */
return false;
}
/* Subroutine of the decl_region ctor: determine whether binding_clusters
   should be created for DECL, returning false for decls that we can
   safely avoid tracking in our state objects.  */

bool
decl_region::calc_tracked_p (tree decl)
{
  /* symtab_node::get has this as a precondition.  */
  const bool has_symtab_entry = (TREE_CODE (decl) == VAR_DECL
				 && (TREE_STATIC (decl)
				     || DECL_EXTERNAL (decl)
				     || in_lto_p));
  if (has_symtab_entry)
    if (symtab_node *snode = symtab_node::get (decl))
      return symnode_requires_tracking_p (snode);

  /* By default, everything is tracked.  */
  return true;
}
/* class field_region : public region. */
/* Implementation of region::dump_to_pp vfunc for field_region. */

View file

@ -197,6 +197,11 @@ public:
bool symbolic_for_unknown_ptr_p () const;
/* For most base regions it makes sense to track the bindings of the region
within the store. As an optimization, some are not tracked (to avoid
bloating the store object with redundant binding clusters). */
virtual bool tracked_p () const { return true; }
const complexity &get_complexity () const { return m_complexity; }
bool is_named_decl_p (const char *decl_name) const;
@ -319,6 +324,9 @@ public:
unsigned get_num_locals () const { return m_locals.elements (); }
/* Implemented in region-model-manager.cc. */
void dump_untracked_regions () const;
private:
const frame_region *m_calling_frame;
function *m_fun;
@ -633,13 +641,15 @@ template <> struct default_hash_traits<symbolic_region::key_t>
namespace ana {
/* Concrete region subclass representing the memory occupied by a
variable (whether for a global or a local). */
variable (whether for a global or a local).
Also used for representing SSA names, as if they were locals. */
class decl_region : public region
{
public:
decl_region (unsigned id, const region *parent, tree decl)
: region (complexity (parent), id, parent, TREE_TYPE (decl)), m_decl (decl)
: region (complexity (parent), id, parent, TREE_TYPE (decl)), m_decl (decl),
m_tracked (calc_tracked_p (decl))
{}
enum region_kind get_kind () const FINAL OVERRIDE { return RK_DECL; }
@ -648,6 +658,8 @@ public:
void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
bool tracked_p () const FINAL OVERRIDE { return m_tracked; }
tree get_decl () const { return m_decl; }
int get_stack_depth () const;
@ -657,7 +669,15 @@ public:
const svalue *get_svalue_for_initializer (region_model_manager *mgr) const;
private:
static bool calc_tracked_p (tree decl);
tree m_decl;
/* Cached result of calc_tracked_p, so that we can quickly determine when
we don't need to track a binding_cluster for this decl (to avoid bloating
store objects).
This can be debugged using -fdump-analyzer-untracked. */
bool m_tracked;
};
} // namespace ana

View file

@ -2654,6 +2654,9 @@ store::get_or_create_cluster (const region *base_reg)
/* We shouldn't create clusters for dereferencing an UNKNOWN ptr. */
gcc_assert (!base_reg->symbolic_for_unknown_ptr_p ());
/* We shouldn't create clusters for base regions that aren't trackable. */
gcc_assert (base_reg->tracked_p ());
if (binding_cluster **slot = m_cluster_map.get (base_reg))
return *slot;
@ -2742,7 +2745,8 @@ store::mark_as_escaped (const region *base_reg)
gcc_assert (base_reg);
gcc_assert (base_reg->get_base_region () == base_reg);
if (base_reg->symbolic_for_unknown_ptr_p ())
if (base_reg->symbolic_for_unknown_ptr_p ()
|| !base_reg->tracked_p ())
return;
binding_cluster *cluster = get_or_create_cluster (base_reg);

View file

@ -439,6 +439,7 @@ Objective-C and Objective-C++ Dialects}.
-fdump-analyzer-state-purge @gol
-fdump-analyzer-stderr @gol
-fdump-analyzer-supergraph @gol
-fdump-analyzer-untracked @gol
-Wno-analyzer-double-fclose @gol
-Wno-analyzer-double-free @gol
-Wno-analyzer-exposure-through-output-file @gol
@ -10212,6 +10213,10 @@ control flow graphs in the program, with interprocedural edges for
calls and returns. The second dump contains annotations showing nodes
in the ``exploded graph'' and diagnostics associated with them.
@item -fdump-analyzer-untracked
@opindex fdump-analyzer-untracked
Emit custom warnings with internal details intended for analyzer developers.
@end table
@node Debugging Options

View file

@ -0,0 +1,126 @@
/* Test reduced from use of dynamic_pr_debug on Linux kernel, to verify that
we treat the static struct _ddebug as not needing to be tracked by the
analyzer, thus optimizing away bloat in the analyzer's state tracking. */
/* { dg-do compile { target x86_64-*-* } } */
/* { dg-additional-options "-fdump-analyzer-untracked" } */
/* Adapted from various files in the Linux kernel, all of which have: */
/* SPDX-License-Identifier: GPL-2.0 */
typedef _Bool bool;
#define true 1
#define false 0
typedef struct {
int counter;
} atomic_t;
/* Adapted from include/linux/compiler_attributes.h */
#define __always_inline inline __attribute__((__always_inline__))
/* Adapted from include/linux/compiler-gcc.h */
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
/* Adapted from include/linux/jump_label.h, which has: */
struct static_key {
atomic_t enabled;
union {
/* [...snip...] */
struct jump_entry *entries;
/* [...snip...] */
};
};
struct static_key_true {
struct static_key key;
};
struct static_key_false {
struct static_key key;
};
extern bool ____wrong_branch_error(void);
/* Adapted from arch/x86/include/asm/jump_label.h */
#define JUMP_TABLE_ENTRY \
".pushsection __jump_table, \"aw\" \n\t" \
/*_ASM_ALIGN*/ "\n\t" \
".long 1b - . \n\t" \
".long %l[l_yes] - . \n\t" \
/*_ASM_PTR*/ "%c0 + %c1 - .\n\t" \
".popsection \n\t"
/* asm goto with no outputs that emits a __jump_table entry; control
   either falls through (return false) or transfers to l_yes via the
   goto label (return true).  */
static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
/*".byte " __stringify(BYTES_NOP5) "\n\t" */
JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
return false;
l_yes:
return true;
}
/* Variant whose asm goto starts with an unconditional "jmp %l[l_yes]";
   KEY's address is only consumed as an "i" input to the asm.  */
static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
"jmp %l[l_yes]\n\t"
JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
return false;
l_yes:
return true;
}
/* Adapted from include/linux/dynamic_debug.h */
struct _ddebug {
/* [...snip...] */
const char *function;
const char *filename;
const char *format;
unsigned int lineno:18;
/* [...snip...] */
unsigned int flags:8;
union {
struct static_key_true dd_key_true;
struct static_key_false dd_key_false;
} key;
} __attribute__((aligned(8)));
extern void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
/* Hand-expanded dynamic_pr_debug: the static descriptor only has its
   address taken (by the asm via arch_static_branch* and by the call to
   __dynamic_pr_debug), so the analyzer should not track it — hence the
   "track ...: no" dg-warning below.  */
static void expanded_dynamic_pr_debug(void) {
do {
static struct _ddebug __attribute__((__aligned__(8)))
__attribute__((__section__("__dyndbg"))) __UNIQUE_ID_ddebug277 = { /* { dg-warning "track '__UNIQUE_ID_ddebug277': no" } */
.function = __func__,
.filename = __FILE__,
.format = ("hello world"),
.lineno = __LINE__,
.flags = 0};
if (({
bool branch;
/* Select the branch helper matching the static key's type.  */
if (__builtin_types_compatible_p(
typeof(*&__UNIQUE_ID_ddebug277.key.dd_key_false),
struct static_key_true))
branch = arch_static_branch_jump(
&(&__UNIQUE_ID_ddebug277.key.dd_key_false)->key, false);
else if (__builtin_types_compatible_p(
typeof(*&__UNIQUE_ID_ddebug277.key.dd_key_false),
struct static_key_false))
branch = arch_static_branch(
&(&__UNIQUE_ID_ddebug277.key.dd_key_false)->key, false);
else
branch = ____wrong_branch_error();
__builtin_expect(!!(branch), 0);
}))
__dynamic_pr_debug(&__UNIQUE_ID_ddebug277,
"hello world");
} while (0);
}

View file

@ -0,0 +1,77 @@
/* Test reduced from use of dynamic_pr_debug on Linux kernel, to verify that
we treat the static struct _ddebug as not needing to be tracked by the
analyzer, thus optimizing away bloat in the analyzer's state tracking. */
/* { dg-do compile { target x86_64-*-* } } */
/* { dg-additional-options "-fdump-analyzer-untracked" } */
/* Adapted from various files in the Linux kernel, all of which have: */
/* SPDX-License-Identifier: GPL-2.0 */
typedef _Bool bool;
#define true 1
#define false 0
typedef struct {} atomic_t;
/* Adapted from include/linux/compiler_attributes.h */
#define __always_inline inline __attribute__((__always_inline__))
/* Adapted from include/linux/compiler-gcc.h */
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
/* Adapted from include/linux/jump_label.h, which has: */
struct static_key {};
/* Adapted from arch/x86/include/asm/jump_label.h */
/* Minimal asm goto taking KEY's address as an "i" input; no outputs,
   no clobbers, so the ref should not require tracking KEY's decl.  */
static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
: : "i" (key), "i" (branch) : : l_yes);
return false;
l_yes:
return true;
}
/* Same shape as arch_static_branch above: asm goto with only "i"
   inputs, no outputs and no clobbers.  */
static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
: : "i" (key), "i" (branch) : : l_yes);
return false;
l_yes:
return true;
}
/* Adapted from include/linux/dynamic_debug.h */
struct _ddebug {
/* [...snip...] */
const char *function;
const char *filename;
const char *format;
unsigned int lineno:18;
/* [...snip...] */
unsigned int flags:8;
struct static_key key;
} __attribute__((aligned(8)));
extern void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
/* Simplified expansion of dynamic_pr_debug: the static descriptor's
   address escapes only to the asm (via arch_static_branch) and to
   __dynamic_pr_debug, so it should be untracked — see the dg-warning.  */
static void expanded_dynamic_pr_debug(void) {
do {
static struct _ddebug __attribute__((__aligned__(8)))
__attribute__((__section__("__dyndbg"))) __UNIQUE_ID_ddebug277 = { /* { dg-warning "track '__UNIQUE_ID_ddebug277': no" } */
.function = __func__,
.filename = __FILE__,
.format = ("hello world"),
.lineno = __LINE__,
.flags = 0};
if (arch_static_branch(&__UNIQUE_ID_ddebug277.key, false))
__dynamic_pr_debug(&__UNIQUE_ID_ddebug277,
"hello world");
} while (0);
}

View file

@ -0,0 +1,69 @@
struct st
{
const char *m_filename;
int m_line;
const char *m_function;
};
extern void debug (struct st *);
#define TEST_x_1(NAME) \
do \
{ \
static struct st NAME = { __FILE__, __LINE__, __func__ }; \
debug (&NAME); \
} \
while (0)
#define TEST_x_10(PREFIX) \
do \
{ \
TEST_x_1(PREFIX ## _1); \
TEST_x_1(PREFIX ## _2); \
TEST_x_1(PREFIX ## _3); \
TEST_x_1(PREFIX ## _4); \
TEST_x_1(PREFIX ## _5); \
TEST_x_1(PREFIX ## _6); \
TEST_x_1(PREFIX ## _7); \
TEST_x_1(PREFIX ## _8); \
TEST_x_1(PREFIX ## _9); \
TEST_x_1(PREFIX ## _10); \
} \
while(0)
#define TEST_x_100(PREFIX) \
do \
{ \
TEST_x_10(PREFIX ## _1); \
TEST_x_10(PREFIX ## _2); \
TEST_x_10(PREFIX ## _3); \
TEST_x_10(PREFIX ## _4); \
TEST_x_10(PREFIX ## _5); \
TEST_x_10(PREFIX ## _6); \
TEST_x_10(PREFIX ## _7); \
TEST_x_10(PREFIX ## _8); \
TEST_x_10(PREFIX ## _9); \
TEST_x_10(PREFIX ## _10); \
} \
while(0)
#define TEST_x_1000(PREFIX) \
do \
{ \
TEST_x_100(PREFIX ## _1); \
TEST_x_100(PREFIX ## _2); \
TEST_x_100(PREFIX ## _3); \
TEST_x_100(PREFIX ## _4); \
TEST_x_100(PREFIX ## _5); \
TEST_x_100(PREFIX ## _6); \
TEST_x_100(PREFIX ## _7); \
TEST_x_100(PREFIX ## _8); \
TEST_x_100(PREFIX ## _9); \
TEST_x_100(PREFIX ## _10); \
} \
while(0)
void test_many (void)
{
/* Expands into 1000 do/while blocks, each declaring its own static
   struct st and passing its address to debug (); exercises analyzer
   scalability when most such locals need no tracking.  */
TEST_x_1000(s);
}

View file

@ -0,0 +1,99 @@
/* { dg-additional-options "-fdump-analyzer-untracked" } */
struct st
{
const char *m_filename;
int m_line;
};
typedef struct boxed_int { int value; } boxed_int;
extern void extern_fn (struct st *);
static void __attribute__((noinline)) internal_fn (struct st *) {}
extern int extern_get_int (void);
/* Unused static local: expect no -fdump-analyzer-untracked output.  */
void test_0 (void)
{
/* Not ever referenced; will get optimized away before
analyzer ever sees it, so no message. */
static struct st s1 = { __FILE__, __LINE__ };
}
/* Only the address escapes, to an extern fn with no body in this TU:
   untracked.  */
void test_1 (void)
{
static struct st s1 = { __FILE__, __LINE__ }; /* { dg-warning "track 's1': no" } */
extern_fn (&s1);
}
/* s2 is at file scope (no function DECL_CONTEXT), so it stays
   tracked even though only its address escapes.  */
static struct st s2 = { __FILE__, __LINE__ }; /* { dg-warning "track 's2': yes" } */
void test_2 (void)
{
extern_fn (&s2);
}
/* Non-static local: only statics can be untracked, so tracked.  */
void test_3 (void)
{
struct st s3 = { __FILE__, __LINE__ }; /* { dg-warning "track 's3': yes" } */
extern_fn (&s3);
}
extern void called_by_test_4 (int *);
/* Auto local passed by address and then read: tracked.  */
int test_4 (void)
{
int i; /* { dg-warning "track 'i': yes" } */
called_by_test_4 (&i);
return i;
}
/* Auto local: tracked, even though it is never used afterwards.  */
void test_5 (int i)
{
boxed_int bi5 = { i }; /* { dg-warning "track 'bi5': yes" } */
}
/* Explicit store to and load from a static: such refs require
   tracking.  */
int test_6 (int i)
{
static boxed_int bi6; /* { dg-warning "track 'bi6': yes" } */
bi6.value = i;
return bi6.value;
}
/* Auto local read while uninitialized: tracking is needed so the
   uninit diagnostic below can still be issued.  */
int test_7 (void)
{
boxed_int bi7; /* { dg-warning "track 'bi7': yes" } */
return bi7.value; /* { dg-warning "use of uninitialized value 'bi7.value'" "uninit" } */
}
/* Multiple address-only escapes to an extern fn: still untracked.  */
void test_8 (void)
{
static struct st s8 = { __FILE__, __LINE__ }; /* { dg-warning "track 's8': no" } */
extern_fn (&s8);
extern_fn (&s8);
}
/* internal_fn has a definition in this TU, so the callee could
   access s9: tracked.  */
void test_9 (void)
{
static struct st s9 = { __FILE__, __LINE__ }; /* { dg-warning "track 's9': yes" } */
internal_fn (&s9);
}
/* The explicit read of s10.m_line requires tracking, despite the
   address also escaping to extern_fn.  */
int test_10 (void)
{
static struct st s10 = { __FILE__, __LINE__ }; /* { dg-warning "track 's10': yes" } */
extern_fn (&s10);
return s10.m_line;
}
int test_11 (void)
{
static struct st s10 = { __FILE__, __LINE__ }; /* { dg-warning "track 's10': yes" } */
s10.m_line = extern_get_int ();
return 42;
}
/* Call through a function pointer: the analyzer must assume the
   callee could read or write s12, so it is tracked.  */
int test_12 (void (*fnptr) (struct st *))
{
  static struct st s12 = { __FILE__, __LINE__ }; /* { dg-warning "track 's12': yes" } */
  fnptr (&s12);
  return 0; /* Was missing: test_12 is declared to return int.  */
}

View file

@ -0,0 +1,22 @@
/* { dg-additional-options "-fdump-analyzer-untracked" } */
struct st
{
const char *m_filename;
int m_line;
const char *m_function;
};
extern void debug (struct st *);
/* Two block-scope statics whose addresses escape only to the extern
   debug fn: neither needs tracking.  */
void test (void)
{
{
static struct st s1 = { __FILE__, __LINE__, __func__ }; /* { dg-warning "track 's1': no" } */
debug (&s1);
}
{
static struct st s2 = { __FILE__, __LINE__, __func__ }; /* { dg-warning "track 's2': no" } */
debug (&s2);
}
}