libsanitizer merge from upstream r250806.
libsanitizer/

2015-10-20  Maxim Ostapenko  <m.ostapenko@partner.samsung.com>

	* All source files: Merge from upstream r250806.
	* configure.ac (link_sanitizer_common): Add -lrt flag.
	* configure.tgt: Enable TSAN and LSAN for aarch64-linux targets.
	Set USE_CXX_ABI_FLAG=true for darwin.
	* asan/Makefile.am (asan_files): Add new files.
	(DEFS): Add -DCAN_SANITIZE_UB=0 and remove unused and legacy
	-DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0.
	* asan/Makefile.in: Regenerate.
	* ubsan/Makefile.am (ubsan_files): Add new files.
	(DEFS): Add -DCAN_SANITIZE_UB=1.
	(libubsan_la_LIBADD): Add -lc++abi if USE_CXX_ABI_FLAG is true.
	* ubsan/Makefile.in: Regenerate.
	* tsan/Makefile.am (tsan_files): Add new files.
	(DEFS): Add -DCAN_SANITIZE_UB=0.
	* tsan/Makefile.in: Regenerate.
	* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
	* sanitizer_common/Makefile.in: Regenerate.
	* asan/libtool-version: Bump the libasan SONAME.

From-SVN: r229111
commit 696d846a56 (parent 013a8899f5)
218 changed files with 12360 additions and 6496 deletions
libsanitizer/ChangeLog
@@ -1,3 +1,24 @@
+2015-10-21  Maxim Ostapenko  <m.ostapenko@partner.samsung.com>
+
+	* All source files: Merge from upstream r250806.
+	* configure.ac (link_sanitizer_common): Add -lrt flag.
+	* configure.tgt: Enable TSAN and LSAN for aarch64-linux targets.
+	Set USE_CXX_ABI_FLAG=true for darwin.
+	* asan/Makefile.am (asan_files): Add new files.
+	(DEFS): Add -DCAN_SANITIZE_UB=0 and remove unused and legacy
+	-DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0.
+	* asan/Makefile.in: Regenerate.
+	* ubsan/Makefile.am (ubsan_files): Add new files.
+	(DEFS): Add -DCAN_SANITIZE_UB=1.
+	(libubsan_la_LIBADD): Add -lc++abi if USE_CXX_ABI_FLAG is true.
+	* ubsan/Makefile.in: Regenerate.
+	* tsan/Makefile.am (tsan_files): Add new files.
+	(DEFS): Add -DCAN_SANITIZE_UB=0.
+	* tsan/Makefile.in: Regenerate.
+	* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
+	* sanitizer_common/Makefile.in: Regenerate.
+	* asan/libtool-version: Bump the libasan SONAME.
+
 2015-09-09  Markus Trippelsdorf  <markus@trippelsdorf.de>
 
 	PR sanitizer/67258
libsanitizer/MERGE
@@ -1,4 +1,4 @@
-221802
+250806
 
 The first line of this file holds the svn revision number of the
 last merge done from the master library sources.
libsanitizer/asan/Makefile.am
@@ -3,7 +3,7 @@ AM_CPPFLAGS = -I $(top_srcdir)/include -I $(top_srcdir)
 # May be used by toolexeclibdir.
 gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
 
-DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DASAN_HAS_EXCEPTIONS=1 -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0 -DASAN_NEEDS_SEGV=1
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DASAN_HAS_EXCEPTIONS=1 -DASAN_NEEDS_SEGV=1 -DCAN_SANITIZE_UB=0
 if USING_MAC_INTERPOSE
 DEFS += -DMAC_INTERPOSE_FUNCTIONS -DMISSING_BLOCKS_SUPPORT
 endif
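The CAN_SANITIZE_UB define added above is a plain compile-time gate: each runtime library is built with it pinned to 0 or 1 via DEFS. As a rough, self-contained sketch of the pattern (illustrative only; the guard below is not code from this patch):

// Hypothetical illustration of a -DCAN_SANITIZE_UB compile-time gate.
// Per the ChangeLog, ASan and TSan are built with -DCAN_SANITIZE_UB=0,
// libubsan with -DCAN_SANITIZE_UB=1.
#if CAN_SANITIZE_UB
static const bool kUbsanSupported = true;   // UBSan hooks compiled in
#else
static const bool kUbsanSupported = false;  // UBSan support compiled out
#endif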
@@ -17,9 +17,10 @@ nodist_toolexeclib_HEADERS = libasan_preinit.o
 
 asan_files = \
 	asan_activation.cc \
-	asan_allocator2.cc \
+	asan_allocator.cc \
 	asan_debugging.cc \
 	asan_fake_stack.cc \
+	asan_flags.cc \
 	asan_globals.cc \
 	asan_interceptors.cc \
 	asan_linux.cc \
@@ -34,6 +35,7 @@ asan_files = \
 	asan_rtl.cc \
 	asan_stack.cc \
 	asan_stats.cc \
+	asan_suppressions.cc \
 	asan_thread.cc \
 	asan_win.cc \
 	asan_win_dll_thunk.cc \
libsanitizer/asan/Makefile.in
@@ -111,14 +111,14 @@ libasan_la_DEPENDENCIES = \
 	$(top_builddir)/sanitizer_common/libsanitizer_common.la \
 	$(top_builddir)/lsan/libsanitizer_lsan.la $(am__append_2) \
 	$(am__append_3) $(am__DEPENDENCIES_1)
-am__objects_1 = asan_activation.lo asan_allocator2.lo \
-	asan_debugging.lo asan_fake_stack.lo asan_globals.lo \
+am__objects_1 = asan_activation.lo asan_allocator.lo asan_debugging.lo \
+	asan_fake_stack.lo asan_flags.lo asan_globals.lo \
 	asan_interceptors.lo asan_linux.lo asan_mac.lo \
 	asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \
 	asan_new_delete.lo asan_poisoning.lo asan_posix.lo \
 	asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \
-	asan_thread.lo asan_win.lo asan_win_dll_thunk.lo \
-	asan_win_dynamic_runtime_thunk.lo
+	asan_suppressions.lo asan_thread.lo asan_win.lo \
+	asan_win_dll_thunk.lo asan_win_dynamic_runtime_thunk.lo
 am_libasan_la_OBJECTS = $(am__objects_1)
 libasan_la_OBJECTS = $(am_libasan_la_OBJECTS)
 libasan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@@ -172,8 +172,8 @@ CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
 DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS \
 	-D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS \
-	-DASAN_HAS_EXCEPTIONS=1 -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0 \
-	-DASAN_NEEDS_SEGV=1 $(am__append_1)
+	-DASAN_HAS_EXCEPTIONS=1 -DASAN_NEEDS_SEGV=1 \
+	-DCAN_SANITIZE_UB=0 $(am__append_1)
 DEPDIR = @DEPDIR@
 DSYMUTIL = @DSYMUTIL@
 DUMPBIN = @DUMPBIN@
@@ -306,9 +306,10 @@ toolexeclib_LTLIBRARIES = libasan.la
 nodist_toolexeclib_HEADERS = libasan_preinit.o
 asan_files = \
 	asan_activation.cc \
-	asan_allocator2.cc \
+	asan_allocator.cc \
 	asan_debugging.cc \
 	asan_fake_stack.cc \
+	asan_flags.cc \
 	asan_globals.cc \
 	asan_interceptors.cc \
 	asan_linux.cc \
@@ -323,6 +324,7 @@ asan_files = \
 	asan_rtl.cc \
 	asan_stack.cc \
 	asan_stats.cc \
+	asan_suppressions.cc \
 	asan_thread.cc \
 	asan_win.cc \
 	asan_win_dll_thunk.cc \
@@ -450,9 +452,10 @@ distclean-compile:
 	-rm -f *.tab.c
 
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_activation.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_debugging.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_flags.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_linux.Plo@am__quote@
@@ -467,6 +470,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_rtl.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stack.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stats.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_suppressions.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_thread.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win_dll_thunk.Plo@am__quote@
libsanitizer/asan/asan_activation.cc
@@ -14,32 +14,106 @@
 #include "asan_allocator.h"
 #include "asan_flags.h"
 #include "asan_internal.h"
+#include "asan_poisoning.h"
+#include "asan_stack.h"
 #include "sanitizer_common/sanitizer_flags.h"
 
 namespace __asan {
 
 static struct AsanDeactivatedFlags {
-  int quarantine_size;
-  int max_redzone;
+  AllocatorOptions allocator_options;
   int malloc_context_size;
   bool poison_heap;
+  bool coverage;
+  const char *coverage_dir;
+
+  void RegisterActivationFlags(FlagParser *parser, Flags *f, CommonFlags *cf) {
+#define ASAN_ACTIVATION_FLAG(Type, Name) \
+    RegisterFlag(parser, #Name, "", &f->Name);
+#define COMMON_ACTIVATION_FLAG(Type, Name) \
+    RegisterFlag(parser, #Name, "", &cf->Name);
+#include "asan_activation_flags.inc"
+#undef ASAN_ACTIVATION_FLAG
+#undef COMMON_ACTIVATION_FLAG
+
+    RegisterIncludeFlags(parser, cf);
+  }
+
+  void OverrideFromActivationFlags() {
+    Flags f;
+    CommonFlags cf;
+    FlagParser parser;
+    RegisterActivationFlags(&parser, &f, &cf);
+
+    // Copy the current activation flags.
+    allocator_options.CopyTo(&f, &cf);
+    cf.malloc_context_size = malloc_context_size;
+    f.poison_heap = poison_heap;
+    cf.coverage = coverage;
+    cf.coverage_dir = coverage_dir;
+    cf.verbosity = Verbosity();
+    cf.help = false; // this is activation-specific help
+
+    // Check if activation flags need to be overriden.
+    if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) {
+      parser.ParseString(env);
+    }
+
+    // Override from getprop asan.options.
+    char buf[100];
+    GetExtraActivationFlags(buf, sizeof(buf));
+    parser.ParseString(buf);
+
+    SetVerbosity(cf.verbosity);
+
+    if (Verbosity()) ReportUnrecognizedFlags();
+
+    if (cf.help) parser.PrintFlagDescriptions();
+
+    allocator_options.SetFrom(&f, &cf);
+    malloc_context_size = cf.malloc_context_size;
+    poison_heap = f.poison_heap;
+    coverage = cf.coverage;
+    coverage_dir = cf.coverage_dir;
+  }
+
+  void Print() {
+    Report(
+        "quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
+        "malloc_context_size %d, alloc_dealloc_mismatch %d, "
+        "allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
+        allocator_options.quarantine_size_mb, allocator_options.max_redzone,
+        poison_heap, malloc_context_size,
+        allocator_options.alloc_dealloc_mismatch,
+        allocator_options.may_return_null, coverage, coverage_dir);
+  }
 } asan_deactivated_flags;
 
 static bool asan_is_deactivated;
 
-void AsanStartDeactivated() {
+void AsanDeactivate() {
+  CHECK(!asan_is_deactivated);
   VReport(1, "Deactivating ASan\n");
-  // Save flag values.
-  asan_deactivated_flags.quarantine_size = flags()->quarantine_size;
-  asan_deactivated_flags.max_redzone = flags()->max_redzone;
-  asan_deactivated_flags.poison_heap = flags()->poison_heap;
-  asan_deactivated_flags.malloc_context_size =
-      common_flags()->malloc_context_size;
-
-  flags()->quarantine_size = 0;
-  flags()->max_redzone = 16;
-  flags()->poison_heap = false;
-  common_flags()->malloc_context_size = 0;
+
+  // Stash runtime state.
+  GetAllocatorOptions(&asan_deactivated_flags.allocator_options);
+  asan_deactivated_flags.malloc_context_size = GetMallocContextSize();
+  asan_deactivated_flags.poison_heap = CanPoisonMemory();
+  asan_deactivated_flags.coverage = common_flags()->coverage;
+  asan_deactivated_flags.coverage_dir = common_flags()->coverage_dir;
+
+  // Deactivate the runtime.
+  SetCanPoisonMemory(false);
+  SetMallocContextSize(1);
+  ReInitializeCoverage(false, nullptr);
+
+  AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
+  disabled.quarantine_size_mb = 0;
+  disabled.min_redzone = 16;  // Redzone must be at least 16 bytes long.
+  disabled.max_redzone = 16;
+  disabled.alloc_dealloc_mismatch = false;
+  disabled.may_return_null = true;
+  ReInitializeAllocator(disabled);
 
   asan_is_deactivated = true;
 }
@@ -48,25 +122,21 @@ void AsanActivate() {
   if (!asan_is_deactivated) return;
   VReport(1, "Activating ASan\n");
 
-  // Restore flag values.
-  // FIXME: this is not atomic, and there may be other threads alive.
-  flags()->quarantine_size = asan_deactivated_flags.quarantine_size;
-  flags()->max_redzone = asan_deactivated_flags.max_redzone;
-  flags()->poison_heap = asan_deactivated_flags.poison_heap;
-  common_flags()->malloc_context_size =
-      asan_deactivated_flags.malloc_context_size;
+  UpdateProcessName();
 
-  ParseExtraActivationFlags();
+  asan_deactivated_flags.OverrideFromActivationFlags();
 
-  ReInitializeAllocator();
+  SetCanPoisonMemory(asan_deactivated_flags.poison_heap);
+  SetMallocContextSize(asan_deactivated_flags.malloc_context_size);
+  ReInitializeCoverage(asan_deactivated_flags.coverage,
+                       asan_deactivated_flags.coverage_dir);
+  ReInitializeAllocator(asan_deactivated_flags.allocator_options);
 
   asan_is_deactivated = false;
-  VReport(
-      1,
-      "quarantine_size %d, max_redzone %d, poison_heap %d, malloc_context_size "
-      "%d\n",
-      flags()->quarantine_size, flags()->max_redzone, flags()->poison_heap,
-      common_flags()->malloc_context_size);
+  if (Verbosity()) {
+    Report("Activated with flags:\n");
+    asan_deactivated_flags.Print();
+  }
 }
 
 } // namespace __asan
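Both hunks above replace direct flag poking with a snapshot-and-replay scheme: AsanDeactivate() stashes the live AllocatorOptions, and AsanActivate() re-applies them after OverrideFromActivationFlags() has merged in ASAN_ACTIVATION_OPTIONS. A minimal sketch of how that environment hook behaves (our illustration, not code from the patch; the option string follows the common sanitizer "name=val:name=val" flag syntax):

// Illustration only, e.g.:
//   ASAN_ACTIVATION_OPTIONS=quarantine_size_mb=64:poison_heap=1
// The runtime-side handling reduces to roughly:
void SketchOverride(FlagParser *parser) {  // hypothetical helper name
  if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS"))
    parser->ParseString(env);  // later assignments override earlier ones
}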
libsanitizer/asan/asan_activation.h
@@ -14,7 +14,7 @@
 #define ASAN_ACTIVATION_H
 
 namespace __asan {
-void AsanStartDeactivated();
+void AsanDeactivate();
 void AsanActivate();
 } // namespace __asan
 
libsanitizer/asan/asan_activation_flags.inc (new file, 33 lines)

//===-- asan_activation_flags.inc -------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// A subset of ASan (and common) runtime flags supported at activation time.
//
//===----------------------------------------------------------------------===//
#ifndef ASAN_ACTIVATION_FLAG
# error "Define ASAN_ACTIVATION_FLAG prior to including this file!"
#endif

#ifndef COMMON_ACTIVATION_FLAG
# error "Define COMMON_ACTIVATION_FLAG prior to including this file!"
#endif

// ASAN_ACTIVATION_FLAG(Type, Name)
// See COMMON_FLAG in sanitizer_flags.inc for more details.

ASAN_ACTIVATION_FLAG(int, redzone)
ASAN_ACTIVATION_FLAG(int, max_redzone)
ASAN_ACTIVATION_FLAG(int, quarantine_size_mb)
ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch)
ASAN_ACTIVATION_FLAG(bool, poison_heap)

COMMON_ACTIVATION_FLAG(bool, allocator_may_return_null)
COMMON_ACTIVATION_FLAG(int, malloc_context_size)
COMMON_ACTIVATION_FLAG(bool, coverage)
COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
COMMON_ACTIVATION_FLAG(int, verbosity)
COMMON_ACTIVATION_FLAG(bool, help)
libsanitizer/asan/asan_allocator.cc (new file, 906 lines)

//===-- asan_allocator.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"

namespace __asan {

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static AsanAllocator &get_allocator();

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;

struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
               get_allocator().GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return get_allocator().GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return get_allocator().Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
}

void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1UL << 40);
  static const uptr kMaxThreadLocalQuarantine =
      FIRST_32_SECOND_64(1 << 18, 1 << 20);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  // ------------------- Options --------------------------
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}

  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }

  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    kMaxThreadLocalQuarantine);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }

  void Initialize(const AllocatorOptions &options) {
    allocator.Init(options.may_return_null);
    SharedInitCode(options);
  }

  void ReInitialize(const AllocatorOptions &options) {
    allocator.SetMayReturnNull(options.may_return_null);
    SharedInitCode(options);
  }

  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = allocator.MayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
  }

  // -------------------- Helper methods. -------------------------
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log =
      user_requested_size <= 64        - 16   ? 0 :
      user_requested_size <= 128       - 32   ? 1 :
      user_requested_size <= 512       - 64   ? 2 :
      user_requested_size <= 4096      - 128  ? 3 :
      user_requested_size <= (1 << 14) - 256  ? 4 :
      user_requested_size <= (1 << 15) - 512  ? 5 :
      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
    u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
    u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
    return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    // Prefer an allocated chunk over freed chunk and freed chunk
    // over available chunk.
    if (left_chunk->chunk_state != right_chunk->chunk_state) {
      if (left_chunk->chunk_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_chunk->chunk_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = SHADOW_GRANULARITY;
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;
    bool using_primary_allocator = true;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
      needed_size += rz_size;
      using_primary_allocator = false;
    }
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
      Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
             (void*)size);
      return allocator.ReturnNullOrDie();
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    bool check_rss_limit = true;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated =
          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated =
          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    }

    if (!allocated)
      return allocator.ReturnNullOrDie();

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_redzone = alloc_beg + rz_size;
    uptr user_beg = beg_plus_redzone;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    m->rz_log = rz_log;
    u32 alloc_tid = t ? t->tid() : 0;
    m->alloc_tid = alloc_tid;
    CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
    m->free_tid = kInvalidTid;
    m->from_memalign = user_beg != beg_plus_redzone;
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
    }
    if (using_primary_allocator) {
      CHECK(size);
      m->user_requested_size = size;
      CHECK(allocator.FromPrimary(allocated));
    } else {
      CHECK(!allocator.FromPrimary(allocated));
      m->user_requested_size = SizeClassMap::kMaxSize;
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
      meta[0] = size;
      meta[1] = chunk_beg;
    }

    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
    ASAN_MALLOC_HOOK(res, size);
    return res;
  }

  void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
                                   BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                        CHUNK_QUARANTINE, memory_order_acquire))
      ReportInvalidFree(ptr, old_chunk_state, stack);
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
  }

  // Expects the chunk to already be marked as quarantined by using
  // AtomicallySetQuarantineFlag.
  void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
                       AllocType alloc_type) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);

    if (m->alloc_type != alloc_type) {
      if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
        ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                                (AllocType)alloc_type);
      }
    }

    CHECK_GE(m->alloc_tid, 0);
    if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
      CHECK_EQ(m->free_tid, kInvalidTid);
    AsanThread *t = GetCurrentThread();
    m->free_tid = t ? t->tid() : 0;
    m->free_context_id = StackDepotPut(*stack);
    // Poison the region.
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapFreeMagic);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += m->UsedSize();

    // Push into quarantine.
    if (t) {
      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
      AllocatorCache *ac = GetAllocatorCache(ms);
      quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
                     m->UsedSize());
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *ac = &fallback_allocator_cache;
      quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
                     m->UsedSize());
    }
  }

  void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
                  AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0) return;

    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    if (delete_size && flags()->new_delete_type_mismatch &&
        delete_size != m->UsedSize()) {
      ReportNewDeleteSizeMismatch(p, delete_size, stack);
    }
    ASAN_FREE_HOOK(ptr);
    // Must mark the chunk as quarantined before any changes to its metadata.
    AtomicallySetQuarantineFlag(m, ptr, stack);
    QuarantineChunk(m, ptr, stack, alloc_type);
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
    if (new_ptr) {
      u8 chunk_state = m->chunk_state;
      if (chunk_state != CHUNK_ALLOCATED)
        ReportInvalidFree(old_ptr, chunk_state, stack);
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      // If realloc() races with free(), we may start copying freed memory.
      // However, we will report racy double-free later anyway.
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (CallocShouldReturnNullDueToOverflow(size, nmemb))
      return allocator.ReturnNullOrDie();
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
    if (chunk_state == CHUNK_QUARANTINE)
      ReportDoubleFree((uptr)ptr, stack);
    else
      ReportFreeNotMalloced((uptr)ptr, stack);
  }

  void CommitBack(AsanThreadLocalMallocStorage *ms) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac));
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  AsanChunk *GetAsanChunk(void *alloc_beg) {
    if (!alloc_beg) return nullptr;
    if (!allocator.FromPrimary(alloc_beg)) {
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
      AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
      return m;
    }
    uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
    if (alloc_magic[0] == kAllocBegMagic)
      return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
    return reinterpret_cast<AsanChunk *>(alloc_beg);
  }

  AsanChunk *GetAsanChunkByAddr(uptr p) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  // Allocator must be locked when this function is called.
  AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
    void *alloc_beg =
        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
    return GetAsanChunk(alloc_beg);
  }

  uptr AllocationSize(uptr p) {
    AsanChunk *m = GetAsanChunkByAddr(p);
    if (!m) return 0;
    if (m->chunk_state != CHUNK_ALLOCATED) return 0;
    if (m->Beg() != p) return 0;
    return m->UsedSize();
  }

  AsanChunkView FindHeapChunkByAddress(uptr addr) {
    AsanChunk *m1 = GetAsanChunkByAddr(addr);
    if (!m1) return AsanChunkView(m1);
    sptr offset = 0;
    if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
      // The address is in the chunk's left redzone, so maybe it is actually
      // a right buffer overflow from the other chunk to the left.
      // Search a bit to the left to see if there is another chunk.
      AsanChunk *m2 = nullptr;
      for (uptr l = 1; l < GetPageSizeCached(); l++) {
        m2 = GetAsanChunkByAddr(addr - l);
        if (m2 == m1) continue;  // Still the same chunk.
        break;
      }
      if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
        m1 = ChooseChunk(addr, m2, m1);
    }
    return AsanChunkView(m1);
  }

  void PrintStats() {
    allocator.PrintStats();
  }

  void ForceLock() {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static AsanAllocator &get_allocator() {
  return instance.allocator;
}

bool AsanChunkView::IsValid() {
  return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

StackTrace AsanChunkView::GetAllocStack() {
  return GetStackTraceFromId(chunk_->alloc_context_id);
}

StackTrace AsanChunkView::GetFreeStack() {
  return GetStackTraceFromId(chunk_->free_context_id);
}

void InitializeAllocator(const AllocatorOptions &options) {
  instance.Initialize(options);
}

void ReInitializeAllocator(const AllocatorOptions &options) {
  instance.ReInitialize(options);
}

void GetAllocatorOptions(AllocatorOptions *options) {
  instance.GetOptions(options);
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  return instance.FindHeapChunkByAddress(addr);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  instance.CommitBack(this);
}

void PrintInternalAllocatorStats() {
  instance.PrintStats();
}

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  return instance.Allocate(size, alignment, stack, alloc_type, true);
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, stack, alloc_type);
}

void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
                     AllocType alloc_type) {
  instance.Deallocate(ptr, size, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return instance.Calloc(nmemb, size, stack);
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    instance.Deallocate(p, 0, stack, FROM_MALLOC);
    return nullptr;
  }
  return instance.Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  instance.ForceLock();
}

void asan_mz_force_unlock() {
  instance.ForceUnlock();
}

void AsanSoftRssLimitExceededCallback(bool exceeded) {
  instance.allocator.SetRssLimitIsExceeded(exceeded);
}

} // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::get_allocator().ForceLock();
}

void UnlockAllocator() {
  __asan::get_allocator().ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::get_allocator();
  *end = *begin + sizeof(__asan::get_allocator());
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::get_allocator().ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return instance.AllocationSize(ptr) > 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (!p) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = instance.AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif
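One detail worth making concrete from the allocator above: redzone sizes are powers of two from 16 to 2048 bytes, encoded in the 3-bit rz_log field, and ComputeRZLog() picks a bucket by request size before clamping to [min_redzone, max_redzone]. A tiny self-checking sketch (our illustration, not code from the file):

#include <cassert>

// Mirrors RZLog2Size() above: rz_log 0..7 encodes a redzone of 16 << rz_log.
static unsigned DemoRZLog2Size(unsigned rz_log) { return 16u << rz_log; }

int main() {
  assert(DemoRZLog2Size(0) == 16);    // e.g. a 40-byte request (<= 64 - 16)
  assert(DemoRZLog2Size(3) == 128);   // e.g. a 1000-byte request (<= 4096 - 128)
  assert(DemoRZLog2Size(7) == 2048);  // the largest bucket
  return 0;
}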
libsanitizer/asan/asan_allocator.h
@@ -7,12 +7,13 @@
 //
 // This file is a part of AddressSanitizer, an address sanity checker.
 //
-// ASan-private header for asan_allocator2.cc.
+// ASan-private header for asan_allocator.cc.
 //===----------------------------------------------------------------------===//
 
 #ifndef ASAN_ALLOCATOR_H
 #define ASAN_ALLOCATOR_H
 
+#include "asan_flags.h"
 #include "asan_internal.h"
 #include "asan_interceptors.h"
 #include "sanitizer_common/sanitizer_allocator.h"
@@ -26,11 +27,22 @@ enum AllocType {
   FROM_NEW_BR = 3    // Memory block came from operator new [ ]
 };
 
-static const uptr kNumberOfSizeClasses = 255;
 struct AsanChunk;
 
-void InitializeAllocator();
-void ReInitializeAllocator();
+struct AllocatorOptions {
+  u32 quarantine_size_mb;
+  u16 min_redzone;
+  u16 max_redzone;
+  u8 may_return_null;
+  u8 alloc_dealloc_mismatch;
+
+  void SetFrom(const Flags *f, const CommonFlags *cf);
+  void CopyTo(Flags *f, CommonFlags *cf);
+};
+
+void InitializeAllocator(const AllocatorOptions &options);
+void ReInitializeAllocator(const AllocatorOptions &options);
+void GetAllocatorOptions(AllocatorOptions *options);
 
 class AsanChunkView {
  public:
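The new AllocatorOptions struct is the hand-off point between flag parsing and the allocator proper. A rough sketch of the round trip it enables (our paraphrase of what asan_activation.cc does with these functions; the function and variable names in the body are ours):

// Illustration of the snapshot/replay flow enabled by AllocatorOptions.
void DemoDeactivateReactivate() {
  AllocatorOptions saved;
  GetAllocatorOptions(&saved);                 // snapshot live settings
  AllocatorOptions quiet = saved;
  quiet.quarantine_size_mb = 0;                // no quarantine while deactivated
  quiet.min_redzone = quiet.max_redzone = 16;  // smallest legal redzones
  ReInitializeAllocator(quiet);                // apply reduced configuration
  // ... later ...
  ReInitializeAllocator(saved);                // restore the stashed options
}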
@@ -100,6 +112,11 @@ struct AsanMapUnmapCallback {
 # if defined(__powerpc64__)
 const uptr kAllocatorSpace = 0xa0000000000ULL;
 const uptr kAllocatorSize  = 0x20000000000ULL;  // 2T.
+# elif defined(__aarch64__)
+// AArch64/SANITIZIER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
+// so no need to different values for different VMA.
+const uptr kAllocatorSpace = 0x10000000000ULL;
+const uptr kAllocatorSize  = 0x10000000000ULL;  // 3T.
 # else
 const uptr kAllocatorSpace = 0x600000000000ULL;
 const uptr kAllocatorSize  = 0x40000000000ULL;  // 4T.
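A quick sanity check on the constants just above (our note, not part of the patch): 0x10000000000 is 1 << 40, i.e. 1 TiB, so the aarch64 region added here is 1 TiB despite the upstream "// 3T." comment; the other size comments match their values:

// Self-checking note on the quoted constants (C++11 static_assert).
static_assert(0x20000000000ULL == (2ULL << 40), "powerpc64 size: 2 TiB");
static_assert(0x10000000000ULL == (1ULL << 40), "aarch64 size: 1 TiB, not 3");
static_assert(0x40000000000ULL == (4ULL << 40), "default size: 4 TiB");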
@@ -122,15 +139,16 @@ typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16,
           AsanMapUnmapCallback> PrimaryAllocator;
 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
 
+static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;
 typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
 typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
 typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
-    SecondaryAllocator> Allocator;
+    SecondaryAllocator> AsanAllocator;
 
 
 struct AsanThreadLocalMallocStorage {
   uptr quarantine_cache[16];
-  AllocatorCache allocator2_cache;
+  AllocatorCache allocator_cache;
   void CommitBack();
  private:
   // These objects are allocated via mmap() and are zero-initialized.
@@ -158,6 +176,7 @@ void asan_mz_force_lock();
 void asan_mz_force_unlock();
 
 void PrintInternalAllocatorStats();
+void AsanSoftRssLimitExceededCallback(bool exceeded);
 
 } // namespace __asan
 #endif  // ASAN_ALLOCATOR_H
@ -1,790 +0,0 @@
|
|||
//===-- asan_allocator2.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"

#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"

namespace __asan {

void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator2_cache;
}

static Allocator allocator;

static const uptr kMaxAllowedMallocSize =
    FIRST_32_SECOND_64(3UL << 30, 64UL << 30);

static const uptr kMaxThreadLocalQuarantine =
    FIRST_32_SECOND_64(1 << 18, 1 << 20);

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};

// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 16 << rz_log;
}

static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));
  return res;
}

static uptr ComputeRZLog(uptr user_requested_size) {
  u32 rz_log =
    user_requested_size <= 64        - 16   ? 0 :
    user_requested_size <= 128       - 32   ? 1 :
    user_requested_size <= 512       - 64   ? 2 :
    user_requested_size <= 4096      - 128  ? 3 :
    user_requested_size <= (1 << 14) - 256  ? 4 :
    user_requested_size <= (1 << 15) - 512  ? 5 :
    user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
  return Min(Max(rz_log, RZSize2Log(flags()->redzone)),
             RZSize2Log(flags()->max_redzone));
}

// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.

// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;

struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state       : 8;  // Must be first.
  u32 alloc_tid         : 24;

  u32 free_tid          : 24;
  u32 from_memalign     : 1;
  u32 alloc_type        : 2;
  u32 rz_log            : 3;
  u32 lsan_tag          : 2;
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;
};

struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);

struct AsanChunk: ChunkBase {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
               allocator.GetMetaData(AllocBeg(locked_version)));
  }
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      if (locked_version)
        return allocator.GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};

bool AsanChunkView::IsValid() {
  return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

static StackTrace GetStackTraceFromId(u32 id) {
  CHECK(id);
  StackTrace res = StackDepotGet(id);
  CHECK(res.trace);
  return res;
}

StackTrace AsanChunkView::GetAllocStack() {
  return GetStackTraceFromId(chunk_->alloc_context_id);
}

StackTrace AsanChunkView::GetFreeStack() {
  return GetStackTraceFromId(chunk_->free_context_id);
}

struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}

struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    allocator.Deallocate(cache_, p);
  }

  void *Allocate(uptr size) {
    return allocator.Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    allocator.Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

void InitializeAllocator() {
  allocator.Init();
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

void ReInitializeAllocator() {
  quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}

static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                      AllocType alloc_type, bool can_fill) {
  if (UNLIKELY(!asan_inited))
    AsanInitFromRtl();
  Flags &fl = *flags();
  CHECK(stack);
  const uptr min_alignment = SHADOW_GRANULARITY;
  if (alignment < min_alignment)
    alignment = min_alignment;
  if (size == 0) {
    // We'd be happy to avoid allocating memory for zero-size requests, but
    // some programs/tests depend on this behavior and assume that malloc would
    // not return NULL even for zero-size allocations. Moreover, it looks like
    // operator new should never return NULL, and results of consecutive "new"
    // calls must be different even if the allocated size is zero.
    size = 1;
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rz_log = ComputeRZLog(size);
  uptr rz_size = RZLog2Size(rz_log);
  uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
  uptr needed_size = rounded_size + rz_size;
  if (alignment > min_alignment)
    needed_size += alignment;
  bool using_primary_allocator = true;
  // If we are allocating from the secondary allocator, there will be no
  // automatic right redzone, so add the right redzone manually.
  if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
    needed_size += rz_size;
    using_primary_allocator = false;
  }
  CHECK(IsAligned(needed_size, min_alignment));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return AllocatorReturnNull();
  }

  AsanThread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, needed_size, 8, false);
  }

  if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) {
    // Heap poisoning is enabled, but the allocator provides an unpoisoned
    // chunk. This is possible if flags()->poison_heap was disabled for some
    // time, for example, due to flags()->start_disabled.
    // Anyway, poison the block before using it for anything else.
    uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
    PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
  }

  uptr alloc_beg = reinterpret_cast<uptr>(allocated);
  uptr alloc_end = alloc_beg + needed_size;
  uptr beg_plus_redzone = alloc_beg + rz_size;
  uptr user_beg = beg_plus_redzone;
  if (!IsAligned(user_beg, alignment))
    user_beg = RoundUpTo(user_beg, alignment);
  uptr user_end = user_beg + size;
  CHECK_LE(user_end, alloc_end);
  uptr chunk_beg = user_beg - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  m->alloc_type = alloc_type;
  m->rz_log = rz_log;
  u32 alloc_tid = t ? t->tid() : 0;
  m->alloc_tid = alloc_tid;
  CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
  m->free_tid = kInvalidTid;
  m->from_memalign = user_beg != beg_plus_redzone;
  if (alloc_beg != chunk_beg) {
    CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
    reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
    reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
  }
  if (using_primary_allocator) {
    CHECK(size);
    m->user_requested_size = size;
    CHECK(allocator.FromPrimary(allocated));
  } else {
    CHECK(!allocator.FromPrimary(allocated));
    m->user_requested_size = SizeClassMap::kMaxSize;
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
    meta[0] = size;
    meta[1] = chunk_beg;
  }

  m->alloc_context_id = StackDepotPut(*stack);

  uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
  // Unpoison the bulk of the memory region.
  if (size_rounded_down_to_granularity)
    PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
  // Deal with the end of the region if size is not aligned to granularity.
  if (size != size_rounded_down_to_granularity && fl.poison_heap) {
    u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
    *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
  }

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += needed_size - size;
  uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
  thread_stats.malloced_by_size[class_id]++;
  if (needed_size > SizeClassMap::kMaxSize)
    thread_stats.malloc_large++;

  void *res = reinterpret_cast<void *>(user_beg);
  if (can_fill && fl.max_malloc_fill_size) {
    uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
    REAL(memset)(res, fl.malloc_fill_byte, fill_size);
  }
#if CAN_SANITIZE_LEAKS
  m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                               : __lsan::kDirectlyLeaked;
#endif
  // Must be the last mutation of metadata in this function.
  atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
  ASAN_MALLOC_HOOK(res, size);
  return res;
}

static void ReportInvalidFree(void *ptr, u8 chunk_state,
                              BufferedStackTrace *stack) {
  if (chunk_state == CHUNK_QUARANTINE)
    ReportDoubleFree((uptr)ptr, stack);
  else
    ReportFreeNotMalloced((uptr)ptr, stack);
}

static void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
                                        BufferedStackTrace *stack) {
  u8 old_chunk_state = CHUNK_ALLOCATED;
  // Flip the chunk_state atomically to avoid race on double-free.
  if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                      CHUNK_QUARANTINE, memory_order_acquire))
    ReportInvalidFree(ptr, old_chunk_state, stack);
  CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
}

// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
                            AllocType alloc_type) {
  CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);

  if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
    ReportAllocTypeMismatch((uptr)ptr, stack,
                            (AllocType)m->alloc_type, (AllocType)alloc_type);

  CHECK_GE(m->alloc_tid, 0);
  if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
    CHECK_EQ(m->free_tid, kInvalidTid);
  AsanThread *t = GetCurrentThread();
  m->free_tid = t ? t->tid() : 0;
  m->free_context_id = StackDepotPut(*stack);
  // Poison the region.
  PoisonShadow(m->Beg(),
               RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
               kAsanHeapFreeMagic);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->UsedSize();

  // Push into quarantine.
  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    AllocatorCache *ac = GetAllocatorCache(ms);
    quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
                   m, m->UsedSize());
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *ac = &fallback_allocator_cache;
    quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
                   m, m->UsedSize());
  }
}

static void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
                       AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;

  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  if (delete_size && flags()->new_delete_type_mismatch &&
      delete_size != m->UsedSize()) {
    ReportNewDeleteSizeMismatch(p, delete_size, stack);
  }
  ASAN_FREE_HOOK(ptr);
  // Must mark the chunk as quarantined before any changes to its metadata.
  AtomicallySetQuarantineFlag(m, ptr, stack);
  QuarantineChunk(m, ptr, stack, alloc_type);
}

static void *Reallocate(void *old_ptr, uptr new_size,
                        BufferedStackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    u8 chunk_state = m->chunk_state;
    if (chunk_state != CHUNK_ALLOCATED)
      ReportInvalidFree(old_ptr, chunk_state, stack);
    CHECK_NE(REAL(memcpy), (void*)0);
    uptr memcpy_size = Min(new_size, m->UsedSize());
    // If realloc() races with free(), we may start copying freed memory.
    // However, we will report racy double-free later anyway.
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, 0, stack, FROM_MALLOC);
  }
  return new_ptr;
}

// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
static AsanChunk *GetAsanChunk(void *alloc_beg) {
  if (!alloc_beg) return 0;
  if (!allocator.FromPrimary(alloc_beg)) {
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (alloc_magic[0] == kAllocBegMagic)
    return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
  return reinterpret_cast<AsanChunk *>(alloc_beg);
}

static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

// Allocator must be locked when this function is called.
static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
  void *alloc_beg =
      allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m) return 0;
  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
  if (m->Beg() != p) return 0;
  return m->UsedSize();
}

// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Prefer an allocated chunk over freed chunk and freed chunk
  // over available chunk.
  if (left_chunk->chunk_state != right_chunk->chunk_state) {
    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
      return right_chunk;
    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
      return left_chunk;
    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
      return right_chunk;
  }
  // Same chunk_state: choose based on offset.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  if (l_offset < r_offset)
    return left_chunk;
  return right_chunk;
}

AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}

void AsanThreadLocalMallocStorage::CommitBack() {
  AllocatorCache *ac = GetAllocatorCache(this);
  quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
  allocator.SwallowCache(GetAllocatorCache(this));
}

void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}

void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, 0, stack, alloc_type);
}

void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
                     AllocType alloc_type) {
  Deallocate(ptr, size, stack, alloc_type);
}

void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}

void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return AllocatorReturnNull();
  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator no need to clear it
  // as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (p == 0)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    Deallocate(p, 0, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}

void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}

void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}

}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
  __asan::allocator.ForceLock();
}

void UnlockAllocator() {
  __asan::allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::allocator;
  *end = *begin + sizeof(__asan::allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m =
      __asan::GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
  if (!m) return kIgnoreObjectInvalid;
  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
    if (m->lsan_tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->lsan_tag = __lsan::kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return (AllocationSize(ptr) > 0);
}

uptr __sanitizer_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif
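
Editorial note: the deleted allocator above picks redzone sizes adaptively from the request size. A minimal standalone sketch of that arithmetic, not part of the patch, assuming the default flag bounds redzone=16 (log 0) and max_redzone=2048 (log 7) so no clamping is needed:

#include <cstdint>
#include <cstdio>

static uint32_t RZLog2Size(uint32_t rz_log) { return 16 << rz_log; }

static uint32_t ComputeRZLog(uint64_t n) {
  // Thresholds leave room for the 16-byte chunk header inside the redzone.
  return n <= 64 - 16          ? 0
       : n <= 128 - 32         ? 1
       : n <= 512 - 64         ? 2
       : n <= 4096 - 128       ? 3
       : n <= (1 << 14) - 256  ? 4
       : n <= (1 << 15) - 512  ? 5
       : n <= (1 << 16) - 1024 ? 6
                               : 7;
}

int main() {
  const uint64_t sizes[] = {16, 100, 4000, 1 << 20};
  for (uint64_t n : sizes)
    std::printf("request %8llu -> redzone %4u bytes\n",
                (unsigned long long)n, RZLog2Size(ComputeRZLog(n)));
}

So a 16-byte request gets the minimum 16-byte redzone, while a 1 MB request gets the full 2048 bytes; larger blocks can afford proportionally larger overflow-detection margins.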

@ -79,8 +79,8 @@ void AsanLocateAddress(uptr addr, AddressDescription *descr) {
  GetInfoForHeapAddress(addr, descr);
}

uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id,
                  bool alloc_stack) {
static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
                         bool alloc_stack) {
  AsanChunkView chunk = FindHeapChunkByAddress(addr);
  if (!chunk.IsValid()) return 0;

@ -106,14 +106,14 @@ uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id,
  return 0;
}

}  // namespace __asan

using namespace __asan;

SANITIZER_INTERFACE_ATTRIBUTE
const char *__asan_locate_address(uptr addr, char *name, uptr name_size,
                                  uptr *region_address, uptr *region_size) {
  AddressDescription descr = { name, name_size, 0, 0, 0 };
  AddressDescription descr = { name, name_size, 0, 0, nullptr };
  AsanLocateAddress(addr, &descr);
  if (region_address) *region_address = descr.region_address;
  if (region_size) *region_size = descr.region_size;
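
For illustration, a hypothetical caller of the __asan_locate_address() entry point shown above (the hunk is cut short by the diff context; the prototype is restated by hand with unsigned long standing in for uptr on the LP64 targets this library builds for):

extern "C" const char *__asan_locate_address(unsigned long addr, char *name,
                                             unsigned long name_size,
                                             unsigned long *region_address,
                                             unsigned long *region_size);

void describe(void *p) {
  char name[128];
  unsigned long beg = 0, size = 0;
  const char *kind = __asan_locate_address(
      reinterpret_cast<unsigned long>(p), name, sizeof(name), &beg, &size);
  // kind names the region ("global", "stack", "heap", ...); name holds the
  // symbol when one is known.
  (void)kind;
}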
@ -9,6 +9,7 @@
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_poisoning.h"
#include "asan_thread.h"

@ -20,13 +21,19 @@ static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;

static const u64 kAllocaRedzoneSize = 32UL;
static const u64 kAllocaRedzoneMask = 31UL;

// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
  CHECK_EQ(SHADOW_SCALE, 3);  // This code expects SHADOW_SCALE=3.
  u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
  if (class_id <= 6) {
    for (uptr i = 0; i < (1U << class_id); i++)
    for (uptr i = 0; i < (1U << class_id); i++) {
      shadow[i] = magic;
      // Make sure this does not become memset.
      SanitizerBreakOptimization(nullptr);
    }
  } else {
    // The size class is too big, it's cheaper to poison only size bytes.
    PoisonShadow(ptr, size, static_cast<u8>(magic));

@ -56,7 +63,7 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {

void FakeStack::Destroy(int tid) {
  PoisonAll(0);
  if (common_flags()->verbosity >= 2) {
  if (Verbosity() >= 2) {
    InternalScopedString str(kNumberOfSizeClasses * 50);
    for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
      str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],

@ -73,7 +80,9 @@ void FakeStack::PoisonAll(u8 magic) {
               magic);
}

#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED
#endif
FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
                               uptr real_stack) {
  CHECK_LT(class_id, kNumberOfSizeClasses);

@ -99,7 +108,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
    *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
    return res;
  }
  return 0;  // We are out of fake stack.
  return nullptr;  // We are out of fake stack.
}

uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {

@ -176,7 +185,7 @@ void SetTLSFakeStack(FakeStack *fs) { }

static FakeStack *GetFakeStack() {
  AsanThread *t = GetCurrentThread();
  if (!t) return 0;
  if (!t) return nullptr;
  return t->fake_stack();
}

@ -184,40 +193,39 @@ static FakeStack *GetFakeStackFast() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  if (!__asan_option_detect_stack_use_after_return)
    return 0;
    return nullptr;
  return GetFakeStack();
}

ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) {
ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
  FakeStack *fs = GetFakeStackFast();
  if (!fs) return real_stack;
  if (!fs) return 0;
  uptr local_stack;
  uptr real_stack = reinterpret_cast<uptr>(&local_stack);
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
  if (!ff)
    return real_stack;  // Out of fake stack, return the real one.
  if (!ff) return 0;  // Out of fake stack.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
  if (ptr == real_stack)
    return;
ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
  FakeStack::Deallocate(ptr, class_id);
  SetShadow(ptr, size, class_id, kMagic8);
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
  __asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \
    return OnMalloc(class_id, size, real_stack); \
  __asan_stack_malloc_##class_id(uptr size) { \
    return OnMalloc(class_id, size); \
  } \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
      uptr ptr, uptr size, uptr real_stack) { \
    OnFree(ptr, class_id, size, real_stack); \
      uptr ptr, uptr size) { \
    OnFree(ptr, class_id, size); \
  }

DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)

@ -239,15 +247,35 @@ SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
                                   void **end) {
  FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
  if (!fs) return 0;
  if (!fs) return nullptr;
  uptr frame_beg, frame_end;
  FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
      reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
  if (!frame) return 0;
  if (!frame) return nullptr;
  if (frame->magic != kCurrentStackFrameMagic)
    return 0;
    return nullptr;
  if (beg) *beg = reinterpret_cast<void*>(frame_beg);
  if (end) *end = reinterpret_cast<void*>(frame_end);
  return reinterpret_cast<void*>(frame->real_stack);
}
}  // extern "C"

SANITIZER_INTERFACE_ATTRIBUTE
void __asan_alloca_poison(uptr addr, uptr size) {
  uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
  uptr PartialRzAddr = addr + size;
  uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
  uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1);
  FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
  FastPoisonShadowPartialRightRedzone(
      PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY,
      RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
  FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
  if ((!top) || (top > bottom)) return;
  REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
               (bottom - top) / SHADOW_GRANULARITY);
}
}  // extern "C"
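
Note the v5 interface change visible above: __asan_stack_malloc_N no longer receives or echoes back the caller's real stack address; it returns 0 when no fake frame is available. A hedged sketch of the caller-side protocol instrumented code is expected to follow (frame size and class id here are illustrative, not taken from the patch):

extern "C" unsigned long __asan_stack_malloc_1(unsigned long size);
extern "C" void __asan_stack_free_1(unsigned long ptr, unsigned long size);

void instrumented_frame_sketch() {
  const unsigned long kFrameSize = 128;  // illustrative frame size
  char real_frame[128];                  // fallback storage on the real stack
  unsigned long fake = __asan_stack_malloc_1(kFrameSize);
  // A zero return means "out of fake stack": fall back to the real frame.
  char *frame = fake ? reinterpret_cast<char *>(fake) : real_frame;
  // ... the function's addressable locals live in `frame` ...
  (void)frame;
  if (fake) __asan_stack_free_1(fake, kFrameSize);
}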

177 libsanitizer/asan/asan_flags.cc Normal file

@ -0,0 +1,177 @@
//===-- asan_flags.cc -------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan flag parsing logic.
//===----------------------------------------------------------------------===//

#include "asan_activation.h"
#include "asan_flags.h"
#include "asan_interface_internal.h"
#include "asan_stack.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "ubsan/ubsan_flags.h"
#include "ubsan/ubsan_platform.h"

namespace __asan {

Flags asan_flags_dont_use_directly;  // use via flags().

static const char *MaybeCallAsanDefaultOptions() {
  return (&__asan_default_options) ? __asan_default_options() : "";
}

static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
#ifdef ASAN_DEFAULT_OPTIONS
// Stringize the macro value.
# define ASAN_STRINGIZE(x) #x
# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
  return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
#else
  return "";
#endif
}

void Flags::SetDefaults() {
#define ASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "asan_flags.inc"
#undef ASAN_FLAG
}

static void RegisterAsanFlags(FlagParser *parser, Flags *f) {
#define ASAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "asan_flags.inc"
#undef ASAN_FLAG
}

void InitializeFlags() {
  // Set the default values and prepare for parsing ASan and common flags.
  SetCommonFlagsDefaults();
  {
    CommonFlags cf;
    cf.CopyFrom(*common_flags());
    cf.detect_leaks = CAN_SANITIZE_LEAKS;
    cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
    cf.malloc_context_size = kDefaultMallocContextSize;
    cf.intercept_tls_get_addr = true;
    cf.exitcode = 1;
    OverrideCommonFlags(cf);
  }
  Flags *f = flags();
  f->SetDefaults();

  FlagParser asan_parser;
  RegisterAsanFlags(&asan_parser, f);
  RegisterCommonFlags(&asan_parser);

  // Set the default values and prepare for parsing LSan and UBSan flags
  // (which can also overwrite common flags).
#if CAN_SANITIZE_LEAKS
  __lsan::Flags *lf = __lsan::flags();
  lf->SetDefaults();

  FlagParser lsan_parser;
  __lsan::RegisterLsanFlags(&lsan_parser, lf);
  RegisterCommonFlags(&lsan_parser);
#endif

#if CAN_SANITIZE_UB
  __ubsan::Flags *uf = __ubsan::flags();
  uf->SetDefaults();

  FlagParser ubsan_parser;
  __ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
  RegisterCommonFlags(&ubsan_parser);
#endif

  // Override from ASan compile definition.
  const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition();
  asan_parser.ParseString(asan_compile_def);

  // Override from user-specified string.
  const char *asan_default_options = MaybeCallAsanDefaultOptions();
  asan_parser.ParseString(asan_default_options);
#if CAN_SANITIZE_UB
  const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
  ubsan_parser.ParseString(ubsan_default_options);
#endif

  // Override from command line.
  asan_parser.ParseString(GetEnv("ASAN_OPTIONS"));
#if CAN_SANITIZE_LEAKS
  lsan_parser.ParseString(GetEnv("LSAN_OPTIONS"));
#endif
#if CAN_SANITIZE_UB
  ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS"));
#endif

  // Let activation flags override current settings. On Android they come
  // from a system property. On other platforms this is no-op.
  if (!flags()->start_deactivated) {
    char buf[100];
    GetExtraActivationFlags(buf, sizeof(buf));
    asan_parser.ParseString(buf);
  }

  SetVerbosity(common_flags()->verbosity);

  // TODO(eugenis): dump all flags at verbosity>=2?
  if (Verbosity()) ReportUnrecognizedFlags();

  if (common_flags()->help) {
    // TODO(samsonov): print all of the flags (ASan, LSan, common).
    asan_parser.PrintFlagDescriptions();
  }

  // Flag validation:
  if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
    Report("%s: detect_leaks is not supported on this platform.\n",
           SanitizerToolName);
    Die();
  }
  // Make "strict_init_order" imply "check_initialization_order".
  // TODO(samsonov): Use a single runtime flag for an init-order checker.
  if (f->strict_init_order) {
    f->check_initialization_order = true;
  }
  CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax);
  CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log);
  CHECK_GE(f->redzone, 16);
  CHECK_GE(f->max_redzone, f->redzone);
  CHECK_LE(f->max_redzone, 2048);
  CHECK(IsPowerOfTwo(f->redzone));
  CHECK(IsPowerOfTwo(f->max_redzone));

  // quarantine_size is deprecated but we still honor it.
  // quarantine_size can not be used together with quarantine_size_mb.
  if (f->quarantine_size >= 0 && f->quarantine_size_mb >= 0) {
    Report("%s: please use either 'quarantine_size' (deprecated) or "
           "quarantine_size_mb, but not both\n", SanitizerToolName);
    Die();
  }
  if (f->quarantine_size >= 0)
    f->quarantine_size_mb = f->quarantine_size >> 20;
  if (f->quarantine_size_mb < 0) {
    const int kDefaultQuarantineSizeMb =
        (ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
    f->quarantine_size_mb = kDefaultQuarantineSizeMb;
  }
}

}  // namespace __asan

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char* __asan_default_options() { return ""; }
}  // extern "C"
#endif
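
Usage note: the weak __asan_default_options() hook declared just above lets an application bake in its own defaults, which InitializeFlags() parses before ASAN_OPTIONS, so the environment still wins. An illustrative override (flag names taken from asan_flags.inc; the values are examples only):

extern "C" const char *__asan_default_options() {
  return "quarantine_size_mb=64:max_redzone=1024";
}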

@ -14,6 +14,7 @@
#define ASAN_FLAGS_H

#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_flag_parser.h"

// ASan flag values can be defined in four ways:
// 1) initialized with default values at startup.

@ -22,55 +23,24 @@
// 3) overridden from string returned by user-specified function
//    __asan_default_options().
// 4) overridden from env variable ASAN_OPTIONS.
// 5) overridden during ASan activation (for now used on Android only).

namespace __asan {

struct Flags {
  // Flag descriptions are in asan_rtl.cc.
  int quarantine_size;
  int redzone;
  int max_redzone;
  bool debug;
  int report_globals;
  bool check_initialization_order;
  bool replace_str;
  bool replace_intrin;
  bool mac_ignore_invalid_free;
  bool detect_stack_use_after_return;
  int min_uar_stack_size_log;
  int max_uar_stack_size_log;
  bool uar_noreserve;
  int max_malloc_fill_size, malloc_fill_byte;
  int exitcode;
  bool allow_user_poisoning;
  int sleep_before_dying;
  bool check_malloc_usable_size;
  bool unmap_shadow_on_exit;
  bool abort_on_error;
  bool print_stats;
  bool print_legend;
  bool atexit;
  bool allow_reexec;
  bool print_full_thread_history;
  bool poison_heap;
  bool poison_partial;
  bool poison_array_cookie;
  bool alloc_dealloc_mismatch;
  bool new_delete_type_mismatch;
  bool strict_memcmp;
  bool strict_init_order;
  bool start_deactivated;
  int detect_invalid_pointer_pairs;
  bool detect_container_overflow;
  int detect_odr_violation;
  bool dump_instruction_bytes;
#define ASAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "asan_flags.inc"
#undef ASAN_FLAG

  void SetDefaults();
};

extern Flags asan_flags_dont_use_directly;
inline Flags *flags() {
  return &asan_flags_dont_use_directly;
}
void InitializeFlags(Flags *f, const char *env);

void InitializeFlags();

}  // namespace __asan
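
The hand-written flag members above are replaced by fields generated from asan_flags.inc through an X-macro. A self-contained reproduction of the pattern (the flag list and names here are invented for illustration, not the real list):

#include <cstdio>

#define DEMO_FLAG_LIST(X)                          \
  X(int, redzone, 16, "Minimal redzone size")      \
  X(bool, poison_heap, true, "Poison heap memory")

struct DemoFlags {
// First expansion: declare one struct field per flag.
#define DEMO_FLAG(Type, Name, DefaultValue, Description) Type Name;
  DEMO_FLAG_LIST(DEMO_FLAG)
#undef DEMO_FLAG
  void SetDefaults() {
// Second expansion of the same list: assign each default.
#define DEMO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
    DEMO_FLAG_LIST(DEMO_FLAG)
#undef DEMO_FLAG
  }
};

int main() {
  DemoFlags f;
  f.SetDefaults();
  std::printf("redzone=%d poison_heap=%d\n", f.redzone, (int)f.poison_heap);
}

Keeping the declaration, the defaults, and (in the runtime) the parser registration in one .inc file means a new flag is added in exactly one place.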

134 libsanitizer/asan/asan_flags.inc Normal file

@ -0,0 +1,134 @@
//===-- asan_flags.inc ------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// ASan runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef ASAN_FLAG
# error "Define ASAN_FLAG prior to including this file!"
#endif

// ASAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.

ASAN_FLAG(int, quarantine_size, -1,
          "Deprecated, please use quarantine_size_mb.")
ASAN_FLAG(int, quarantine_size_mb, -1,
          "Size (in Mb) of quarantine used to detect use-after-free "
          "errors. Lower value may reduce memory usage but increase the "
          "chance of false negatives.")
ASAN_FLAG(int, redzone, 16,
          "Minimal size (in bytes) of redzones around heap objects. "
          "Requirement: redzone >= 16, is a power of two.")
ASAN_FLAG(int, max_redzone, 2048,
          "Maximal size (in bytes) of redzones around heap objects.")
ASAN_FLAG(
    bool, debug, false,
    "If set, prints some debugging information and does additional checks.")
ASAN_FLAG(
    int, report_globals, 1,
    "Controls the way to handle globals (0 - don't detect buffer overflow on "
    "globals, 1 - detect buffer overflow, 2 - print data about registered "
    "globals).")
ASAN_FLAG(bool, check_initialization_order, false,
          "If set, attempts to catch initialization order issues.")
ASAN_FLAG(
    bool, replace_str, true,
    "If set, uses custom wrappers and replacements for libc string functions "
    "to find more errors.")
ASAN_FLAG(bool, replace_intrin, true,
          "If set, uses custom wrappers for memset/memcpy/memmove intrinsics.")
ASAN_FLAG(bool, mac_ignore_invalid_free, false,
          "Ignore invalid free() calls to work around some bugs. Used on OS X "
          "only.")
ASAN_FLAG(bool, detect_stack_use_after_return, false,
          "Enables stack-use-after-return checking at run-time.")
ASAN_FLAG(int, min_uar_stack_size_log, 16,  // We can't do smaller anyway.
          "Minimum fake stack size log.")
ASAN_FLAG(int, max_uar_stack_size_log,
          20,  // 1Mb per size class, i.e. ~11Mb per thread
          "Maximum fake stack size log.")
ASAN_FLAG(bool, uar_noreserve, false,
          "Use mmap with 'noreserve' flag to allocate fake stack.")
ASAN_FLAG(
    int, max_malloc_fill_size, 0x1000,  // By default, fill only the first 4K.
    "ASan allocator flag. max_malloc_fill_size is the maximal amount of "
    "bytes that will be filled with malloc_fill_byte on malloc.")
ASAN_FLAG(int, malloc_fill_byte, 0xbe,
          "Value used to fill the newly allocated memory.")
ASAN_FLAG(bool, allow_user_poisoning, true,
          "If set, user may manually mark memory regions as poisoned or "
          "unpoisoned.")
ASAN_FLAG(
    int, sleep_before_dying, 0,
    "Number of seconds to sleep between printing an error report and "
    "terminating the program. Useful for debugging purposes (e.g. when one "
    "needs to attach gdb).")
ASAN_FLAG(bool, check_malloc_usable_size, true,
          "Allows the users to work around the bug in Nvidia drivers prior to "
          "295.*.")
ASAN_FLAG(bool, unmap_shadow_on_exit, false,
          "If set, explicitly unmaps the (huge) shadow at exit.")
ASAN_FLAG(bool, print_stats, false,
          "Print various statistics after printing an error message or if "
          "atexit=1.")
ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.")
ASAN_FLAG(bool, atexit, false,
          "If set, prints ASan exit stats even after program terminates "
          "successfully.")
ASAN_FLAG(
    bool, print_full_thread_history, true,
    "If set, prints thread creation stacks for the threads involved in the "
    "report and their ancestors up to the main thread.")
ASAN_FLAG(
    bool, poison_heap, true,
    "Poison (or not) the heap memory on [de]allocation. Zero value is useful "
    "for benchmarking the allocator or instrumentator.")
ASAN_FLAG(bool, poison_partial, true,
          "If true, poison partially addressable 8-byte aligned words "
          "(default=true). This flag affects heap and global buffers, but not "
          "stack buffers.")
ASAN_FLAG(bool, poison_array_cookie, true,
          "Poison (or not) the array cookie after operator new[].")

// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
// https://code.google.com/p/address-sanitizer/issues/detail?id=131
// https://code.google.com/p/address-sanitizer/issues/detail?id=309
// TODO(glider,timurrrr): Fix known issues and enable this back.
ASAN_FLAG(bool, alloc_dealloc_mismatch,
          (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0),
          "Report errors on malloc/delete, new/free, new/delete[], etc.")

ASAN_FLAG(bool, new_delete_type_mismatch, true,
          "Report errors on mismatch between size of new and delete.")
ASAN_FLAG(
    bool, strict_init_order, false,
    "If true, assume that dynamic initializers can never access globals from "
    "other modules, even if the latter are already initialized.")
ASAN_FLAG(
    bool, start_deactivated, false,
    "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
    "poisoning) to reduce memory consumption as much as possible, and "
    "restores them to original values when the first instrumented module is "
    "loaded into the process. This is mainly intended to be used on "
    "Android. ")
ASAN_FLAG(
    int, detect_invalid_pointer_pairs, 0,
    "If non-zero, try to detect operations like <, <=, >, >= and - on "
    "invalid pointer pairs (e.g. when pointers belong to different objects). "
    "The bigger the value the harder we try.")
ASAN_FLAG(
    bool, detect_container_overflow, true,
    "If true, honor the container overflow annotations. "
    "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow")
ASAN_FLAG(int, detect_odr_violation, 2,
          "If >=2, detect violation of One-Definition-Rule (ODR); "
          "If ==1, detect ODR-violation only if the two variables "
          "have different sizes")
ASAN_FLAG(bool, dump_instruction_bytes, false,
          "If true, dump 16 bytes starting at the instruction that caused SEGV")
ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
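
Usage note: every flag declared in this file can also be set at run time through the ASAN_OPTIONS environment variable parsed by InitializeFlags(), e.g. ASAN_OPTIONS=redzone=64:detect_stack_use_after_return=1 (illustrative values; the names and defaults are exactly as declared above).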
|
|
@ -9,6 +9,7 @@
|
|||
//
|
||||
// Handle globals.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "asan_interceptors.h"
|
||||
#include "asan_internal.h"
|
||||
#include "asan_mapping.h"
|
||||
|
@ -16,6 +17,7 @@
|
|||
#include "asan_report.h"
|
||||
#include "asan_stack.h"
|
||||
#include "asan_stats.h"
|
||||
#include "asan_suppressions.h"
|
||||
#include "asan_thread.h"
|
||||
#include "sanitizer_common/sanitizer_common.h"
|
||||
#include "sanitizer_common/sanitizer_mutex.h"
|
||||
|
@ -71,7 +73,7 @@ ALWAYS_INLINE void PoisonRedZones(const Global &g) {
|
|||
|
||||
const uptr kMinimalDistanceFromAnotherGlobal = 64;
|
||||
|
||||
bool IsAddressNearGlobal(uptr addr, const __asan_global &g) {
|
||||
static bool IsAddressNearGlobal(uptr addr, const __asan_global &g) {
|
||||
if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false;
|
||||
if (addr >= g.beg + g.size_with_redzone) return false;
|
||||
return true;
|
||||
|
@ -88,46 +90,8 @@ static void ReportGlobal(const Global &g, const char *prefix) {
|
|||
}
|
||||
}
|
||||
|
||||
static bool DescribeOrGetInfoIfGlobal(uptr addr, uptr size, bool print,
|
||||
Global *output_global) {
|
||||
if (!flags()->report_globals) return false;
|
||||
BlockingMutexLock lock(&mu_for_globals);
|
||||
bool res = false;
|
||||
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
|
||||
const Global &g = *l->g;
|
||||
if (print) {
|
||||
if (flags()->report_globals >= 2)
|
||||
ReportGlobal(g, "Search");
|
||||
res |= DescribeAddressRelativeToGlobal(addr, size, g);
|
||||
} else {
|
||||
if (IsAddressNearGlobal(addr, g)) {
|
||||
CHECK(output_global);
|
||||
*output_global = g;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
bool DescribeAddressIfGlobal(uptr addr, uptr size) {
|
||||
return DescribeOrGetInfoIfGlobal(addr, size, /* print */ true,
|
||||
/* output_global */ nullptr);
|
||||
}
|
||||
|
||||
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
|
||||
Global g = {};
|
||||
if (DescribeOrGetInfoIfGlobal(addr, /* size */ 1, /* print */ false, &g)) {
|
||||
internal_strncpy(descr->name, g.name, descr->name_size);
|
||||
descr->region_address = g.beg;
|
||||
descr->region_size = g.size;
|
||||
descr->region_kind = "global";
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
u32 FindRegistrationSite(const Global *g) {
|
||||
static u32 FindRegistrationSite(const Global *g) {
|
||||
mu_for_globals.CheckLocked();
|
||||
CHECK(global_registration_site_vector);
|
||||
for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) {
|
||||
GlobalRegistrationSite &grs = (*global_registration_site_vector)[i];
|
||||
|
@ -137,6 +101,38 @@ u32 FindRegistrationSite(const Global *g) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
|
||||
int max_globals) {
|
||||
if (!flags()->report_globals) return 0;
|
||||
BlockingMutexLock lock(&mu_for_globals);
|
||||
int res = 0;
|
||||
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
|
||||
const Global &g = *l->g;
|
||||
if (flags()->report_globals >= 2)
|
||||
ReportGlobal(g, "Search");
|
||||
if (IsAddressNearGlobal(addr, g)) {
|
||||
globals[res] = g;
|
||||
if (reg_sites)
|
||||
reg_sites[res] = FindRegistrationSite(&g);
|
||||
res++;
|
||||
if (res == max_globals) break;
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
  Global g = {};
  if (GetGlobalsForAddress(addr, &g, nullptr, 1)) {
    internal_strncpy(descr->name, g.name, descr->name_size);
    descr->region_address = g.beg;
    descr->region_size = g.size;
    descr->region_kind = "global";
    return true;
  }
  return false;
}

// Register a global variable.
// This function may be called more than once for every global
// so we store the globals in a map.

@@ -148,9 +144,7 @@ static void RegisterGlobal(const Global *g) {
  CHECK(AddrIsInMem(g->beg));
  CHECK(AddrIsAlignedByGranularity(g->beg));
  CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
  // This "ODR violation" detection is fundamentally incompatible with
  // how GCC registers globals. Disable as useless until rewritten upstream.
  if (0 && flags()->detect_odr_violation) {
  if (flags()->detect_odr_violation) {
    // Try detecting ODR (One Definition Rule) violation, i.e. the situation
    // where two globals with the same name are defined in different modules.
    if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {

@@ -158,20 +152,21 @@ static void RegisterGlobal(const Global *g) {
      // the entire redzone of the second global may be within the first global.
      for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
        if (g->beg == l->g->beg &&
            (flags()->detect_odr_violation >= 2 || g->size != l->g->size))
            (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
            !IsODRViolationSuppressed(g->name))
          ReportODRViolation(g, FindRegistrationSite(g),
                             l->g, FindRegistrationSite(l->g));
      }
    }
  }
  if (flags()->poison_heap)
  if (CanPoisonMemory())
    PoisonRedZones(*g);
  ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
  l->g = g;
  l->next = list_of_all_globals;
  list_of_all_globals = l;
  if (g->has_dynamic_init) {
    if (dynamic_init_globals == 0) {
    if (!dynamic_init_globals) {
      dynamic_init_globals = new(allocator_for_globals)
          VectorOfGlobals(kDynamicInitGlobalsInitialCapacity);
    }

@@ -182,11 +177,13 @@ static void RegisterGlobal(const Global *g) {

static void UnregisterGlobal(const Global *g) {
  CHECK(asan_inited);
  if (flags()->report_globals >= 2)
    ReportGlobal(*g, "Removed");
  CHECK(flags()->report_globals);
  CHECK(AddrIsInMem(g->beg));
  CHECK(AddrIsAlignedByGranularity(g->beg));
  CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
  if (flags()->poison_heap)
  if (CanPoisonMemory())
    PoisonShadowForGlobal(g, 0);
  // We unpoison the shadow memory for the global but we do not remove it from
  // the list because that would require O(n^2) time with the current list

@@ -208,7 +205,7 @@ void StopInitOrderChecking() {
    }
  }
}

} // namespace __asan
} // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT

@@ -216,7 +213,7 @@ using namespace __asan; // NOLINT
// Register an array of globals.
void __asan_register_globals(__asan_global *globals, uptr n) {
  if (!flags()->report_globals) return;
  GET_STACK_TRACE_FATAL_HERE;
  GET_STACK_TRACE_MALLOC;
  u32 stack_id = StackDepotPut(stack);
  BlockingMutexLock lock(&mu_for_globals);
  if (!global_registration_site_vector)

@@ -249,7 +246,7 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
// initializer can only touch global variables in the same TU.
void __asan_before_dynamic_init(const char *module_name) {
  if (!flags()->check_initialization_order ||
      !flags()->poison_heap)
      !CanPoisonMemory())
    return;
  bool strict_init_order = flags()->strict_init_order;
  CHECK(dynamic_init_globals);

@@ -275,7 +272,7 @@ void __asan_before_dynamic_init(const char *module_name) {
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
  if (!flags()->check_initialization_order ||
      !flags()->poison_heap)
      !CanPoisonMemory())
    return;
  CHECK(asan_inited);
  BlockingMutexLock lock(&mu_for_globals);
diff --git a/libsanitizer/asan/asan_init_version.h b/libsanitizer/asan/asan_init_version.h
@@ -23,8 +23,10 @@ extern "C" {
  // contains the function PC as the 3-rd field (see
  // DescribeAddressIfStack).
  // v3=>v4: added '__asan_global_source_location' to __asan_global.
  #define __asan_init __asan_init_v4
  #define __asan_init_name "__asan_init_v4"
  // v4=>v5: changed the semantics and format of __asan_stack_malloc_ and
  // __asan_stack_free_ functions.
  // v5=>v6: changed the name of the version check symbol
  #define __asan_version_mismatch_check __asan_version_mismatch_check_v6
}

#endif // ASAN_INIT_VERSION_H
diff --git a/libsanitizer/asan/asan_interceptors.cc b/libsanitizer/asan/asan_interceptors.cc
@@ -9,8 +9,8 @@
//
// Intercept various libc functions.
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"

#include "asan_interceptors.h"
#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"

@@ -18,8 +18,19 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_suppressions.h"
#include "sanitizer_common/sanitizer_libc.h"

#if SANITIZER_POSIX
#include "sanitizer_common/sanitizer_posix.h"
#endif

#if defined(__i386) && SANITIZER_LINUX
#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1"
#elif defined(__mips__) && SANITIZER_LINUX
#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2"
#endif

namespace __asan {

// Return true if we can quickly decide that the region is unpoisoned.

@@ -32,12 +43,16 @@ static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
  return false;
}

struct AsanInterceptorContext {
  const char *interceptor_name;
};

// We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
// and ASAN_WRITE_RANGE as macro instead of function so
// that no extra frames are created, and stack trace contains
// relevant information only.
// We check all shadow bytes.
#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \
#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do { \
    uptr __offset = (uptr)(offset); \
    uptr __size = (uptr)(size); \
    uptr __bad = 0; \

@@ -47,13 +62,33 @@ static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
    } \
    if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \
        (__bad = __asan_region_is_poisoned(__offset, __size))) { \
      GET_CURRENT_PC_BP_SP; \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size); \
      AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx; \
      bool suppressed = false; \
      if (_ctx) { \
        suppressed = IsInterceptorSuppressed(_ctx->interceptor_name); \
        if (!suppressed && HaveStackTraceBasedSuppressions()) { \
          GET_STACK_TRACE_FATAL_HERE; \
          suppressed = IsStackTraceSuppressed(&stack); \
        } \
      } \
      if (!suppressed) { \
        GET_CURRENT_PC_BP_SP; \
        __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0); \
      } \
    } \
  } while (0)

#define ASAN_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, false)
#define ASAN_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, true)
#define ASAN_READ_RANGE(ctx, offset, size) \
  ACCESS_MEMORY_RANGE(ctx, offset, size, false)
#define ASAN_WRITE_RANGE(ctx, offset, size) \
  ACCESS_MEMORY_RANGE(ctx, offset, size, true)

#define ASAN_READ_STRING_OF_LEN(ctx, s, len, n) \
  ASAN_READ_RANGE((ctx), (s), \
    common_flags()->strict_string_checks ? (len) + 1 : (n))

#define ASAN_READ_STRING(ctx, s, n) \
  ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))

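The reworked ACCESS_MEMORY_RANGE above adds a suppression step before reporting: the interceptor's name (carried in the new context struct) is matched against user suppressions, then stack-trace-based suppressions. The following is a hedged, self-contained illustration of that check-suppress-report flow; none of these helper names exist in the runtime, and the real macro consults shadow memory rather than a plain byte map.

    #include <cstdio>
    #include <cstring>

    static bool IsPoisoned(const unsigned char *shadow, size_t off, size_t size,
                           size_t *bad) {
      for (size_t i = off; i < off + size; i++)
        if (shadow[i]) { *bad = i; return true; }  // first bad byte, like __asan_region_is_poisoned
      return false;
    }

    static bool IsSuppressed(const char *interceptor_name) {
      // Stands in for IsInterceptorSuppressed(): match against a user list.
      return interceptor_name && !strcmp(interceptor_name, "memcpy");
    }

    static void CheckRange(const char *who, const unsigned char *shadow,
                           size_t off, size_t size, bool is_write) {
      size_t bad;
      if (IsPoisoned(shadow, off, size, &bad) && !IsSuppressed(who))
        printf("%s: %s of size %zu hits poisoned byte %zu\n",
               who, is_write ? "WRITE" : "READ", size, bad);
    }

    int main() {
      unsigned char shadow[16] = {};
      shadow[10] = 0xf1;                                       // poison one byte
      CheckRange("strlen", shadow, 8, 4, /*is_write=*/false);  // reported
      CheckRange("memcpy", shadow, 8, 4, /*is_write=*/true);   // suppressed
    }
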
// Behavior of functions like "memcpy" or "strcpy" is undefined
// if memory intervals overlap. We report error in this case.

@@ -74,7 +109,7 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,

static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
#if ASAN_INTERCEPT_STRNLEN
  if (REAL(strnlen) != 0) {
  if (REAL(strnlen)) {
    return REAL(strnlen)(s, maxlen);
  }
#endif

@@ -92,7 +127,7 @@ int OnExit() {
  return 0;
}

} // namespace __asan
} // namespace __asan

// ---------------------- Wrappers ---------------- {{{1
using namespace __asan; // NOLINT

@@ -100,31 +135,28 @@ using namespace __asan; // NOLINT
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)

#if !SANITIZER_MAC
#define ASAN_INTERCEPT_FUNC(name) \
  do { \
    if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \
      VReport(1, "AddressSanitizer: failed to intercept '" #name "'\n"); \
  } while (0)
#else
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
#define ASAN_INTERCEPT_FUNC(name)
#endif // SANITIZER_MAC
#define ASAN_INTERCEPTOR_ENTER(ctx, func) \
  AsanInterceptorContext _ctx = {#func}; \
  ctx = (void *)&_ctx; \
  (void) ctx; \

#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
  ASAN_WRITE_RANGE(ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size)
  ASAN_WRITE_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
  ASAN_READ_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
  ASAN_INTERCEPTOR_ENTER(ctx, func); \
  do { \
    if (asan_init_is_running) \
      return REAL(func)(__VA_ARGS__); \
    ctx = 0; \
    (void) ctx; \
    if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \
      return REAL(func)(__VA_ARGS__); \
    ENSURE_ASAN_INITED(); \
  } while (false)
#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
  do { \
  } while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
  do { \
  } while (false)

@@ -143,14 +175,30 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
  do { \
  } while (false)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
// Strict init-order checking is dlopen-hostile:
// https://code.google.com/p/address-sanitizer/issues/detail?id=178
#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
  if (flags()->strict_init_order) { \
    StopInitOrderChecking(); \
  }
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) CovUpdateMapping()
#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CovUpdateMapping()
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
  CoverageUpdateMapping()
#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CoverageUpdateMapping()
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
  if (AsanThread *t = GetCurrentThread()) { \
    *begin = t->tls_begin(); \
    *end = t->tls_end(); \
  } else { \
    *begin = *end = 0; \
  }
#include "sanitizer_common/sanitizer_common_interceptors.inc"

#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(p, s)
#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(p, s)
// Syscall interceptors don't have contexts, we don't support suppressions
// for them.
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(nullptr, p, s)
#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(nullptr, p, s)
#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
  do { \
    (void)(p); \

@@ -163,56 +211,81 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
  } while (false)
#include "sanitizer_common/sanitizer_common_syscalls.inc"

struct ThreadStartParam {
  atomic_uintptr_t t;
  atomic_uintptr_t is_registered;
};

static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
  AsanThread *t = (AsanThread*)arg;
  ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
  AsanThread *t = nullptr;
  while ((t = reinterpret_cast<AsanThread *>(
             atomic_load(&param->t, memory_order_acquire))) == nullptr)
    internal_sched_yield();
  SetCurrentThread(t);
  return t->ThreadStart(GetTid());
  return t->ThreadStart(GetTid(), &param->is_registered);
}

#if ASAN_INTERCEPT_PTHREAD_CREATE
INTERCEPTOR(int, pthread_create, void *thread,
    void *attr, void *(*start_routine)(void*), void *arg) {
  EnsureMainThreadIDIsCorrect();
  // Strict init-order checking in thread-hostile.
  // Strict init-order checking is thread-hostile.
  if (flags()->strict_init_order)
    StopInitOrderChecking();
  GET_STACK_TRACE_THREAD;
  int detached = 0;
  if (attr != 0)
  if (attr)
    REAL(pthread_attr_getdetachstate)(attr, &detached);

  u32 current_tid = GetCurrentTidOrInvalid();
  AsanThread *t = AsanThread::Create(start_routine, arg);
  CreateThreadContextArgs args = { t, &stack };
  asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args);
  return REAL(pthread_create)(thread, attr, asan_thread_start, t);
  ThreadStartParam param;
  atomic_store(&param.t, 0, memory_order_relaxed);
  atomic_store(&param.is_registered, 0, memory_order_relaxed);
  int result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
  if (result == 0) {
    u32 current_tid = GetCurrentTidOrInvalid();
    AsanThread *t =
        AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
    atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release);
    // Wait until the AsanThread object is initialized and the ThreadRegistry
    // entry is in "started" state. One reason for this is that after this
    // interceptor exits, the child thread's stack may be the only thing holding
    // the |arg| pointer. This may cause LSan to report a leak if leak checking
    // happens at a point when the interceptor has already exited, but the stack
    // range for the child thread is not yet known.
    while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
      internal_sched_yield();
  }
  return result;
}

INTERCEPTOR(int, pthread_join, void *t, void **arg) {
  return real_pthread_join(t, arg);
}

DEFINE_REAL_PTHREAD_FUNCTIONS
#endif // ASAN_INTERCEPT_PTHREAD_CREATE

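The new pthread_create interceptor above performs a two-way handshake: the parent publishes the AsanThread pointer with a release store after the real pthread_create returns, the child spins with acquire loads until it sees it, and the parent in turn waits until the child signals registration. Below is a minimal stand-alone sketch of that pattern, with std::atomic and sched_yield standing in for the runtime's atomic_* wrappers and internal_sched_yield; it is not the runtime's code.

    #include <atomic>
    #include <pthread.h>
    #include <sched.h>
    #include <cstdio>

    struct StartParam {
      std::atomic<void *> t{nullptr};          // parent -> child: thread object
      std::atomic<bool> is_registered{false};  // child -> parent: registry done
    };

    static void *thread_start(void *arg) {
      StartParam *param = static_cast<StartParam *>(arg);
      void *t;
      while ((t = param->t.load(std::memory_order_acquire)) == nullptr)
        sched_yield();                          // wait for the parent to publish
      printf("child got object %p\n", t);
      param->is_registered.store(true, std::memory_order_release);
      return nullptr;
    }

    int main() {
      StartParam param;
      pthread_t tid;
      if (pthread_create(&tid, nullptr, thread_start, &param) != 0) return 1;
      int dummy = 42;                           // stands in for the AsanThread
      param.t.store(&dummy, std::memory_order_release);
      while (!param.is_registered.load(std::memory_order_acquire))
        sched_yield();                          // don't return before registration
      pthread_join(tid, nullptr);
    }
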
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION

#if SANITIZER_ANDROID
INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
  if (!AsanInterceptsSignal(signum) ||
      common_flags()->allow_user_segv_handler) {
  if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
    return REAL(bsd_signal)(signum, handler);
  }
  return 0;
}
#else
INTERCEPTOR(void*, signal, int signum, void *handler) {
  if (!AsanInterceptsSignal(signum) ||
      common_flags()->allow_user_segv_handler) {
    return REAL(signal)(signum, handler);
  }
  return 0;
}
#endif

INTERCEPTOR(void*, signal, int signum, void *handler) {
  if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
    return REAL(signal)(signum, handler);
  }
  return nullptr;
}

INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
            struct sigaction *oldact) {
  if (!AsanInterceptsSignal(signum) ||
      common_flags()->allow_user_segv_handler) {
  if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
    return REAL(sigaction)(signum, act, oldact);
  }
  return 0;

@@ -220,10 +293,10 @@ INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,

namespace __sanitizer {
int real_sigaction(int signum, const void *act, void *oldact) {
  return REAL(sigaction)(signum,
      (struct sigaction *)act, (struct sigaction *)oldact);
  return REAL(sigaction)(signum, (const struct sigaction *)act,
                         (struct sigaction *)oldact);
}
} // namespace __sanitizer
} // namespace __sanitizer

#elif SANITIZER_POSIX
// We need to have defined REAL(sigaction) on posix systems.

@@ -239,7 +312,7 @@ static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) {
  ssize += stack - bottom;
  ssize = RoundUpTo(ssize, PageSize);
  static const uptr kMaxSaneContextStackSize = 1 << 22; // 4 Mb
  if (ssize && ssize <= kMaxSaneContextStackSize) {
  if (AddrIsInMem(bottom) && ssize && ssize <= kMaxSaneContextStackSize) {
    PoisonShadow(bottom, ssize, 0);
  }
}

@@ -294,113 +367,73 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
}
#endif

#if SANITIZER_WINDOWS
INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
  CHECK(REAL(RaiseException));
  __asan_handle_no_return();
  REAL(RaiseException)(a, b, c, d);
}
// memcpy is called during __asan_init() from the internals of printf(...).
// We do not treat memcpy with to==from as a bug.
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
#define ASAN_MEMCPY_IMPL(ctx, to, from, size) do { \
    if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
    if (asan_init_is_running) { \
      return REAL(memcpy)(to, from, size); \
    } \
    ENSURE_ASAN_INITED(); \
    if (flags()->replace_intrin) { \
      if (to != from) { \
        CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
      } \
      ASAN_READ_RANGE(ctx, from, size); \
      ASAN_WRITE_RANGE(ctx, to, size); \
    } \
    return REAL(memcpy)(to, from, size); \
  } while (0)

INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) {
  CHECK(REAL(_except_handler3));
  __asan_handle_no_return();
  return REAL(_except_handler3)(a, b, c, d);
}

#if ASAN_DYNAMIC
// This handler is named differently in -MT and -MD CRTs.
#define _except_handler4 _except_handler4_common
#endif
INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
  CHECK(REAL(_except_handler4));
  __asan_handle_no_return();
  return REAL(_except_handler4)(a, b, c, d);
}
#endif

static inline int CharCmp(unsigned char c1, unsigned char c2) {
  return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
}

INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
  if (UNLIKELY(!asan_inited)) return internal_memcmp(a1, a2, size);
  ENSURE_ASAN_INITED();
  if (flags()->replace_intrin) {
    if (flags()->strict_memcmp) {
      // Check the entire regions even if the first bytes of the buffers are
      // different.
      ASAN_READ_RANGE(a1, size);
      ASAN_READ_RANGE(a2, size);
      // Fallthrough to REAL(memcmp) below.
    } else {
      unsigned char c1 = 0, c2 = 0;
      const unsigned char *s1 = (const unsigned char*)a1;
      const unsigned char *s2 = (const unsigned char*)a2;
      uptr i;
      for (i = 0; i < size; i++) {
        c1 = s1[i];
        c2 = s2[i];
        if (c1 != c2) break;
      }
      ASAN_READ_RANGE(s1, Min(i + 1, size));
      ASAN_READ_RANGE(s2, Min(i + 1, size));
      return CharCmp(c1, c2);
    }
  }
  return REAL(memcmp(a1, a2, size));
}

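The non-strict branch of the memcmp interceptor being removed above only accounted for the bytes the comparison actually examined: up to and including the first mismatching byte, capped at the full size. A tiny sketch of that rule, with simplified types (the runtime uses uptr and its own Min):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    static size_t BytesRead(const unsigned char *a, const unsigned char *b,
                            size_t size) {
      size_t i = 0;
      while (i < size && a[i] == b[i]) i++;  // stop at first difference
      return std::min(i + 1, size);          // that many bytes were touched
    }

    int main() {
      unsigned char x[] = {1, 2, 3, 4}, y[] = {1, 9, 3, 4};
      printf("bytes read: %zu of %zu\n", BytesRead(x, y, 4), sizeof(x));  // 2 of 4
    }
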
void *__asan_memcpy(void *to, const void *from, uptr size) {
  if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size);
  // memcpy is called during __asan_init() from the internals
  // of printf(...).
  if (asan_init_is_running) {
    return REAL(memcpy)(to, from, size);
  }
  ENSURE_ASAN_INITED();
  if (flags()->replace_intrin) {
    if (to != from) {
      // We do not treat memcpy with to==from as a bug.
      // See http://llvm.org/bugs/show_bug.cgi?id=11763.
      CHECK_RANGES_OVERLAP("memcpy", to, size, from, size);
    }
    ASAN_READ_RANGE(from, size);
    ASAN_WRITE_RANGE(to, size);
  }
  return REAL(memcpy)(to, from, size);
  ASAN_MEMCPY_IMPL(nullptr, to, from, size);
}

// memset is called inside Printf.
#define ASAN_MEMSET_IMPL(ctx, block, c, size) do { \
    if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
    if (asan_init_is_running) { \
      return REAL(memset)(block, c, size); \
    } \
    ENSURE_ASAN_INITED(); \
    if (flags()->replace_intrin) { \
      ASAN_WRITE_RANGE(ctx, block, size); \
    } \
    return REAL(memset)(block, c, size); \
  } while (0)

void *__asan_memset(void *block, int c, uptr size) {
  if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size);
  // memset is called inside Printf.
  if (asan_init_is_running) {
    return REAL(memset)(block, c, size);
  }
  ENSURE_ASAN_INITED();
  if (flags()->replace_intrin) {
    ASAN_WRITE_RANGE(block, size);
  }
  return REAL(memset)(block, c, size);
  ASAN_MEMSET_IMPL(nullptr, block, c, size);
}

#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) do { \
    if (UNLIKELY(!asan_inited)) \
      return internal_memmove(to, from, size); \
    ENSURE_ASAN_INITED(); \
    if (flags()->replace_intrin) { \
      ASAN_READ_RANGE(ctx, from, size); \
      ASAN_WRITE_RANGE(ctx, to, size); \
    } \
    return internal_memmove(to, from, size); \
  } while (0)

void *__asan_memmove(void *to, const void *from, uptr size) {
  if (UNLIKELY(!asan_inited))
    return internal_memmove(to, from, size);
  ENSURE_ASAN_INITED();
  if (flags()->replace_intrin) {
    ASAN_READ_RANGE(from, size);
    ASAN_WRITE_RANGE(to, size);
  }
  return internal_memmove(to, from, size);
  ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
}

INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
  return __asan_memmove(to, from, size);
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, memmove);
  ASAN_MEMMOVE_IMPL(ctx, to, from, size);
}

INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, memcpy);
#if !SANITIZER_MAC
  return __asan_memcpy(to, from, size);
  ASAN_MEMCPY_IMPL(ctx, to, from, size);
#else
  // At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
  // with WRAP(memcpy). As a result, false positives are reported for memmove()

@@ -408,15 +441,19 @@ INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
  // ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
  // internal_memcpy(), which may lead to crashes, see
  // http://llvm.org/bugs/show_bug.cgi?id=16362.
  return __asan_memmove(to, from, size);
  ASAN_MEMMOVE_IMPL(ctx, to, from, size);
#endif // !SANITIZER_MAC
}

INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
  return __asan_memset(block, c, size);
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, memset);
  ASAN_MEMSET_IMPL(ctx, block, c, size);
}

INTERCEPTOR(char*, strchr, const char *str, int c) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strchr);
  if (UNLIKELY(!asan_inited)) return internal_strchr(str, c);
  // strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is
  // used.

@@ -426,8 +463,9 @@ INTERCEPTOR(char*, strchr, const char *str, int c) {
  ENSURE_ASAN_INITED();
  char *result = REAL(strchr)(str, c);
  if (flags()->replace_str) {
    uptr bytes_read = (result ? result - str : REAL(strlen)(str)) + 1;
    ASAN_READ_RANGE(str, bytes_read);
    uptr len = REAL(strlen)(str);
    uptr bytes_read = (result ? result - str : len) + 1;
    ASAN_READ_STRING_OF_LEN(ctx, str, len, bytes_read);
  }
  return result;
}

@@ -449,13 +487,15 @@ DEFINE_REAL(char*, index, const char *string, int c)
// For both strcat() and strncat() we need to check the validity of |to|
// argument irrespective of the |from| length.
INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strcat); // NOLINT
  ENSURE_ASAN_INITED();
  if (flags()->replace_str) {
    uptr from_length = REAL(strlen)(from);
    ASAN_READ_RANGE(from, from_length + 1);
    ASAN_READ_RANGE(ctx, from, from_length + 1);
    uptr to_length = REAL(strlen)(to);
    ASAN_READ_RANGE(to, to_length);
    ASAN_WRITE_RANGE(to + to_length, from_length + 1);
    ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length);
    ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
    // If the copying actually happens, the |from| string should not overlap
    // with the resulting string starting at |to|, which has a length of
    // to_length + from_length + 1.

@@ -468,14 +508,16 @@ INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT
}

INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strncat);
  ENSURE_ASAN_INITED();
  if (flags()->replace_str) {
    uptr from_length = MaybeRealStrnlen(from, size);
    uptr copy_length = Min(size, from_length + 1);
    ASAN_READ_RANGE(from, copy_length);
    ASAN_READ_RANGE(ctx, from, copy_length);
    uptr to_length = REAL(strlen)(to);
    ASAN_READ_RANGE(to, to_length);
    ASAN_WRITE_RANGE(to + to_length, from_length + 1);
    ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length);
    ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
    if (from_length > 0) {
      CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1,
                           from, copy_length);

@@ -485,6 +527,8 @@ INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
}

INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strcpy); // NOLINT
#if SANITIZER_MAC
  if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from); // NOLINT
#endif

@@ -497,19 +541,21 @@ INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
  if (flags()->replace_str) {
    uptr from_size = REAL(strlen)(from) + 1;
    CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size);
    ASAN_READ_RANGE(from, from_size);
    ASAN_WRITE_RANGE(to, from_size);
    ASAN_READ_RANGE(ctx, from, from_size);
    ASAN_WRITE_RANGE(ctx, to, from_size);
  }
  return REAL(strcpy)(to, from); // NOLINT
}

#if ASAN_INTERCEPT_STRDUP
INTERCEPTOR(char*, strdup, const char *s) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strdup);
  if (UNLIKELY(!asan_inited)) return internal_strdup(s);
  ENSURE_ASAN_INITED();
  uptr length = REAL(strlen)(s);
  if (flags()->replace_str) {
    ASAN_READ_RANGE(s, length + 1);
    ASAN_READ_RANGE(ctx, s, length + 1);
  }
  GET_STACK_TRACE_MALLOC;
  void *new_mem = asan_malloc(length + 1, &stack);

@@ -519,6 +565,8 @@ INTERCEPTOR(char*, strdup, const char *s) {
#endif

INTERCEPTOR(SIZE_T, strlen, const char *s) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strlen);
  if (UNLIKELY(!asan_inited)) return internal_strlen(s);
  // strlen is called from malloc_default_purgeable_zone()
  // in __asan::ReplaceSystemAlloc() on Mac.

@@ -528,78 +576,65 @@ INTERCEPTOR(SIZE_T, strlen, const char *s) {
  ENSURE_ASAN_INITED();
  SIZE_T length = REAL(strlen)(s);
  if (flags()->replace_str) {
    ASAN_READ_RANGE(s, length + 1);
    ASAN_READ_RANGE(ctx, s, length + 1);
  }
  return length;
}

INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, wcslen);
  SIZE_T length = REAL(wcslen)(s);
  if (!asan_init_is_running) {
    ENSURE_ASAN_INITED();
    ASAN_READ_RANGE(s, (length + 1) * sizeof(wchar_t));
    ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t));
  }
  return length;
}

INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strncpy);
  ENSURE_ASAN_INITED();
  if (flags()->replace_str) {
    uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1);
    CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size);
    ASAN_READ_RANGE(from, from_size);
    ASAN_WRITE_RANGE(to, size);
    ASAN_READ_RANGE(ctx, from, from_size);
    ASAN_WRITE_RANGE(ctx, to, size);
  }
  return REAL(strncpy)(to, from, size);
}

#if ASAN_INTERCEPT_STRNLEN
INTERCEPTOR(uptr, strnlen, const char *s, uptr maxlen) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strnlen);
  ENSURE_ASAN_INITED();
  uptr length = REAL(strnlen)(s, maxlen);
  if (flags()->replace_str) {
    ASAN_READ_RANGE(s, Min(length + 1, maxlen));
    ASAN_READ_RANGE(ctx, s, Min(length + 1, maxlen));
  }
  return length;
}
#endif // ASAN_INTERCEPT_STRNLEN

static inline bool IsValidStrtolBase(int base) {
  return (base == 0) || (2 <= base && base <= 36);
}

static inline void FixRealStrtolEndptr(const char *nptr, char **endptr) {
  CHECK(endptr);
  if (nptr == *endptr) {
    // No digits were found at strtol call, we need to find out the last
    // symbol accessed by strtoll on our own.
    // We get this symbol by skipping leading blanks and optional +/- sign.
    while (IsSpace(*nptr)) nptr++;
    if (*nptr == '+' || *nptr == '-') nptr++;
    *endptr = (char*)nptr;
  }
  CHECK(*endptr >= nptr);
}

INTERCEPTOR(long, strtol, const char *nptr, // NOLINT
            char **endptr, int base) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strtol);
  ENSURE_ASAN_INITED();
  if (!flags()->replace_str) {
    return REAL(strtol)(nptr, endptr, base);
  }
  char *real_endptr;
  long result = REAL(strtol)(nptr, &real_endptr, base); // NOLINT
  if (endptr != 0) {
    *endptr = real_endptr;
  }
  if (IsValidStrtolBase(base)) {
    FixRealStrtolEndptr(nptr, &real_endptr);
    ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
  }
  StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
  return result;
}

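FixRealStrtolEndptr above encodes the rule used by all the strto*/ato* interceptors: report (real_endptr - nptr) + 1 bytes as read, fixing endptr to point past any skipped whitespace and sign when no digits were converted. A small self-contained demonstration of that calculation (using isspace in place of the runtime's IsSpace):

    #include <cctype>
    #include <cstdio>
    #include <cstdlib>

    static void FixEndptr(const char *nptr, char **endptr) {
      if (nptr == *endptr) {                       // strtol consumed nothing
        while (isspace((unsigned char)*nptr)) nptr++;
        if (*nptr == '+' || *nptr == '-') nptr++;  // the sign byte was still examined
        *endptr = const_cast<char *>(nptr);
      }
    }

    int main() {
      const char *inputs[] = {"  42x", "   +zzz"};
      for (const char *s : inputs) {
        char *end;
        long v = strtol(s, &end, 10);
        FixEndptr(s, &end);
        printf("value %ld, bytes examined: %td\n", v, (end - s) + 1);
      }
    }
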
INTERCEPTOR(int, atoi, const char *nptr) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, atoi);
#if SANITIZER_MAC
  if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr);
#endif

@@ -614,11 +649,13 @@ INTERCEPTOR(int, atoi, const char *nptr) {
  // different from int). So, we just imitate this behavior.
  int result = REAL(strtol)(nptr, &real_endptr, 10);
  FixRealStrtolEndptr(nptr, &real_endptr);
  ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
  ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
  return result;
}

INTERCEPTOR(long, atol, const char *nptr) { // NOLINT
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, atol);
#if SANITIZER_MAC
  if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr);
#endif

@@ -629,33 +666,28 @@ INTERCEPTOR(long, atol, const char *nptr) { // NOLINT
  char *real_endptr;
  long result = REAL(strtol)(nptr, &real_endptr, 10); // NOLINT
  FixRealStrtolEndptr(nptr, &real_endptr);
  ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
  ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
  return result;
}

#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
INTERCEPTOR(long long, strtoll, const char *nptr, // NOLINT
            char **endptr, int base) {
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, strtoll);
  ENSURE_ASAN_INITED();
  if (!flags()->replace_str) {
    return REAL(strtoll)(nptr, endptr, base);
  }
  char *real_endptr;
  long long result = REAL(strtoll)(nptr, &real_endptr, base); // NOLINT
  if (endptr != 0) {
    *endptr = real_endptr;
  }
  // If base has unsupported value, strtoll can exit with EINVAL
  // without reading any characters. So do additional checks only
  // if base is valid.
  if (IsValidStrtolBase(base)) {
    FixRealStrtolEndptr(nptr, &real_endptr);
    ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
  }
  StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
  return result;
}

INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT
  void *ctx;
  ASAN_INTERCEPTOR_ENTER(ctx, atoll);
  ENSURE_ASAN_INITED();
  if (!flags()->replace_str) {
    return REAL(atoll)(nptr);

@@ -663,7 +695,7 @@ INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT
  char *real_endptr;
  long long result = REAL(strtoll)(nptr, &real_endptr, 10); // NOLINT
  FixRealStrtolEndptr(nptr, &real_endptr);
  ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
  ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
  return result;
}
#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL

@@ -681,7 +713,7 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
#endif
  ENSURE_ASAN_INITED();
  int res = REAL(__cxa_atexit)(func, arg, dso_handle);
  REAL(__cxa_atexit)(AtCxaAtexit, 0, 0);
  REAL(__cxa_atexit)(AtCxaAtexit, nullptr, nullptr);
  return res;
}
#endif // ASAN_INTERCEPT___CXA_ATEXIT

@@ -696,35 +728,6 @@ INTERCEPTOR(int, fork, void) {
}
#endif // ASAN_INTERCEPT_FORK

#if SANITIZER_WINDOWS
INTERCEPTOR_WINAPI(DWORD, CreateThread,
                   void* security, uptr stack_size,
                   DWORD (__stdcall *start_routine)(void*), void* arg,
                   DWORD thr_flags, void* tid) {
  // Strict init-order checking in thread-hostile.
  if (flags()->strict_init_order)
    StopInitOrderChecking();
  GET_STACK_TRACE_THREAD;
  u32 current_tid = GetCurrentTidOrInvalid();
  AsanThread *t = AsanThread::Create(start_routine, arg);
  CreateThreadContextArgs args = { t, &stack };
  bool detached = false; // FIXME: how can we determine it on Windows?
  asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args);
  return REAL(CreateThread)(security, stack_size,
                            asan_thread_start, t, thr_flags, tid);
}

namespace __asan {
void InitializeWindowsInterceptors() {
  ASAN_INTERCEPT_FUNC(CreateThread);
  ASAN_INTERCEPT_FUNC(RaiseException);
  ASAN_INTERCEPT_FUNC(_except_handler3);
  ASAN_INTERCEPT_FUNC(_except_handler4);
}

} // namespace __asan
#endif

// ---------------------- InitializeAsanInterceptors ---------------- {{{1
namespace __asan {
void InitializeAsanInterceptors() {

@@ -734,7 +737,6 @@ void InitializeAsanInterceptors() {
  InitializeCommonInterceptors();

  // Intercept mem* functions.
  ASAN_INTERCEPT_FUNC(memcmp);
  ASAN_INTERCEPT_FUNC(memmove);
  ASAN_INTERCEPT_FUNC(memset);
  if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {

@@ -773,9 +775,8 @@ void InitializeAsanInterceptors() {
  ASAN_INTERCEPT_FUNC(sigaction);
#if SANITIZER_ANDROID
  ASAN_INTERCEPT_FUNC(bsd_signal);
#else
  ASAN_INTERCEPT_FUNC(signal);
#endif
  ASAN_INTERCEPT_FUNC(signal);
#endif
#if ASAN_INTERCEPT_SWAPCONTEXT
  ASAN_INTERCEPT_FUNC(swapcontext);

@@ -794,8 +795,13 @@ void InitializeAsanInterceptors() {

  // Intercept threading-related functions
#if ASAN_INTERCEPT_PTHREAD_CREATE
#if defined(ASAN_PTHREAD_CREATE_VERSION)
  ASAN_INTERCEPT_FUNC_VER(pthread_create, ASAN_PTHREAD_CREATE_VERSION);
#else
  ASAN_INTERCEPT_FUNC(pthread_create);
#endif
  ASAN_INTERCEPT_FUNC(pthread_join);
#endif

  // Intercept atexit function.
#if ASAN_INTERCEPT___CXA_ATEXIT

@@ -806,12 +812,9 @@ void InitializeAsanInterceptors() {
  ASAN_INTERCEPT_FUNC(fork);
#endif

  // Some Windows-specific interceptors.
#if SANITIZER_WINDOWS
  InitializeWindowsInterceptors();
#endif
  InitializePlatformInterceptors();

  VReport(1, "AddressSanitizer: libc interceptors initialized\n");
}

} // namespace __asan
} // namespace __asan
diff --git a/libsanitizer/asan/asan_interceptors.h b/libsanitizer/asan/asan_interceptors.h
@@ -13,7 +13,7 @@
#define ASAN_INTERCEPTORS_H

#include "asan_internal.h"
#include "sanitizer_common/sanitizer_interception.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"

// Use macro to describe if specific function should be

@@ -90,9 +90,27 @@ struct sigaction;
DECLARE_REAL(int, sigaction, int signum, const struct sigaction *act,
             struct sigaction *oldact)

#if !SANITIZER_MAC
#define ASAN_INTERCEPT_FUNC(name) \
  do { \
    if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \
      VReport(1, "AddressSanitizer: failed to intercept '" #name "'\n"); \
  } while (0)
#define ASAN_INTERCEPT_FUNC_VER(name, ver) \
  do { \
    if ((!INTERCEPT_FUNCTION_VER(name, ver) || !REAL(name))) \
      VReport( \
          1, "AddressSanitizer: failed to intercept '" #name "@@" #ver "'\n"); \
  } while (0)
#else
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
#define ASAN_INTERCEPT_FUNC(name)
#endif // SANITIZER_MAC

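ASAN_INTERCEPT_FUNC_VER exists so the interceptor can bind to a specific glibc symbol version (for example pthread_create@GLIBC_2.1 on i386, per the ASAN_PTHREAD_CREATE_VERSION defines in asan_interceptors.cc). As a hedged aside, versioned lookup of this kind is what dlvsym() exposes on glibc; the snippet below is an illustration of that mechanism, not of the interception machinery itself (build with -ldl; the version string is only meaningful on targets that actually export it).

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <cstdio>

    int main() {
      // dlsym() returns the default version; dlvsym() asks for a specific one.
      void *def = dlsym(RTLD_NEXT, "pthread_create");
      void *ver = dlvsym(RTLD_NEXT, "pthread_create", "GLIBC_2.1");
      printf("default: %p, GLIBC_2.1: %p\n", def, ver);  // ver may be null, e.g. on x86-64
      return 0;
    }
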
namespace __asan {

void InitializeAsanInterceptors();
void InitializePlatformInterceptors();

#define ENSURE_ASAN_INITED() do { \
  CHECK(!asan_init_is_running); \
diff --git a/libsanitizer/asan/asan_interface_internal.h b/libsanitizer/asan/asan_interface_internal.h
@@ -7,8 +7,11 @@
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This header can be included by the instrumented program to fetch
// data (mostly allocator statistics) from ASan runtime library.
// This header declares the AddressSanitizer runtime interface functions.
// The runtime library has to define these functions so the instrumented program
// could call them.
//
// See also include/sanitizer/asan_interface.h
//===----------------------------------------------------------------------===//
#ifndef ASAN_INTERFACE_INTERNAL_H
#define ASAN_INTERFACE_INTERNAL_H

@@ -22,10 +25,14 @@ using __sanitizer::uptr;
extern "C" {
  // This function should be called at the very beginning of the process,
  // before any instrumented code is executed and before any call to malloc.
  // Please note that __asan_init is a macro that is replaced with
  // __asan_init_vXXX at compile-time.
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_init();

  // This function exists purely to get a linker/loader error when using
  // incompatible versions of instrumentation and runtime library. Please note
  // that __asan_version_mismatch_check is a macro that is replaced with
  // __asan_version_mismatch_check_vXXX at compile-time.
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_version_mismatch_check();

  // This structure is used to describe the source location of a place where
  // global was defined.
  struct __asan_global_source_location {

@@ -123,10 +130,8 @@ extern "C" {

  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_report_error(uptr pc, uptr bp, uptr sp,
                           uptr addr, int is_write, uptr access_size);
                           uptr addr, int is_write, uptr access_size, u32 exp);

  SANITIZER_INTERFACE_ATTRIBUTE
  int __asan_set_error_exit_code(int exit_code);
  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_set_death_callback(void (*callback)(void));
  SANITIZER_INTERFACE_ATTRIBUTE

@@ -160,6 +165,21 @@ extern "C" {
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN(uptr p, uptr size);
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN(uptr p, uptr size);

  SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load1(uptr p, u32 exp);
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load2(uptr p, u32 exp);
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load4(uptr p, u32 exp);
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load8(uptr p, u32 exp);
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load16(uptr p, u32 exp);
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store1(uptr p, u32 exp);
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store2(uptr p, u32 exp);
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store4(uptr p, u32 exp);
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store8(uptr p, u32 exp);
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store16(uptr p, u32 exp);
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_loadN(uptr p, uptr size,
                                                      u32 exp);
  SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_storeN(uptr p, uptr size,
                                                       u32 exp);

  SANITIZER_INTERFACE_ATTRIBUTE
  void* __asan_memcpy(void *dst, const void *src, uptr size);
  SANITIZER_INTERFACE_ATTRIBUTE

@@ -175,6 +195,10 @@ extern "C" {
  void __asan_poison_intra_object_redzone(uptr p, uptr size);
  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_unpoison_intra_object_redzone(uptr p, uptr size);
  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_alloca_poison(uptr addr, uptr size);
  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_allocas_unpoison(uptr top, uptr bottom);
}  // extern "C"

#endif  // ASAN_INTERFACE_INTERNAL_H
diff --git a/libsanitizer/asan/asan_internal.h b/libsanitizer/asan/asan_internal.h
@@ -19,8 +19,6 @@
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_libc.h"

#define ASAN_DEFAULT_FAILURE_EXITCODE 1

#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
# error "The AddressSanitizer run-time should not be"
        " instrumented by AddressSanitizer"

@@ -73,13 +71,11 @@ void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();

void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
void AsanOnSIGSEGV(int, void *siginfo, void *context);
void AsanOnDeadlySignal(int, void *siginfo, void *context);

void DisableReexec();
void MaybeReexec();
bool AsanInterceptsSignal(int signum);
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void AsanPlatformThreadInit();
void StopInitOrderChecking();

// Wrapper for TLS/TSD.

@@ -90,10 +86,10 @@ void PlatformTSDDtor(void *tsd);

void AppendToErrorMessageBuffer(const char *buffer);

void ParseExtraActivationFlags();

void *AsanDlSymNext(const char *sym);

void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);

// Platform-specific options.
#if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove();

@@ -134,6 +130,8 @@ const int kAsanGlobalRedzoneMagic = 0xf9;
const int kAsanInternalHeapMagic = 0xfe;
const int kAsanArrayCookieMagic = 0xac;
const int kAsanIntraObjectRedzone = 0xbb;
const int kAsanAllocaLeftMagic = 0xca;
const int kAsanAllocaRightMagic = 0xcb;

static const uptr kCurrentStackFrameMagic = 0x41B58AB3;
static const uptr kRetiredStackFrameMagic = 0x45E0360E;
diff --git a/libsanitizer/asan/asan_linux.cc b/libsanitizer/asan/asan_linux.cc
@@ -66,6 +66,12 @@ asan_rt_version_t __asan_rt_version;

namespace __asan {

void InitializePlatformInterceptors() {}

void DisableReexec() {
  // No need to re-exec on Linux.
}

void MaybeReexec() {
  // No need to re-exec on Linux.
}

@@ -105,8 +111,11 @@ static void ReportIncompatibleRT() {
}

void AsanCheckDynamicRTPrereqs() {
  if (!ASAN_DYNAMIC)
    return;

  // Ensure that dynamic RT is the first DSO in the list
  const char *first_dso_name = 0;
  const char *first_dso_name = nullptr;
  dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name);
  if (first_dso_name && !IsDynamicRTName(first_dso_name)) {
    Report("ASan runtime does not come first in initial library list; "

@@ -131,7 +140,8 @@ void AsanCheckIncompatibleRT() {
      // system libraries, causing crashes later in ASan initialization.
      MemoryMappingLayout proc_maps(/*cache_enabled*/true);
      char filename[128];
      while (proc_maps.Next(0, 0, 0, filename, sizeof(filename), 0)) {
      while (proc_maps.Next(nullptr, nullptr, nullptr, filename,
                            sizeof(filename), nullptr)) {
        if (IsDynamicRTName(filename)) {
          Report("Your application is linked against "
                 "incompatible ASan runtimes.\n");

@@ -144,87 +154,7 @@ void AsanCheckIncompatibleRT() {
    }
  }
}
#endif // SANITIZER_ANDROID

void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if defined(__arm__)
  ucontext_t *ucontext = (ucontext_t*)context;
  *pc = ucontext->uc_mcontext.arm_pc;
  *bp = ucontext->uc_mcontext.arm_fp;
  *sp = ucontext->uc_mcontext.arm_sp;
#elif defined(__aarch64__)
  ucontext_t *ucontext = (ucontext_t*)context;
  *pc = ucontext->uc_mcontext.pc;
  *bp = ucontext->uc_mcontext.regs[29];
  *sp = ucontext->uc_mcontext.sp;
#elif defined(__hppa__)
  ucontext_t *ucontext = (ucontext_t*)context;
  *pc = ucontext->uc_mcontext.sc_iaoq[0];
  /* GCC uses %r3 whenever a frame pointer is needed. */
  *bp = ucontext->uc_mcontext.sc_gr[3];
  *sp = ucontext->uc_mcontext.sc_gr[30];
#elif defined(__x86_64__)
# if SANITIZER_FREEBSD
  ucontext_t *ucontext = (ucontext_t*)context;
  *pc = ucontext->uc_mcontext.mc_rip;
  *bp = ucontext->uc_mcontext.mc_rbp;
  *sp = ucontext->uc_mcontext.mc_rsp;
# else
  ucontext_t *ucontext = (ucontext_t*)context;
  *pc = ucontext->uc_mcontext.gregs[REG_RIP];
  *bp = ucontext->uc_mcontext.gregs[REG_RBP];
  *sp = ucontext->uc_mcontext.gregs[REG_RSP];
# endif
#elif defined(__i386__)
# if SANITIZER_FREEBSD
  ucontext_t *ucontext = (ucontext_t*)context;
  *pc = ucontext->uc_mcontext.mc_eip;
  *bp = ucontext->uc_mcontext.mc_ebp;
  *sp = ucontext->uc_mcontext.mc_esp;
# else
  ucontext_t *ucontext = (ucontext_t*)context;
  *pc = ucontext->uc_mcontext.gregs[REG_EIP];
  *bp = ucontext->uc_mcontext.gregs[REG_EBP];
  *sp = ucontext->uc_mcontext.gregs[REG_ESP];
# endif
#elif defined(__powerpc__) || defined(__powerpc64__)
  ucontext_t *ucontext = (ucontext_t*)context;
  *pc = ucontext->uc_mcontext.regs->nip;
  *sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
  // The powerpc{,64}-linux ABIs do not specify r31 as the frame
  // pointer, but GCC always uses r31 when we need a frame pointer.
  *bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
#elif defined(__sparc__)
  ucontext_t *ucontext = (ucontext_t*)context;
  uptr *stk_ptr;
# if defined (__arch64__)
  *pc = ucontext->uc_mcontext.mc_gregs[MC_PC];
  *sp = ucontext->uc_mcontext.mc_gregs[MC_O6];
  stk_ptr = (uptr *) (*sp + 2047);
  *bp = stk_ptr[15];
# else
  *pc = ucontext->uc_mcontext.gregs[REG_PC];
  *sp = ucontext->uc_mcontext.gregs[REG_O6];
  stk_ptr = (uptr *) *sp;
  *bp = stk_ptr[15];
# endif
#elif defined(__mips__)
  ucontext_t *ucontext = (ucontext_t*)context;
  *pc = ucontext->uc_mcontext.gregs[31];
  *bp = ucontext->uc_mcontext.gregs[30];
  *sp = ucontext->uc_mcontext.gregs[29];
#else
# error "Unsupported arch"
#endif
}

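The per-architecture GetPcSpBp being removed above reads the faulting pc/bp/sp out of the ucontext_t a signal handler receives. For readers unfamiliar with that mechanism, here is a hedged, x86-64 Linux-only demonstration of the same ucontext field access in an ordinary SA_SIGINFO handler (this is a stand-alone sketch, not runtime code, and the REG_* indices are Linux/x86-64 specific):

    #include <signal.h>
    #include <ucontext.h>
    #include <unistd.h>
    #include <cstdio>

    static void handler(int, siginfo_t *, void *context) {
      ucontext_t *uc = static_cast<ucontext_t *>(context);
      unsigned long pc = uc->uc_mcontext.gregs[REG_RIP];
      unsigned long bp = uc->uc_mcontext.gregs[REG_RBP];
      unsigned long sp = uc->uc_mcontext.gregs[REG_RSP];
      fprintf(stderr, "SIGSEGV at pc=%#lx bp=%#lx sp=%#lx\n", pc, bp, sp);
      _exit(1);
    }

    int main() {
      struct sigaction sa = {};
      sa.sa_sigaction = handler;
      sa.sa_flags = SA_SIGINFO;       // request the ucontext third argument
      sigaction(SIGSEGV, &sa, nullptr);
      *(volatile int *)0 = 0;         // fault on purpose
    }
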
bool AsanInterceptsSignal(int signum) {
  return signum == SIGSEGV && common_flags()->handle_segv;
}

void AsanPlatformThreadInit() {
  // Nothing here for now.
}
#endif // SANITIZER_ANDROID

#if !SANITIZER_ANDROID
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {

@@ -242,6 +172,6 @@ void *AsanDlSymNext(const char *sym) {
  return dlsym(RTLD_NEXT, sym);
}

} // namespace __asan
} // namespace __asan

#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
diff --git a/libsanitizer/asan/asan_mac.cc b/libsanitizer/asan/asan_mac.cc
@@ -22,7 +22,14 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mac.h"

#include <crt_externs.h> // for _NSGetArgv
#if !SANITIZER_IOS
#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
#else
extern "C" {
extern char ***_NSGetArgv(void);
}
#endif

#include <dlfcn.h> // for dladdr()
#include <mach-o/dyld.h>
#include <mach-o/loader.h>

@@ -38,19 +45,7 @@

namespace __asan {

void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
  ucontext_t *ucontext = (ucontext_t*)context;
# if SANITIZER_WORDSIZE == 64
  *pc = ucontext->uc_mcontext->__ss.__rip;
  *bp = ucontext->uc_mcontext->__ss.__rbp;
  *sp = ucontext->uc_mcontext->__ss.__rsp;
# else
  *pc = ucontext->uc_mcontext->__ss.__eip;
  *bp = ucontext->uc_mcontext->__ss.__ebp;
  *sp = ucontext->uc_mcontext->__ss.__esp;
# endif // SANITIZER_WORDSIZE
}

void InitializePlatformInterceptors() {}

bool PlatformHasDifferentMemcpyAndMemmove() {
  // On OS X 10.7 memcpy() and memmove() are both resolved

@@ -72,35 +67,51 @@ LowLevelAllocator allocator_for_env;
// otherwise the corresponding "NAME=value" string is replaced with
// |name_value|.
void LeakyResetEnv(const char *name, const char *name_value) {
  char ***env_ptr = _NSGetEnviron();
  CHECK(env_ptr);
  char **environ = *env_ptr;
  CHECK(environ);
  char **env = GetEnviron();
  uptr name_len = internal_strlen(name);
  while (*environ != 0) {
    uptr len = internal_strlen(*environ);
  while (*env != 0) {
    uptr len = internal_strlen(*env);
    if (len > name_len) {
      const char *p = *environ;
      const char *p = *env;
      if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') {
        // Match.
        if (name_value) {
          // Replace the old value with the new one.
          *environ = const_cast<char*>(name_value);
          *env = const_cast<char*>(name_value);
        } else {
          // Shift the subsequent pointers back.
          char **del = environ;
          char **del = env;
          do {
            del[0] = del[1];
          } while (*del++);
        }
      }
    }
    environ++;
    env++;
  }
}

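LeakyResetEnv's deletion branch removes an entry from the NULL-terminated environment array in place by shifting every subsequent pointer (including the terminating NULL) one slot left. A self-contained version of just that idiom, with the array index supplied directly instead of matched by name:

    #include <cstdio>

    static void RemoveAt(const char **arr, int idx) {
      const char **del = arr + idx;
      do {
        del[0] = del[1];   // shift subsequent pointers back, including the NULL
      } while (*del++);
    }

    int main() {
      const char *env[] = {"A=1", "B=2", "C=3", nullptr};
      RemoveAt(env, 1);    // drop "B=2"
      for (const char **e = env; *e; e++) printf("%s\n", *e);  // A=1, C=3
    }
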
static bool reexec_disabled = false;

void DisableReexec() {
  reexec_disabled = true;
}

extern "C" double dyldVersionNumber;
static const double kMinDyldVersionWithAutoInterposition = 360.0;

bool DyldNeedsEnvVariable() {
  // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
  // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via
  // GetMacosVersion() doesn't work for the simulator. Let's instead check
  // `dyldVersionNumber`, which is exported by dyld, against a known version
  // number from the first OS release where this appeared.
  return dyldVersionNumber < kMinDyldVersionWithAutoInterposition;
}

void MaybeReexec() {
  if (!flags()->allow_reexec) return;
  if (reexec_disabled) return;

  // Make sure the dynamic ASan runtime library is preloaded so that the
  // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
  // ourselves.

@@ -111,8 +122,12 @@ void MaybeReexec() {
  uptr old_env_len = dyld_insert_libraries ?
      internal_strlen(dyld_insert_libraries) : 0;
  uptr fname_len = internal_strlen(info.dli_fname);
  if (!dyld_insert_libraries ||
      !REAL(strstr)(dyld_insert_libraries, info.dli_fname)) {
  const char *dylib_name = StripModuleName(info.dli_fname);
  uptr dylib_name_len = internal_strlen(dylib_name);

  bool lib_is_in_env =
      dyld_insert_libraries && REAL(strstr)(dyld_insert_libraries, dylib_name);
  if (DyldNeedsEnvVariable() && !lib_is_in_env) {
    // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
    // library.
    char program_name[1024];

@@ -138,58 +153,77 @@ void MaybeReexec() {
    VReport(1, "exec()-ing the program with\n");
    VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
    VReport(1, "to enable ASan wrappers.\n");
    VReport(1, "Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
    execv(program_name, *_NSGetArgv());
  } else {
    // DYLD_INSERT_LIBRARIES is set and contains the runtime library.
    if (old_env_len == fname_len) {
      // It's just the runtime library name - fine to unset the variable.
      LeakyResetEnv(kDyldInsertLibraries, NULL);
    } else {
      uptr env_name_len = internal_strlen(kDyldInsertLibraries);
      // Allocate memory to hold the previous env var name, its value, the '='
      // sign and the '\0' char.
      char *new_env = (char*)allocator_for_env.Allocate(
          old_env_len + 2 + env_name_len);
      CHECK(new_env);
      internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
      internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
      new_env[env_name_len] = '=';
      char *new_env_pos = new_env + env_name_len + 1;

      // Iterate over colon-separated pieces of |dyld_insert_libraries|.
      char *piece_start = dyld_insert_libraries;
      char *piece_end = NULL;
      char *old_env_end = dyld_insert_libraries + old_env_len;
      do {
        if (piece_start[0] == ':') piece_start++;
        piece_end = REAL(strchr)(piece_start, ':');
        if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
        if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
        uptr piece_len = piece_end - piece_start;

        // If the current piece isn't the runtime library name,
        // append it to new_env.
        if ((piece_len != fname_len) ||
            (internal_strncmp(piece_start, info.dli_fname, fname_len) != 0)) {
          if (new_env_pos != new_env + env_name_len + 1) {
            new_env_pos[0] = ':';
            new_env_pos++;
          }
          internal_strncpy(new_env_pos, piece_start, piece_len);
        }
        // Move on to the next piece.
        new_env_pos += piece_len;
        piece_start = piece_end;
      } while (piece_start < old_env_end);

      // Can't use setenv() here, because it requires the allocator to be
      // initialized.
      // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
      // a separate function called after InitializeAllocator().
      LeakyResetEnv(kDyldInsertLibraries, new_env);
    }
    // We get here only if execv() failed.
    Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
           "which is required for ASan to work. ASan tried to set the "
           "environment variable and re-execute itself, but execv() failed, "
           "possibly because of sandbox restrictions. Make sure to launch the "
           "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
    CHECK("execv failed" && 0);
  }

  if (!lib_is_in_env)
    return;

  // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
  // the dylib from the environment variable, because interceptors are installed
  // and we don't want our children to inherit the variable.

  uptr env_name_len = internal_strlen(kDyldInsertLibraries);
  // Allocate memory to hold the previous env var name, its value, the '='
  // sign and the '\0' char.
  char *new_env = (char*)allocator_for_env.Allocate(
      old_env_len + 2 + env_name_len);
  CHECK(new_env);
  internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
  internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
  new_env[env_name_len] = '=';
  char *new_env_pos = new_env + env_name_len + 1;

  // Iterate over colon-separated pieces of |dyld_insert_libraries|.
  char *piece_start = dyld_insert_libraries;
  char *piece_end = NULL;
  char *old_env_end = dyld_insert_libraries + old_env_len;
  do {
    if (piece_start[0] == ':') piece_start++;
    piece_end = REAL(strchr)(piece_start, ':');
    if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
    if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
    uptr piece_len = piece_end - piece_start;

    char *filename_start =
        (char *)internal_memrchr(piece_start, '/', piece_len);
    uptr filename_len = piece_len;
    if (filename_start) {
      filename_start += 1;
      filename_len = piece_len - (filename_start - piece_start);
    } else {
      filename_start = piece_start;
    }

    // If the current piece isn't the runtime library name,
    // append it to new_env.
    if ((dylib_name_len != filename_len) ||
        (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
      if (new_env_pos != new_env + env_name_len + 1) {
        new_env_pos[0] = ':';
        new_env_pos++;
      }
      internal_strncpy(new_env_pos, piece_start, piece_len);
      new_env_pos += piece_len;
    }
    // Move on to the next piece.
    piece_start = piece_end;
  } while (piece_start < old_env_end);

  // Can't use setenv() here, because it requires the allocator to be
  // initialized.
  // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
  // a separate function called after InitializeAllocator().
  if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
  LeakyResetEnv(kDyldInsertLibraries, new_env);
}

// No-op. Mac does not support static linkage anyway.
|
||||
|

@@ -203,14 +237,6 @@ void AsanCheckDynamicRTPrereqs() {}

// No-op. Mac does not support static linkage anyway.
void AsanCheckIncompatibleRT() {}

bool AsanInterceptsSignal(int signum) {
  return (signum == SIGSEGV || signum == SIGBUS) &&
         common_flags()->handle_segv;
}

void AsanPlatformThreadInit() {
}

void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
  UNIMPLEMENTED();
}

@@ -262,9 +288,8 @@ ALWAYS_INLINE
void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    t = AsanThread::Create(0, 0);
    CreateThreadContextArgs args = { t, stack };
    asanThreadRegistry().CreateThread(*(uptr*)t, true, parent_tid, &args);
    t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr,
                           parent_tid, stack, /* detached */ true);
    t->Init();
    asanThreadRegistry().StartThread(t->tid(), 0, 0);
    SetCurrentThread(t);

@@ -311,7 +336,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
                     dispatch_function_t func) { \
    GET_STACK_TRACE_THREAD; \
    asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
    if (common_flags()->verbosity >= 2) { \
    if (Verbosity() >= 2) { \
      Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
             asan_ctxt, pthread_self()); \
      PRINT_CURRENT_STACK(); \

@@ -329,7 +354,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
                    dispatch_function_t func) {
  GET_STACK_TRACE_THREAD;
  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
  if (common_flags()->verbosity >= 2) {
  if (Verbosity() >= 2) {
    Report("dispatch_after_f: %p\n", asan_ctxt);
    PRINT_CURRENT_STACK();
  }

@@ -342,7 +367,7 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
                    dispatch_function_t func) {
  GET_STACK_TRACE_THREAD;
  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
  if (common_flags()->verbosity >= 2) {
  if (Verbosity() >= 2) {
    Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
           asan_ctxt, pthread_self());
    PRINT_CURRENT_STACK();

@@ -372,13 +397,6 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
    work(); \
  }

// Forces the compiler to generate a frame pointer in the function.
#define ENABLE_FRAME_POINTER \
  do { \
    volatile uptr enable_fp; \
    enable_fp = GET_CURRENT_FRAME(); \
  } while (0)

INTERCEPTOR(void, dispatch_async,
            dispatch_queue_t dq, void(^work)(void)) {
  ENABLE_FRAME_POINTER;

@@ -402,6 +420,10 @@ INTERCEPTOR(void, dispatch_after,

INTERCEPTOR(void, dispatch_source_set_cancel_handler,
            dispatch_source_t ds, void(^work)(void)) {
  if (!work) {
    REAL(dispatch_source_set_cancel_handler)(ds, work);
    return;
  }
  ENABLE_FRAME_POINTER;
  GET_ASAN_BLOCK(work);
  REAL(dispatch_source_set_cancel_handler)(ds, asan_block);

@@ -88,9 +88,9 @@ INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
  ENSURE_ASAN_INITED();
  // Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes.
  size_t buflen = 6 + (name ? internal_strlen(name) : 0);
  InternalScopedBuffer<char> new_name(buflen);
  InternalScopedString new_name(buflen);
  if (name && zone->introspect == asan_zone.introspect) {
    internal_snprintf(new_name.data(), buflen, "asan-%s", name);
    new_name.append("asan-%s", name);
    name = new_name.data();
  }

@@ -150,13 +150,17 @@ INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {

namespace {

// TODO(glider): the mz_* functions should be united with the Linux wrappers,
// as they are basically copied from there.
size_t mz_size(malloc_zone_t* zone, const void* ptr) {
// TODO(glider): the __asan_mz_* functions should be united with the Linux
// wrappers, as they are basically copied from there.
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
size_t __asan_mz_size(malloc_zone_t* zone, const void* ptr) {
  return asan_mz_size(ptr);
}

void *mz_malloc(malloc_zone_t *zone, size_t size) {
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_mz_malloc(malloc_zone_t *zone, uptr size) {
  if (UNLIKELY(!asan_inited)) {
    CHECK(system_malloc_zone);
    return malloc_zone_malloc(system_malloc_zone, size);

@@ -165,7 +169,9 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) {
  return asan_malloc(size, &stack);
}

void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
  if (UNLIKELY(!asan_inited)) {
    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
    const size_t kCallocPoolSize = 1024;

@@ -181,7 +187,9 @@ void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
  return asan_calloc(nmemb, size, &stack);
}

void *mz_valloc(malloc_zone_t *zone, size_t size) {
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_mz_valloc(malloc_zone_t *zone, size_t size) {
  if (UNLIKELY(!asan_inited)) {
    CHECK(system_malloc_zone);
    return malloc_zone_valloc(system_malloc_zone, size);

@@ -208,11 +216,15 @@ void ALWAYS_INLINE free_common(void *context, void *ptr) {
}

// TODO(glider): the allocation callbacks need to be refactored.
void mz_free(malloc_zone_t *zone, void *ptr) {
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_mz_free(malloc_zone_t *zone, void *ptr) {
  free_common(zone, ptr);
}

void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
  if (!ptr) {
    GET_STACK_TRACE_MALLOC;
    return asan_malloc(size, &stack);

@@ -231,15 +243,16 @@ void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
  }
}

void mz_destroy(malloc_zone_t* zone) {
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_mz_destroy(malloc_zone_t* zone) {
  // A no-op -- we will not be destroyed!
  Report("mz_destroy() called -- ignoring\n");
  Report("__asan_mz_destroy() called -- ignoring\n");
}

// from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
  if (UNLIKELY(!asan_inited)) {
    CHECK(system_malloc_zone);
    return malloc_zone_memalign(system_malloc_zone, align, size);

@@ -250,12 +263,12 @@ void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {

// This function is currently unused, and we build with -Werror.
#if 0
void mz_free_definite_size(malloc_zone_t* zone, void *ptr, size_t size) {
void __asan_mz_free_definite_size(
    malloc_zone_t* zone, void *ptr, size_t size) {
  // TODO(glider): check that |size| is valid.
  UNIMPLEMENTED();
}
#endif
#endif

kern_return_t mi_enumerator(task_t task, void *,
                            unsigned type_mask, vm_address_t zone_address,

@@ -297,13 +310,10 @@ void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
  internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
}

#if defined(MAC_OS_X_VERSION_10_6) && \
    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
boolean_t mi_zone_locked(malloc_zone_t *zone) {
  // UNIMPLEMENTED();
  return false;
}
#endif

} // unnamed namespace

@@ -322,32 +332,25 @@ void ReplaceSystemMalloc() {
  asan_introspection.force_lock = &mi_force_lock;
  asan_introspection.force_unlock = &mi_force_unlock;
  asan_introspection.statistics = &mi_statistics;
  asan_introspection.zone_locked = &mi_zone_locked;

  internal_memset(&asan_zone, 0, sizeof(malloc_zone_t));

  // Start with a version 4 zone which is used for OS X 10.4 and 10.5.
  asan_zone.version = 4;
  // Use version 6 for OSX >= 10.6.
  asan_zone.version = 6;
  asan_zone.zone_name = "asan";
  asan_zone.size = &mz_size;
  asan_zone.malloc = &mz_malloc;
  asan_zone.calloc = &mz_calloc;
  asan_zone.valloc = &mz_valloc;
  asan_zone.free = &mz_free;
  asan_zone.realloc = &mz_realloc;
  asan_zone.destroy = &mz_destroy;
  asan_zone.size = &__asan_mz_size;
  asan_zone.malloc = &__asan_mz_malloc;
  asan_zone.calloc = &__asan_mz_calloc;
  asan_zone.valloc = &__asan_mz_valloc;
  asan_zone.free = &__asan_mz_free;
  asan_zone.realloc = &__asan_mz_realloc;
  asan_zone.destroy = &__asan_mz_destroy;
  asan_zone.batch_malloc = 0;
  asan_zone.batch_free = 0;
  asan_zone.introspect = &asan_introspection;

  // from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
  // Switch to version 6 on OSX 10.6 to support memalign.
  asan_zone.version = 6;
  asan_zone.free_definite_size = 0;
  asan_zone.memalign = &mz_memalign;
  asan_introspection.zone_locked = &mi_zone_locked;
#endif
  asan_zone.memalign = &__asan_mz_memalign;
  asan_zone.introspect = &asan_introspection;

  // Register the ASan zone.
  malloc_zone_register(&asan_zone);

@@ -17,7 +17,7 @@
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_interception.h"
#include "interception/interception.h"

#include <stddef.h>

@@ -57,13 +57,34 @@
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
//
// Default Linux/MIPS mapping:
// Default Linux/MIPS32 mapping:
// || `[0x2aaa0000, 0xffffffff]` || HighMem ||
// || `[0x0fff4000, 0x2aa9ffff]` || HighShadow ||
// || `[0x0bff4000, 0x0fff3fff]` || ShadowGap ||
// || `[0x0aaa0000, 0x0bff3fff]` || LowShadow ||
// || `[0x00000000, 0x0aa9ffff]` || LowMem ||
//
// Default Linux/MIPS64 mapping:
// || `[0x4000000000, 0xffffffffff]` || HighMem ||
// || `[0x2800000000, 0x3fffffffff]` || HighShadow ||
// || `[0x2400000000, 0x27ffffffff]` || ShadowGap ||
// || `[0x2000000000, 0x23ffffffff]` || LowShadow ||
// || `[0x0000000000, 0x1fffffffff]` || LowMem ||
//
// Default Linux/AArch64 (39-bit VMA) mapping:
// || `[0x2000000000, 0x7fffffffff]` || highmem ||
// || `[0x1400000000, 0x1fffffffff]` || highshadow ||
// || `[0x1200000000, 0x13ffffffff]` || shadowgap ||
// || `[0x1000000000, 0x11ffffffff]` || lowshadow ||
// || `[0x0000000000, 0x0fffffffff]` || lowmem ||
//
// Default Linux/AArch64 (42-bit VMA) mapping:
// || `[0x10000000000, 0x3ffffffffff]` || highmem ||
// || `[0x0a000000000, 0x0ffffffffff]` || highshadow ||
// || `[0x09000000000, 0x09fffffffff]` || shadowgap ||
// || `[0x08000000000, 0x08fffffffff]` || lowshadow ||
// || `[0x00000000000, 0x07fffffffff]` || lowmem ||
//
// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||

@@ -77,36 +98,56 @@
// || `[0x48000000, 0x4bffffff]` || ShadowGap ||
// || `[0x40000000, 0x47ffffff]` || LowShadow ||
// || `[0x00000000, 0x3fffffff]` || LowMem ||
//
// Default Windows/i386 mapping:
// (the exact location of HighShadow/HighMem may vary depending
// on WoW64, /LARGEADDRESSAWARE, etc).
// || `[0x50000000, 0xffffffff]` || HighMem ||
// || `[0x3a000000, 0x4fffffff]` || HighShadow ||
// || `[0x36000000, 0x39ffffff]` || ShadowGap ||
// || `[0x30000000, 0x35ffffff]` || LowShadow ||
// || `[0x00000000, 0x2fffffff]` || LowMem ||

static const u64 kDefaultShadowScale = 3;
static const u64 kDefaultShadowOffset32 = 1ULL << 29;  // 0x20000000
static const u64 kIosShadowOffset32 = 1ULL << 30;  // 0x40000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000;  // < 2G.
static const u64 kIosShadowOffset32 = 1ULL << 30;  // 0x40000000
static const u64 kIosShadowOffset64 = 0x130000000;
static const u64 kIosSimShadowOffset32 = 1ULL << 30;
static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64;
#if SANITIZER_AARCH64_VMA == 39
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
#elif SANITIZER_AARCH64_VMA == 42
static const u64 kAArch64_ShadowOffset64 = 1ULL << 39;
#endif
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 36;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30;  // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46;  // 0x400000000000
static const u64 kWindowsShadowOffset32 = 3ULL << 28;  // 0x30000000

#define SHADOW_SCALE kDefaultShadowScale
#if SANITIZER_ANDROID
# define SHADOW_OFFSET (0)
#else
# if SANITIZER_WORDSIZE == 32
#  if defined(__mips__)

#if SANITIZER_WORDSIZE == 32
# if SANITIZER_ANDROID
#  define SHADOW_OFFSET (0)
# elif defined(__mips__)
#  define SHADOW_OFFSET kMIPS32_ShadowOffset32
# elif SANITIZER_FREEBSD
#  define SHADOW_OFFSET kFreeBSD_ShadowOffset32
# elif SANITIZER_WINDOWS
#  define SHADOW_OFFSET kWindowsShadowOffset32
# elif SANITIZER_IOSSIM
#  define SHADOW_OFFSET kIosSimShadowOffset32
# elif SANITIZER_IOS
#  define SHADOW_OFFSET kIosShadowOffset32
# else
#  if SANITIZER_IOS
#   define SHADOW_OFFSET kIosShadowOffset32
#  else
#   define SHADOW_OFFSET kDefaultShadowOffset32
#  endif
#  define SHADOW_OFFSET kDefaultShadowOffset32
# endif
# else
#else
# if defined(__aarch64__)
#  define SHADOW_OFFSET kAArch64_ShadowOffset64
# elif defined(__powerpc64__)

@@ -117,10 +158,13 @@ static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46;  // 0x400000000000
#  define SHADOW_OFFSET kDefaultShadowOffset64
# elif defined(__mips64)
#  define SHADOW_OFFSET kMIPS64_ShadowOffset64
# elif SANITIZER_IOSSIM
#  define SHADOW_OFFSET kIosSimShadowOffset64
# elif SANITIZER_IOS
#  define SHADOW_OFFSET kIosShadowOffset64
# else
#  define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
# endif
#endif

#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)

@@ -143,7 +187,8 @@ static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46;  // 0x400000000000

// With the zero shadow base we can not actually map pages starting from 0.
// This constant is somewhat arbitrary.
#define kZeroBaseShadowStart (1 << 18)
#define kZeroBaseShadowStart 0
#define kZeroBaseMaxShadowStart (1 << 18)

#define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 \
                                     : kZeroBaseShadowStart)

@@ -14,7 +14,7 @@
#include "asan_internal.h"
#include "asan_stack.h"

#include "sanitizer_common/sanitizer_interception.h"
#include "interception/interception.h"

#include <stddef.h>

@@ -88,11 +88,11 @@ INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {

#if !SANITIZER_MAC
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr) throw() {
void operator delete(void *ptr) NOEXCEPT {
  OPERATOR_DELETE_BODY(FROM_NEW);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr) throw() {
void operator delete[](void *ptr) NOEXCEPT {
  OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
CXX_OPERATOR_ATTRIBUTE

@@ -104,12 +104,12 @@ void operator delete[](void *ptr, std::nothrow_t const&) {
  OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, size_t size) throw() {
void operator delete(void *ptr, size_t size) NOEXCEPT {
  GET_STACK_TRACE_FREE;
  asan_sized_free(ptr, size, &stack, FROM_NEW);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size) throw() {
void operator delete[](void *ptr, size_t size) NOEXCEPT {
  GET_STACK_TRACE_FREE;
  asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
}

@@ -13,13 +13,24 @@
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"

namespace __asan {

static atomic_uint8_t can_poison_memory;

void SetCanPoisonMemory(bool value) {
  atomic_store(&can_poison_memory, value, memory_order_release);
}

bool CanPoisonMemory() {
  return atomic_load(&can_poison_memory, memory_order_acquire);
}

void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (!flags()->poison_heap) return;
  if (!CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));

@@ -32,7 +43,7 @@ void PoisonShadowPartialRightRedzone(uptr addr,
                                     uptr size,
                                     uptr redzone_size,
                                     u8 value) {
  if (!flags()->poison_heap) return;
  if (!CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);

@@ -61,10 +72,10 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {

void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
  uptr end = ptr + size;
  if (common_flags()->verbosity) {
  if (Verbosity()) {
    Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
           poison ? "" : "un", ptr, end, size);
    if (common_flags()->verbosity >= 2)
    if (Verbosity() >= 2)
      PRINT_CURRENT_STACK();
  }
  CHECK(size);

@@ -99,7 +110,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(1, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
  VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);

@@ -139,7 +150,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(1, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
  VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);

@@ -205,7 +216,7 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
        __asan::AddressIsPoisoned(__p + __size - 1))) { \
      GET_CURRENT_PC_BP_SP; \
      uptr __bad = __asan_region_is_poisoned(__p, __size); \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size);\
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);\
    } \
  } while (false); \

@@ -17,6 +17,10 @@

namespace __asan {

// Enable/disable memory poisoning.
void SetCanPoisonMemory(bool value);
bool CanPoisonMemory();

// Poisons the shadow memory for "size" bytes starting from "addr".
void PoisonShadow(uptr addr, uptr size, u8 value);

@@ -32,7 +36,7 @@ void PoisonShadowPartialRightRedzone(uptr addr,
// performance-critical code with care.
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
                                    u8 value) {
  DCHECK(flags()->poison_heap);
  DCHECK(CanPoisonMemory());
  uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
  uptr shadow_end = MEM_TO_SHADOW(
      aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;

@@ -58,15 +62,14 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
      if (page_end != shadow_end) {
        REAL(memset)((void *)page_end, 0, shadow_end - page_end);
      }
      void *res = MmapFixedNoReserve(page_beg, page_end - page_beg);
      CHECK_EQ(page_beg, res);
      ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr);
    }
  }
}

ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
    uptr aligned_addr, uptr size, uptr redzone_size, u8 value) {
  DCHECK(flags()->poison_heap);
  DCHECK(CanPoisonMemory());
  bool poison_partial = flags()->poison_partial;
  u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
  for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {

@@ -19,6 +19,7 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"

#include <pthread.h>

@@ -30,26 +31,52 @@

namespace __asan {

void AsanOnSIGSEGV(int, void *siginfo, void *context) {
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  ScopedDeadlySignal signal_scope(GetCurrentThread());
  uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr;
  int code = (int)((siginfo_t*)siginfo)->si_code;
  // Write the first message using the bullet-proof write.
  if (13 != internal_write(2, "ASAN:SIGSEGV\n", 13)) Die();
  uptr pc, sp, bp;
  GetPcSpBp(context, &pc, &sp, &bp);
  if (18 != internal_write(2, "ASAN:DEADLYSIGNAL\n", 18)) Die();
  SignalContext sig = SignalContext::Create(siginfo, context);

  // Access at a reasonable offset above SP, or slightly below it (to account
  // for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
  // probably a stack overflow.
  bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF;

#if __powerpc__
  // Large stack frames can be allocated with e.g.
  //   lis r0,-10000
  //   stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000
  // If the store faults then sp will not have been updated, so the test above
  // will not work, because the fault address will be more than just "slightly"
  // below sp.
  if (!IsStackAccess && IsAccessibleMemoryRange(sig.pc, 4)) {
    u32 inst = *(unsigned *)sig.pc;
    u32 ra = (inst >> 16) & 0x1F;
    u32 opcd = inst >> 26;
    u32 xo = (inst >> 1) & 0x3FF;
    // Check for store-with-update to sp. The instructions we accept are:
    //   stbu rs,d(ra)  stbux rs,ra,rb
    //   sthu rs,d(ra)  sthux rs,ra,rb
    //   stwu rs,d(ra)  stwux rs,ra,rb
    //   stdu rs,ds(ra) stdux rs,ra,rb
    // where ra is r1 (the stack pointer).
    if (ra == 1 &&
        (opcd == 39 || opcd == 45 || opcd == 37 || opcd == 62 ||
         (opcd == 31 && (xo == 247 || xo == 439 || xo == 183 || xo == 181))))
      IsStackAccess = true;
  }
#endif // __powerpc__

  // We also check si_code to filter out SEGV caused by something other
  // than hitting the guard page or unmapped memory, like, for example,
  // unaligned memory access.
  if (addr + 512 > sp && addr < sp + 0xFFFF &&
      (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
    ReportStackOverflow(pc, sp, bp, context, addr);
  if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
    ReportStackOverflow(sig);
  else if (signo == SIGFPE)
    ReportDeadlySignal("FPE", sig);
  else
    ReportSIGSEGV("SEGV", pc, sp, bp, context, addr);
    ReportDeadlySignal("SEGV", sig);
}

// ---------------------- TSD ---------------- {{{1

@@ -11,9 +11,13 @@
//===----------------------------------------------------------------------===//
#include "asan_internal.h"

using namespace __asan;

#if SANITIZER_CAN_USE_PREINIT_ARRAY
// The symbol is called __local_asan_preinit, because it's not intended to be
// exported.
// This code is linked into the main executable when -fsanitize=address is in
// the link flags. It can only use exported interface functions.
__attribute__((section(".preinit_array"), used))
void (*__local_asan_preinit)(void) = __asan_init;
#endif

@@ -9,6 +9,7 @@
//
// This file contains error reporting code.
//===----------------------------------------------------------------------===//

#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_mapping.h"

@@ -25,7 +26,7 @@ namespace __asan {

// -------------------- User-specified callbacks ----------------- {{{1
static void (*error_report_callback)(const char*);
static char *error_message_buffer = 0;
static char *error_message_buffer = nullptr;
static uptr error_message_buffer_pos = 0;
static uptr error_message_buffer_size = 0;

@@ -51,7 +52,7 @@ void AppendToErrorMessageBuffer(const char *buffer) {
                     buffer, remaining);
    error_message_buffer[error_message_buffer_size - 1] = '\0';
    // FIXME: reallocate the buffer instead of truncating the message.
    error_message_buffer_pos += remaining > length ? length : remaining;
    error_message_buffer_pos += Min(remaining, length);
  }
}

@@ -85,6 +86,8 @@ class Decorator: public __sanitizer::SanitizerCommonDecorator {
      return Cyan();
    case kAsanUserPoisonedMemoryMagic:
    case kAsanContiguousContainerOOBMagic:
    case kAsanAllocaLeftMagic:
    case kAsanAllocaRightMagic:
      return Blue();
    case kAsanStackUseAfterScopeMagic:
      return Magenta();

@@ -171,6 +174,8 @@ static void PrintLegend(InternalScopedString *str) {
  PrintShadowByte(str, " Intra object redzone: ",
                  kAsanIntraObjectRedzone);
  PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
  PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
  PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
}

void MaybeDumpInstructionBytes(uptr pc) {

@@ -275,9 +280,8 @@ static void PrintGlobalLocation(InternalScopedString *str,
    str->append(":%d", g.location->column_no);
}

bool DescribeAddressRelativeToGlobal(uptr addr, uptr size,
                                     const __asan_global &g) {
  if (!IsAddressNearGlobal(addr, g)) return false;
static void DescribeAddressRelativeToGlobal(uptr addr, uptr size,
                                            const __asan_global &g) {
  InternalScopedString str(4096);
  Decorator d;
  str.append("%s", d.Location());

@@ -300,6 +304,26 @@ bool DescribeAddressRelativeToGlobal(uptr addr, uptr size,
  str.append("%s", d.EndLocation());
  PrintGlobalNameIfASCII(&str, g);
  Printf("%s", str.data());
}

static bool DescribeAddressIfGlobal(uptr addr, uptr size,
                                    const char *bug_type) {
  // Assume address is close to at most four globals.
  const int kMaxGlobalsInReport = 4;
  __asan_global globals[kMaxGlobalsInReport];
  u32 reg_sites[kMaxGlobalsInReport];
  int globals_num =
      GetGlobalsForAddress(addr, globals, reg_sites, ARRAY_SIZE(globals));
  if (globals_num == 0)
    return false;
  for (int i = 0; i < globals_num; i++) {
    DescribeAddressRelativeToGlobal(addr, size, globals[i]);
    if (0 == internal_strcmp(bug_type, "initialization-order-fiasco") &&
        reg_sites[i]) {
      Printf(" registered at:\n");
      StackDepotGet(reg_sites[i]).Print();
    }
  }
  return true;
}

@@ -348,7 +372,7 @@ static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
                                          uptr next_var_beg) {
  uptr var_end = var.beg + var.size;
  uptr addr_end = addr + access_size;
  const char *pos_descr = 0;
  const char *pos_descr = nullptr;
  // If the variable [var.beg, var_end) is the nearest variable to the
  // current memory access, indicate it in the log.
  if (addr >= var.beg) {

@@ -519,7 +543,7 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
  StackTrace alloc_stack = chunk.GetAllocStack();
  char tname[128];
  Decorator d;
  AsanThreadContext *free_thread = 0;
  AsanThreadContext *free_thread = nullptr;
  if (chunk.FreeTid() != kInvalidTid) {
    free_thread = GetThreadContextByTidLocked(chunk.FreeTid());
    Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),

@@ -545,12 +569,12 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
    DescribeThread(alloc_thread);
}

void DescribeAddress(uptr addr, uptr access_size) {
static void DescribeAddress(uptr addr, uptr access_size, const char *bug_type) {
  // Check if this is shadow or shadow gap.
  if (DescribeAddressIfShadow(addr))
    return;
  CHECK(AddrIsInMem(addr));
  if (DescribeAddressIfGlobal(addr, access_size))
  if (DescribeAddressIfGlobal(addr, access_size, bug_type))
    return;
  if (DescribeAddressIfStack(addr, access_size))
    return;

@@ -572,6 +596,11 @@ void DescribeThread(AsanThreadContext *context) {
  InternalScopedString str(1024);
  str.append("Thread T%d%s", context->tid,
             ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
  if (context->parent_tid == kInvalidTid) {
    str.append(" created by unknown thread\n");
    Printf("%s", str.data());
    return;
  }
  str.append(
      " created by T%d%s here:\n", context->parent_tid,
      ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));

@@ -609,7 +638,7 @@ class ScopedInErrorReport {
      }
      // If we're still not dead for some reason, use raw _exit() instead of
      // Die() to bypass any additional checks.
      internal__exit(flags()->exitcode);
      internal__exit(common_flags()->exitcode);
    }
    if (report) report_data = *report;
    report_happened = true;

@@ -641,40 +670,39 @@ class ScopedInErrorReport {
  }
};

void ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr) {
void ReportStackOverflow(const SignalContext &sig) {
  ScopedInErrorReport in_report;
  Decorator d;
  Printf("%s", d.Warning());
  Report(
      "ERROR: AddressSanitizer: stack-overflow on address %p"
      " (pc %p bp %p sp %p T%d)\n",
      (void *)addr, (void *)pc, (void *)bp, (void *)sp,
      (void *)sig.addr, (void *)sig.pc, (void *)sig.bp, (void *)sig.sp,
      GetCurrentTidOrInvalid());
  Printf("%s", d.EndWarning());
  GET_STACK_TRACE_SIGNAL(pc, bp, context);
  GET_STACK_TRACE_SIGNAL(sig);
  stack.Print();
  ReportErrorSummary("stack-overflow", &stack);
}

void ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp,
                   void *context, uptr addr) {
void ReportDeadlySignal(const char *description, const SignalContext &sig) {
  ScopedInErrorReport in_report;
  Decorator d;
  Printf("%s", d.Warning());
  Report(
      "ERROR: AddressSanitizer: %s on unknown address %p"
      " (pc %p bp %p sp %p T%d)\n",
      description, (void *)addr, (void *)pc, (void *)bp, (void *)sp,
      GetCurrentTidOrInvalid());
  if (pc < GetPageSizeCached()) {
      description, (void *)sig.addr, (void *)sig.pc, (void *)sig.bp,
      (void *)sig.sp, GetCurrentTidOrInvalid());
  if (sig.pc < GetPageSizeCached()) {
    Report("Hint: pc points to the zero page.\n");
  }
  Printf("%s", d.EndWarning());
  GET_STACK_TRACE_SIGNAL(pc, bp, context);
  GET_STACK_TRACE_SIGNAL(sig);
  stack.Print();
  MaybeDumpInstructionBytes(pc);
  MaybeDumpInstructionBytes(sig.pc);
  Printf("AddressSanitizer can not provide additional info.\n");
  ReportErrorSummary("SEGV", &stack);
  ReportErrorSummary(description, &stack);
}

void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {

@@ -800,8 +828,8 @@ void ReportStringFunctionMemoryRangesOverlap(const char *function,
         bug_type, offset1, offset1 + length1, offset2, offset2 + length2);
  Printf("%s", d.EndWarning());
  stack->Print();
  DescribeAddress((uptr)offset1, length1);
  DescribeAddress((uptr)offset2, length2);
  DescribeAddress((uptr)offset1, length1, bug_type);
  DescribeAddress((uptr)offset2, length2, bug_type);
  ReportErrorSummary(bug_type, stack);
}

@@ -814,7 +842,7 @@ void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
  Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size);
  Printf("%s", d.EndWarning());
  stack->Print();
  DescribeAddress(offset, size);
  DescribeAddress(offset, size, bug_type);
  ReportErrorSummary(bug_type, stack);
}

@@ -829,6 +857,9 @@ void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
         " old_mid : %p\n"
         " new_mid : %p\n",
         beg, end, old_mid, new_mid);
  uptr granularity = SHADOW_GRANULARITY;
  if (!IsAligned(beg, granularity))
    Report("ERROR: beg is not aligned by %d\n", granularity);
  stack->Print();
  ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
}

@@ -866,15 +897,16 @@ void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
static NOINLINE void
ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, uptr a1, uptr a2) {
  ScopedInErrorReport in_report;
  const char *bug_type = "invalid-pointer-pair";
  Decorator d;
  Printf("%s", d.Warning());
  Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n", a1, a2);
  Printf("%s", d.EndWarning());
  GET_STACK_TRACE_FATAL(pc, bp);
  stack.Print();
  DescribeAddress(a1, 1);
  DescribeAddress(a2, 1);
  ReportErrorSummary("invalid-pointer-pair", &stack);
  DescribeAddress(a1, 1, bug_type);
  DescribeAddress(a2, 1, bug_type);
  ReportErrorSummary(bug_type, &stack);
}

static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {

@@ -925,13 +957,24 @@ void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
  DescribeHeapAddress(addr, 1);
}

} // namespace __asan
}  // namespace __asan

// --------------------------- Interface --------------------- {{{1
using namespace __asan; // NOLINT

void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
                         uptr access_size) {
                         uptr access_size, u32 exp) {
  ENABLE_FRAME_POINTER;

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that remove
  // instrumentation (assess false negatives). Instead of completely removing
  // some instrumentation, compiler can emit special calls into runtime
  // (e.g. __asan_report_exp_load1 instead of __asan_report_load1) and pass
  // mask of experiments (exp).
  // The reaction to a non-zero value of exp is to be defined.
  (void)exp;

  // Determine the error type.
  const char *bug_descr = "unknown-crash";
  if (AddrIsInMem(addr)) {

@@ -980,6 +1023,10 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
      case kAsanIntraObjectRedzone:
        bug_descr = "intra-object-overflow";
        break;
      case kAsanAllocaLeftMagic:
      case kAsanAllocaRightMagic:
        bug_descr = "dynamic-stack-buffer-overflow";
        break;
    }
  }

@@ -1006,7 +1053,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
  GET_STACK_TRACE_FATAL(pc, bp);
  stack.Print();

  DescribeAddress(addr, access_size);
  DescribeAddress(addr, access_size, bug_descr);
  ReportErrorSummary(bug_descr, &stack);
  PrintShadowMemoryForAddress(addr);
}

@@ -1024,7 +1071,7 @@ void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
void __asan_describe_address(uptr addr) {
  // Thread registry must be locked while we're describing an address.
  asanThreadRegistry().Lock();
  DescribeAddress(addr, 1);
  DescribeAddress(addr, 1, "");
  asanThreadRegistry().Unlock();
}

@@ -1069,7 +1116,7 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_cmp(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}
} // extern "C"
}  // extern "C"

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default implementation of __asan_on_error that does nothing

@@ -31,29 +31,25 @@ struct AddressDescription {
  const char *region_kind;
};

// Returns the number of globals close to the provided address and copies
// them to "globals" array.
int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
                         int max_globals);
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr);
// The following functions print an address description depending
// on the memory type (shadow/heap/stack/global).
void DescribeHeapAddress(uptr addr, uptr access_size);
bool DescribeAddressIfGlobal(uptr addr, uptr access_size);
bool DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
                                     const __asan_global &g);
bool IsAddressNearGlobal(uptr addr, const __asan_global &g);
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr);
bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr,
                             bool print = true);
bool ParseFrameDescription(const char *frame_descr,
                           InternalMmapVector<StackVarDescr> *vars);
bool DescribeAddressIfStack(uptr addr, uptr access_size);
// Determines memory type on its own.
void DescribeAddress(uptr addr, uptr access_size);

void DescribeThread(AsanThreadContext *context);

// Different kinds of error reports.
void NORETURN
ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr);
void NORETURN ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp,
                            void *context, uptr addr);
void NORETURN ReportStackOverflow(const SignalContext &sig);
void NORETURN ReportDeadlySignal(const char* description,
                                 const SignalContext &sig);
void NORETURN ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
                                          BufferedStackTrace *free_stack);
void NORETURN ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);

@@ -9,6 +9,7 @@
//
// Main file of the ASan run-time library.
//===----------------------------------------------------------------------===//

#include "asan_activation.h"
#include "asan_allocator.h"
#include "asan_interceptors.h"

@@ -19,12 +20,15 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_suppressions.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "lsan/lsan_common.h"
#include "ubsan/ubsan_init.h"
#include "ubsan/ubsan_platform.h"

int __asan_option_detect_stack_use_after_return; // Global interface symbol.
uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.

@@ -51,13 +55,6 @@ static void AsanDie() {
      UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
    }
  }
  if (common_flags()->coverage)
    __sanitizer_cov_dump();
  if (death_callback)
    death_callback();
  if (flags()->abort_on_error)
    Abort();
  internal__exit(flags()->exitcode);
}

static void AsanCheckFailed(const char *file, int line, const char *cond,

@@ -69,265 +66,9 @@ static void AsanCheckFailed(const char *file, int line, const char *cond,
  Die();
}

// -------------------------- Flags ------------------------- {{{1
static const int kDefaultMallocContextSize = 30;

Flags asan_flags_dont_use_directly; // use via flags().

static const char *MaybeCallAsanDefaultOptions() {
  return (&__asan_default_options) ? __asan_default_options() : "";
}

static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
#ifdef ASAN_DEFAULT_OPTIONS
// Stringize the macro value.
# define ASAN_STRINGIZE(x) #x
# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
  return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
#else
  return "";
#endif
}

static void ParseFlagsFromString(Flags *f, const char *str) {
  CommonFlags *cf = common_flags();
  ParseCommonFlagsFromString(cf, str);
  CHECK((uptr)cf->malloc_context_size <= kStackTraceMax);
  // Please write meaningful flag descriptions when adding new flags.
  ParseFlag(str, &f->quarantine_size, "quarantine_size",
      "Size (in bytes) of quarantine used to detect use-after-free "
      "errors. Lower value may reduce memory usage but increase the "
      "chance of false negatives.");
  ParseFlag(str, &f->redzone, "redzone",
      "Minimal size (in bytes) of redzones around heap objects. "
      "Requirement: redzone >= 16, is a power of two.");
  ParseFlag(str, &f->max_redzone, "max_redzone",
      "Maximal size (in bytes) of redzones around heap objects.");
  CHECK_GE(f->redzone, 16);
  CHECK_GE(f->max_redzone, f->redzone);
  CHECK_LE(f->max_redzone, 2048);
  CHECK(IsPowerOfTwo(f->redzone));
  CHECK(IsPowerOfTwo(f->max_redzone));

  ParseFlag(str, &f->debug, "debug",
      "If set, prints some debugging information and does additional checks.");
  ParseFlag(str, &f->report_globals, "report_globals",
      "Controls the way to handle globals (0 - don't detect buffer overflow on "
      "globals, 1 - detect buffer overflow, 2 - print data about registered "
      "globals).");

  ParseFlag(str, &f->check_initialization_order,
      "check_initialization_order",
      "If set, attempts to catch initialization order issues.");

  ParseFlag(str, &f->replace_str, "replace_str",
      "If set, uses custom wrappers and replacements for libc string functions "
      "to find more errors.");

  ParseFlag(str, &f->replace_intrin, "replace_intrin",
      "If set, uses custom wrappers for memset/memcpy/memmove intrinsics.");
  ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free",
      "Ignore invalid free() calls to work around some bugs. Used on OS X "
      "only.");
  ParseFlag(str, &f->detect_stack_use_after_return,
      "detect_stack_use_after_return",
      "Enables stack-use-after-return checking at run-time.");
  ParseFlag(str, &f->min_uar_stack_size_log, "min_uar_stack_size_log",
      "Minimum fake stack size log.");
  ParseFlag(str, &f->max_uar_stack_size_log, "max_uar_stack_size_log",
      "Maximum fake stack size log.");
  ParseFlag(str, &f->uar_noreserve, "uar_noreserve",
      "Use mmap with 'noreserve' flag to allocate fake stack.");
  ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size",
      "ASan allocator flag. max_malloc_fill_size is the maximal amount of "
      "bytes that will be filled with malloc_fill_byte on malloc.");
  ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte",
      "Value used to fill the newly allocated memory.");
  ParseFlag(str, &f->exitcode, "exitcode",
      "Override the program exit status if the tool found an error.");
  ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning",
      "If set, user may manually mark memory regions as poisoned or "
      "unpoisoned.");
  ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying",
      "Number of seconds to sleep between printing an error report and "
      "terminating the program. Useful for debugging purposes (e.g. when one "
      "needs to attach gdb).");

  ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size",
      "Allows the users to work around the bug in Nvidia drivers prior to "
      "295.*.");

  ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit",
      "If set, explicitly unmaps the (huge) shadow at exit.");
  ParseFlag(str, &f->abort_on_error, "abort_on_error",
      "If set, the tool calls abort() instead of _exit() after printing the "
      "error report.");
  ParseFlag(str, &f->print_stats, "print_stats",
      "Print various statistics after printing an error message or if "
      "atexit=1.");
  ParseFlag(str, &f->print_legend, "print_legend",
      "Print the legend for the shadow bytes.");
  ParseFlag(str, &f->atexit, "atexit",
      "If set, prints ASan exit stats even after program terminates "
      "successfully.");

  ParseFlag(str, &f->allow_reexec, "allow_reexec",
      "Allow the tool to re-exec the program. This may interfere badly with "
      "the debugger.");

  ParseFlag(str, &f->print_full_thread_history,
      "print_full_thread_history",
      "If set, prints thread creation stacks for the threads involved in the "
      "report and their ancestors up to the main thread.");

  ParseFlag(str, &f->poison_heap, "poison_heap",
      "Poison (or not) the heap memory on [de]allocation. Zero value is useful "
      "for benchmarking the allocator or instrumentator.");

  ParseFlag(str, &f->poison_array_cookie, "poison_array_cookie",
      "Poison (or not) the array cookie after operator new[].");

  ParseFlag(str, &f->poison_partial, "poison_partial",
      "If true, poison partially addressable 8-byte aligned words "
      "(default=true). This flag affects heap and global buffers, but not "
      "stack buffers.");

  ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch",
      "Report errors on malloc/delete, new/free, new/delete[], etc.");

  ParseFlag(str, &f->new_delete_type_mismatch, "new_delete_type_mismatch",
      "Report errors on mismatch between size of new and delete.");

  ParseFlag(str, &f->strict_memcmp, "strict_memcmp",
      "If true, assume that memcmp(p1, p2, n) always reads n bytes before "
      "comparing p1 and p2.");

  ParseFlag(str, &f->strict_init_order, "strict_init_order",
      "If true, assume that dynamic initializers can never access globals from "
      "other modules, even if the latter are already initialized.");

  ParseFlag(str, &f->start_deactivated, "start_deactivated",
      "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
      "poisoning) to reduce memory consumption as much as possible, and "
      "restores them to original values when the first instrumented module is "
      "loaded into the process. This is mainly intended to be used on "
      "Android. ");

  ParseFlag(str, &f->detect_invalid_pointer_pairs,
      "detect_invalid_pointer_pairs",
      "If non-zero, try to detect operations like <, <=, >, >= and - on "
      "invalid pointer pairs (e.g. when pointers belong to different objects). "
      "The bigger the value the harder we try.");

  ParseFlag(str, &f->detect_container_overflow,
      "detect_container_overflow",
      "If true, honor the container overflow annotations. "
      "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow");

  ParseFlag(str, &f->detect_odr_violation, "detect_odr_violation",
      "If >=2, detect violation of One-Definition-Rule (ODR); "
      "If ==1, detect ODR-violation only if the two variables "
      "have different sizes");

  ParseFlag(str, &f->dump_instruction_bytes, "dump_instruction_bytes",
      "If true, dump 16 bytes starting at the instruction that caused SEGV");
}

void InitializeFlags(Flags *f, const char *env) {
  CommonFlags *cf = common_flags();
  SetCommonFlagsDefaults(cf);
  cf->detect_leaks = CAN_SANITIZE_LEAKS;
  cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
  cf->malloc_context_size = kDefaultMallocContextSize;
  cf->intercept_tls_get_addr = true;
  cf->coverage = false;

  internal_memset(f, 0, sizeof(*f));
  f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
  f->redzone = 16;
  f->max_redzone = 2048;
  f->debug = false;
  f->report_globals = 1;
  f->check_initialization_order = false;
  f->replace_str = true;
  f->replace_intrin = true;
  f->mac_ignore_invalid_free = false;
  f->detect_stack_use_after_return = false; // Also needs the compiler flag.
  f->min_uar_stack_size_log = 16; // We can't do smaller anyway.
  f->max_uar_stack_size_log = 20; // 1Mb per size class, i.e. ~11Mb per thread.
  f->uar_noreserve = false;
  f->max_malloc_fill_size = 0x1000; // By default, fill only the first 4K.
  f->malloc_fill_byte = 0xbe;
  f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE;
  f->allow_user_poisoning = true;
  f->sleep_before_dying = 0;
  f->check_malloc_usable_size = true;
  f->unmap_shadow_on_exit = false;
  f->abort_on_error = false;
  f->print_stats = false;
  f->print_legend = true;
  f->atexit = false;
  f->allow_reexec = true;
  f->print_full_thread_history = true;
  f->poison_heap = true;
  f->poison_array_cookie = true;
  f->poison_partial = true;
  // Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
  // https://code.google.com/p/address-sanitizer/issues/detail?id=131
  // https://code.google.com/p/address-sanitizer/issues/detail?id=309
  // TODO(glider,timurrrr): Fix known issues and enable this back.
  f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0);
  f->new_delete_type_mismatch = true;
  f->strict_memcmp = true;
  f->strict_init_order = false;
  f->start_deactivated = false;
  f->detect_invalid_pointer_pairs = 0;
  f->detect_container_overflow = true;
  f->detect_odr_violation = 2;
  f->dump_instruction_bytes = false;

  // Override from compile definition.
  ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefinition());

  // Override from user-specified string.
  ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
  VReport(1, "Using the defaults from __asan_default_options: %s\n",
          MaybeCallAsanDefaultOptions());

  // Override from command line.
  ParseFlagsFromString(f, env);
  if (common_flags()->help) {
    PrintFlagDescriptions();
  }

  if (!CAN_SANITIZE_LEAKS && cf->detect_leaks) {
    Report("%s: detect_leaks is not supported on this platform.\n",
           SanitizerToolName);
    cf->detect_leaks = false;
  }

  // Make "strict_init_order" imply "check_initialization_order".
  // TODO(samsonov): Use a single runtime flag for an init-order checker.
  if (f->strict_init_order) {
    f->check_initialization_order = true;
  }
}

// Parse flags that may change between startup and activation.
// On Android they come from a system property.
// On other platforms this is no-op.
void ParseExtraActivationFlags() {
  char buf[100];
  GetExtraActivationFlags(buf, sizeof(buf));
  ParseFlagsFromString(flags(), buf);
  if (buf[0] != '\0')
    VReport(1, "Extra activation flags: %s\n", buf);
}

// -------------------------- Globals --------------------- {{{1
int asan_inited;
bool asan_init_is_running;
void (*death_callback)(void);

#if !ASAN_FIXED_MAPPING
uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;
@ -341,17 +82,22 @@ void ShowStatsAndAbort() {
|
|||
|
||||
// ---------------------- mmap -------------------- {{{1
|
||||
// Reserve memory range [beg, end].
|
||||
static void ReserveShadowMemoryRange(uptr beg, uptr end) {
|
||||
// We need to use inclusive range because end+1 may not be representable.
|
||||
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
|
||||
CHECK_EQ((beg % GetPageSizeCached()), 0);
|
||||
CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
|
||||
uptr size = end - beg + 1;
|
||||
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
|
||||
void *res = MmapFixedNoReserve(beg, size);
|
||||
void *res = MmapFixedNoReserve(beg, size, name);
|
||||
if (res != (void*)beg) {
|
||||
Report("ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
|
||||
"Perhaps you're using ulimit -v\n", size);
|
||||
Abort();
|
||||
}
|
||||
if (common_flags()->no_huge_pages_for_shadow)
|
||||
NoHugePagesInRegion(beg, size);
|
||||
if (common_flags()->use_madv_dontdump)
|
||||
DontDumpShadowMemory(beg, size);
|
||||
}

// --------------- LowLevelAllocateCallback ---------- {{{1
@ -362,11 +108,15 @@ static void OnLowLevelAllocate(uptr ptr, uptr size) {
// -------------------------- Run-time entry ------------------- {{{1
// exported functions
#define ASAN_REPORT_ERROR(type, is_write, size) \
extern "C" NOINLINE INTERFACE_ATTRIBUTE \
void __asan_report_ ## type ## size(uptr addr); \
void __asan_report_ ## type ## size(uptr addr) { \
extern "C" NOINLINE INTERFACE_ATTRIBUTE \
void __asan_report_ ## type ## size(uptr addr) { \
GET_CALLER_PC_BP_SP; \
__asan_report_error(pc, bp, sp, addr, is_write, size); \
__asan_report_error(pc, bp, sp, addr, is_write, size, 0); \
} \
extern "C" NOINLINE INTERFACE_ATTRIBUTE \
void __asan_report_exp_ ## type ## size(uptr addr, u32 exp) { \
GET_CALLER_PC_BP_SP; \
__asan_report_error(pc, bp, sp, addr, is_write, size, exp); \
}

ASAN_REPORT_ERROR(load, false, 1)

@ -382,18 +132,20 @@ ASAN_REPORT_ERROR(store, true, 16)

#define ASAN_REPORT_ERROR_N(type, is_write) \
extern "C" NOINLINE INTERFACE_ATTRIBUTE \
void __asan_report_ ## type ## _n(uptr addr, uptr size); \
void __asan_report_ ## type ## _n(uptr addr, uptr size) { \
GET_CALLER_PC_BP_SP; \
__asan_report_error(pc, bp, sp, addr, is_write, size); \
__asan_report_error(pc, bp, sp, addr, is_write, size, 0); \
} \
extern "C" NOINLINE INTERFACE_ATTRIBUTE \
void __asan_report_exp_ ## type ## _n(uptr addr, uptr size, u32 exp) { \
GET_CALLER_PC_BP_SP; \
__asan_report_error(pc, bp, sp, addr, is_write, size, exp); \
}

ASAN_REPORT_ERROR_N(load, false)
ASAN_REPORT_ERROR_N(store, true)

#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \
extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_##type##size(uptr addr); \
void __asan_##type##size(uptr addr) { \
#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg) \
uptr sp = MEM_TO_SHADOW(addr); \
uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
: *reinterpret_cast<u16 *>(sp); \

@ -405,10 +157,19 @@ ASAN_REPORT_ERROR_N(store, true)
*__asan_test_only_reported_buggy_pointer = addr; \
} else { \
GET_CALLER_PC_BP_SP; \
__asan_report_error(pc, bp, sp, addr, is_write, size); \
__asan_report_error(pc, bp, sp, addr, is_write, size, exp_arg); \
} \
} \
} \
}

#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \
extern "C" NOINLINE INTERFACE_ATTRIBUTE \
void __asan_##type##size(uptr addr) { \
ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0) \
} \
extern "C" NOINLINE INTERFACE_ATTRIBUTE \
void __asan_exp_##type##size(uptr addr, u32 exp) { \
ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp) \
}

ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1)
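The refactoring above splits the inline shadow check into ASAN_MEMORY_ACCESS_CALLBACK_BODY so the plain __asan_load/store callbacks and the new __asan_exp_* variants (which carry an extra "experiment" argument through to __asan_report_error) share one body. A sketch of the check that body performs, written out for the default 1/8 shadow mapping; the offset value is illustrative and the helper assumes the shadow region is already mapped:

#include <cstdint>

constexpr uintptr_t kShadowScale = 3;            // 8 app bytes per shadow byte
constexpr uintptr_t kShadowOffset = 0x7fff8000;  // illustrative value only

inline uint8_t *MemToShadow(uintptr_t addr) {
  return reinterpret_cast<uint8_t *>((addr >> kShadowScale) + kShadowOffset);
}

// For a small N-byte access: shadow byte 0 means the whole 8-byte granule is
// addressable; otherwise the access must end before the first poisoned byte.
inline bool AccessIsPoisoned(uintptr_t addr, uintptr_t size) {
  int8_t s = static_cast<int8_t>(*MemToShadow(addr));
  if (s == 0) return false;
  if (size >= 8) return true;  // crosses the granule; handled by the slow path
  return static_cast<int8_t>((addr & 7) + size - 1) >= s;
}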

@ -423,18 +184,38 @@ ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8)
ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16)

extern "C"
NOINLINE INTERFACE_ATTRIBUTE void __asan_loadN(uptr addr, uptr size) {
NOINLINE INTERFACE_ATTRIBUTE
void __asan_loadN(uptr addr, uptr size) {
if (__asan_region_is_poisoned(addr, size)) {
GET_CALLER_PC_BP_SP;
__asan_report_error(pc, bp, sp, addr, false, size);
__asan_report_error(pc, bp, sp, addr, false, size, 0);
}
}

extern "C"
NOINLINE INTERFACE_ATTRIBUTE void __asan_storeN(uptr addr, uptr size) {
NOINLINE INTERFACE_ATTRIBUTE
void __asan_exp_loadN(uptr addr, uptr size, u32 exp) {
if (__asan_region_is_poisoned(addr, size)) {
GET_CALLER_PC_BP_SP;
__asan_report_error(pc, bp, sp, addr, true, size);
__asan_report_error(pc, bp, sp, addr, false, size, exp);
}
}

extern "C"
NOINLINE INTERFACE_ATTRIBUTE
void __asan_storeN(uptr addr, uptr size) {
if (__asan_region_is_poisoned(addr, size)) {
GET_CALLER_PC_BP_SP;
__asan_report_error(pc, bp, sp, addr, true, size, 0);
}
}

extern "C"
NOINLINE INTERFACE_ATTRIBUTE
void __asan_exp_storeN(uptr addr, uptr size, u32 exp) {
if (__asan_region_is_poisoned(addr, size)) {
GET_CALLER_PC_BP_SP;
__asan_report_error(pc, bp, sp, addr, true, size, exp);
}
}

@ -453,26 +234,39 @@ static NOINLINE void force_interface_symbols() {
case 3: __asan_report_load4(0); break;
case 4: __asan_report_load8(0); break;
case 5: __asan_report_load16(0); break;
case 6: __asan_report_store1(0); break;
case 7: __asan_report_store2(0); break;
case 8: __asan_report_store4(0); break;
case 9: __asan_report_store8(0); break;
case 10: __asan_report_store16(0); break;
case 12: __asan_register_globals(0, 0); break;
case 13: __asan_unregister_globals(0, 0); break;
case 14: __asan_set_death_callback(0); break;
case 15: __asan_set_error_report_callback(0); break;
case 16: __asan_handle_no_return(); break;
case 17: __asan_address_is_poisoned(0); break;
case 25: __asan_poison_memory_region(0, 0); break;
case 26: __asan_unpoison_memory_region(0, 0); break;
case 27: __asan_set_error_exit_code(0); break;
case 30: __asan_before_dynamic_init(0); break;
case 31: __asan_after_dynamic_init(); break;
case 32: __asan_poison_stack_memory(0, 0); break;
case 33: __asan_unpoison_stack_memory(0, 0); break;
case 34: __asan_region_is_poisoned(0, 0); break;
case 35: __asan_describe_address(0); break;
case 6: __asan_report_load_n(0, 0); break;
case 7: __asan_report_store1(0); break;
case 8: __asan_report_store2(0); break;
case 9: __asan_report_store4(0); break;
case 10: __asan_report_store8(0); break;
case 11: __asan_report_store16(0); break;
case 12: __asan_report_store_n(0, 0); break;
case 13: __asan_report_exp_load1(0, 0); break;
case 14: __asan_report_exp_load2(0, 0); break;
case 15: __asan_report_exp_load4(0, 0); break;
case 16: __asan_report_exp_load8(0, 0); break;
case 17: __asan_report_exp_load16(0, 0); break;
case 18: __asan_report_exp_load_n(0, 0, 0); break;
case 19: __asan_report_exp_store1(0, 0); break;
case 20: __asan_report_exp_store2(0, 0); break;
case 21: __asan_report_exp_store4(0, 0); break;
case 22: __asan_report_exp_store8(0, 0); break;
case 23: __asan_report_exp_store16(0, 0); break;
case 24: __asan_report_exp_store_n(0, 0, 0); break;
case 25: __asan_register_globals(nullptr, 0); break;
case 26: __asan_unregister_globals(nullptr, 0); break;
case 27: __asan_set_death_callback(nullptr); break;
case 28: __asan_set_error_report_callback(nullptr); break;
case 29: __asan_handle_no_return(); break;
case 30: __asan_address_is_poisoned(nullptr); break;
case 31: __asan_poison_memory_region(nullptr, 0); break;
case 32: __asan_unpoison_memory_region(nullptr, 0); break;
case 34: __asan_before_dynamic_init(nullptr); break;
case 35: __asan_after_dynamic_init(); break;
case 36: __asan_poison_stack_memory(0, 0); break;
case 37: __asan_unpoison_stack_memory(0, 0); break;
case 38: __asan_region_is_poisoned(0, 0); break;
case 39: __asan_describe_address(0); break;
}
}

@ -496,8 +290,28 @@ static void InitializeHighMemEnd() {
CHECK_EQ((kHighMemBeg % GetPageSizeCached()), 0);
}

static void ProtectGap(uptr a, uptr size) {
CHECK_EQ(a, (uptr)Mprotect(a, size));
static void ProtectGap(uptr addr, uptr size) {
void *res = MmapNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
// A few pages at the start of the address space can not be protected.
// But we really want to protect as much as possible, to prevent this memory
// being returned as a result of a non-FIXED mmap().
if (addr == kZeroBaseShadowStart) {
uptr step = GetPageSizeCached();
while (size > step && addr < kZeroBaseMaxShadowStart) {
addr += step;
size -= step;
void *res = MmapNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
}
}

Report("ERROR: Failed to protect the shadow gap. "
"ASan cannot proceed correctly. ABORTING.\n");
DumpProcessMap();
Die();
}
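The rewritten ProtectGap no longer aborts on the first failure: when the gap starts in the low pages the kernel refuses to hand out, it retreats one page at a time (up to kZeroBaseMaxShadowStart) before declaring defeat. The same retry shape as a standalone Linux sketch; MAP_FIXED_NOREPLACE (Linux >= 4.17) stands in for the runtime's MmapNoAccess, and the skip bound is a parameter rather than a global:

#include <sys/mman.h>
#include <unistd.h>
#include <cstdint>

// Reserve [addr, addr+size) as inaccessible; if the leading pages cannot be
// mapped, give up on them one page at a time, as ProtectGap does above.
bool ProtectRangeBestEffort(uintptr_t addr, size_t size, uintptr_t max_skip_end) {
  const size_t page = sysconf(_SC_PAGESIZE);
  while (size > page) {
    void *res = mmap((void *)addr, size, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
    if (res == (void *)addr) return true;  // protected from addr onward
    if (addr >= max_skip_end) break;       // only skip within the allowed prefix
    addr += page;
    size -= page;
  }
  return false;
}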

static void PrintAddressSpaceLayout() {

@ -536,13 +350,13 @@ static void PrintAddressSpaceLayout() {
Printf("\n");
Printf("redzone=%zu\n", (uptr)flags()->redzone);
Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
Printf("quarantine_size=%zuM\n", (uptr)flags()->quarantine_size >> 20);
Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb);
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);

Printf("SHADOW_SCALE: %zx\n", (uptr)SHADOW_SCALE);
Printf("SHADOW_GRANULARITY: %zx\n", (uptr)SHADOW_GRANULARITY);
Printf("SHADOW_OFFSET: %zx\n", (uptr)SHADOW_OFFSET);
Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET);
CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
if (kMidMemBeg)
CHECK(kMidShadowBeg > kLowShadowEnd &&

@ -556,10 +370,19 @@ static void AsanInitInternal() {
CHECK(!asan_init_is_running && "ASan init calls itself!");
asan_init_is_running = true;

CacheBinaryName();

// Initialize flags. This must be done early, because most of the
// initialization steps look at flags().
const char *options = GetEnv("ASAN_OPTIONS");
InitializeFlags(flags(), options);
InitializeFlags();

CheckVMASize();

AsanCheckIncompatibleRT();
AsanCheckDynamicRTPrereqs();

SetCanPoisonMemory(flags()->poison_heap);
SetMallocContextSize(common_flags()->malloc_context_size);

InitializeHighMemEnd();

@ -567,24 +390,15 @@ static void AsanInitInternal() {
AsanDoesNotSupportStaticLinkage();

// Install tool-specific callbacks in sanitizer_common.
SetDieCallback(AsanDie);
AddDieCallback(AsanDie);
SetCheckFailedCallback(AsanCheckFailed);
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);

if (!flags()->start_deactivated)
ParseExtraActivationFlags();

__sanitizer_set_report_path(common_flags()->log_path);

// Enable UAR detection, if required.
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);

if (options) {
VReport(1, "Parsed ASAN_OPTIONS: %s\n", options);
}

if (flags()->start_deactivated)
AsanStartDeactivated();

// Re-exec ourselves if we need to set additional env or command line args.
MaybeReexec();

@ -615,17 +429,16 @@ static void AsanInitInternal() {
}
#endif

if (common_flags()->verbosity)
PrintAddressSpaceLayout();
if (Verbosity()) PrintAddressSpaceLayout();

DisableCoreDumperIfNecessary();

if (full_shadow_is_available) {
// mmap the low shadow plus at least one page at the left.
if (kLowShadowBeg)
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd);
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gap.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);

@ -634,11 +447,11 @@ static void AsanInitInternal() {
MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
CHECK(kLowShadowBeg != kLowShadowEnd);
// mmap the low shadow plus at least one page at the left.
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd);
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the mid shadow.
ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd);
ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gaps.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);

@ -646,14 +459,21 @@ static void AsanInitInternal() {
} else {
Report("Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
shadow_start, kHighShadowEnd);
DumpProcessMap();
Die();
}

AsanTSDInit(PlatformTSDDtor);
InstallDeadlySignalHandlers(AsanOnSIGSEGV);
InstallDeadlySignalHandlers(AsanOnDeadlySignal);

InitializeAllocator();
AllocatorOptions allocator_options;
allocator_options.SetFrom(flags(), common_flags());
InitializeAllocator(allocator_options);

MaybeStartBackgroudThread();
SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);

// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.

@ -663,32 +483,40 @@ static void AsanInitInternal() {
if (flags()->atexit)
Atexit(asan_atexit);

if (common_flags()->coverage) {
__sanitizer_cov_init();
Atexit(__sanitizer_cov_dump);
}
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);

// Now that ASan runtime is (mostly) initialized, deactivate it if
// necessary, so that it can be re-activated when requested.
if (flags()->start_deactivated)
AsanDeactivate();

// interceptors
InitTlsSize();

// Create main thread.
AsanThread *main_thread = AsanThread::Create(0, 0);
CreateThreadContextArgs create_main_args = { main_thread, 0 };
u32 main_tid = asanThreadRegistry().CreateThread(
0, true, 0, &create_main_args);
CHECK_EQ(0, main_tid);
AsanThread *main_thread = AsanThread::Create(
/* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
/* stack */ nullptr, /* detached */ true);
CHECK_EQ(0, main_thread->tid());
SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid());
main_thread->ThreadStart(internal_getpid(),
/* signal_thread_is_registered */ nullptr);
force_interface_symbols(); // no-op.
SanitizerInitializeUnwinder();

#if CAN_SANITIZE_LEAKS
__lsan::InitCommonLsan(false);
__lsan::InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
Atexit(__lsan::DoLeakCheck);
}
#endif // CAN_SANITIZE_LEAKS

#if CAN_SANITIZE_UB
__ubsan::InitAsPlugin();
#endif

InitializeSuppressions();

VReport(1, "AddressSanitizer Init done\n");
}

@ -700,46 +528,38 @@ void AsanInitFromRtl() {

#if ASAN_DYNAMIC
// Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable
// (and thus normal initializer from .preinit_array haven't run).
// (and thus normal initializers from .preinit_array or modules haven't run).

class AsanInitializer {
public: // NOLINT
AsanInitializer() {
AsanCheckIncompatibleRT();
AsanCheckDynamicRTPrereqs();
if (UNLIKELY(!asan_inited))
__asan_init();
AsanInitFromRtl();
}
};

static AsanInitializer asan_initializer;
#endif // ASAN_DYNAMIC

} // namespace __asan
} // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char* __asan_default_options() { return ""; }
} // extern "C"
#endif

int NOINLINE __asan_set_error_exit_code(int exit_code) {
int old = flags()->exitcode;
flags()->exitcode = exit_code;
return old;
}

void NOINLINE __asan_handle_no_return() {
int local_stack;
AsanThread *curr_thread = GetCurrentThread();
CHECK(curr_thread);
uptr PageSize = GetPageSizeCached();
uptr top = curr_thread->stack_top();
uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1);
uptr top, bottom;
if (curr_thread) {
top = curr_thread->stack_top();
bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1);
} else {
// If we haven't seen this thread, try asking the OS for stack bounds.
uptr tls_addr, tls_size, stack_size;
GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr,
&tls_size);
top = bottom + stack_size;
}
static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
if (top - bottom > kMaxExpectedCleanupSize) {
static bool reported_warning = false;

@ -755,18 +575,21 @@ void NOINLINE __asan_handle_no_return() {
return;
}
PoisonShadow(bottom, top - bottom, 0);
if (curr_thread->has_fake_stack())
if (curr_thread && curr_thread->has_fake_stack())
curr_thread->fake_stack()->HandleNoReturn();
}
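__asan_handle_no_return now tolerates threads the runtime never registered: without an AsanThread it asks the OS for the stack bounds instead of dying on CHECK(curr_thread). A sketch of the equivalent bounds query with pthreads; pthread_getattr_np is a glibc extension, so this is Linux-specific:

#include <pthread.h>
#include <cstdio>

// Fallback for a thread the runtime has not seen: query its stack from the OS.
static bool GetStackBounds(void **bottom, size_t *size) {
  pthread_attr_t attr;
  if (pthread_getattr_np(pthread_self(), &attr) != 0) return false;
  int rc = pthread_attr_getstack(&attr, bottom, size);
  pthread_attr_destroy(&attr);
  return rc == 0;
}

int main() {
  void *bottom; size_t size;
  if (GetStackBounds(&bottom, &size))
    printf("stack [%p, %p)\n", bottom, (char *)bottom + size);
}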

void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
death_callback = callback;
SetUserDieCallback(callback);
}

// Initialize as requested from instrumented application code.
// We use this call as a trigger to wake up ASan from deactivated state.
void __asan_init() {
AsanCheckIncompatibleRT();
AsanActivate();
AsanInitInternal();
}

void __asan_version_mismatch_check() {
// Do nothing.
}

@ -11,6 +11,21 @@
//===----------------------------------------------------------------------===//
#include "asan_internal.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"

namespace __asan {

static atomic_uint32_t malloc_context_size;

void SetMallocContextSize(u32 size) {
atomic_store(&malloc_context_size, size, memory_order_release);
}

u32 GetMallocContextSize() {
return atomic_load(&malloc_context_size, memory_order_acquire);
}

} // namespace __asan
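Caching malloc_context_size behind an atomic lets activation retune the unwind depth after startup while malloc interceptors read it lock-free; the release store pairs with the acquire load. The same pattern in portable C++11, with std::atomic standing in for the sanitizer's atomic wrappers:

#include <atomic>
#include <cstdint>
#include <cstdio>

static std::atomic<uint32_t> malloc_context_size{30};  // default depth

void SetMallocContextSize(uint32_t size) {
  malloc_context_size.store(size, std::memory_order_release);
}

uint32_t GetMallocContextSize() {
  return malloc_context_size.load(std::memory_order_acquire);
}

int main() {
  SetMallocContextSize(8);                       // e.g. on delayed activation
  printf("depth=%u\n", GetMallocContextSize());  // read on the malloc fast path
}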

// ------------------ Interface -------------- {{{1

@ -9,6 +9,7 @@
//
// ASan-private header for asan_stack.cc.
//===----------------------------------------------------------------------===//

#ifndef ASAN_STACK_H
#define ASAN_STACK_H

@ -19,6 +20,11 @@

namespace __asan {

static const u32 kDefaultMallocContextSize = 30;

void SetMallocContextSize(u32 size);
u32 GetMallocContextSize();

// Get the stack trace with the given pc and bp.
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.

@ -41,15 +47,15 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
uptr stack_bottom = t->stack_bottom();
ScopedUnwinding unwind_scope(t);
stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
} else if (t == 0 && !fast) {
} else if (!t && !fast) {
/* If GetCurrentThread() has failed, try to do slow unwind anyways. */
stack->Unwind(max_depth, pc, bp, context, 0, 0, false);
}
}
#endif // SANITIZER_WINDOWS
#endif // SANITIZER_WINDOWS
}

} // namespace __asan
} // namespace __asan

// NOTE: A Rule of thumb is to retrieve stack trace in the interceptors
// as early as possible (in functions exposed to the user), as we generally

@ -76,9 +82,10 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \
common_flags()->fast_unwind_on_fatal)

#define GET_STACK_TRACE_SIGNAL(pc, bp, context) \
#define GET_STACK_TRACE_SIGNAL(sig) \
BufferedStackTrace stack; \
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context, \
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, \
(sig).pc, (sig).bp, (sig).context, \
common_flags()->fast_unwind_on_fatal)

#define GET_STACK_TRACE_FATAL_HERE \

@ -90,9 +97,8 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
#define GET_STACK_TRACE_THREAD \
GET_STACK_TRACE(kStackTraceMax, true)

#define GET_STACK_TRACE_MALLOC \
GET_STACK_TRACE(common_flags()->malloc_context_size, \
common_flags()->fast_unwind_on_malloc)
#define GET_STACK_TRACE_MALLOC \
GET_STACK_TRACE(GetMallocContextSize(), common_flags()->fast_unwind_on_malloc)

#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC

@ -108,4 +114,4 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
stack.Print(); \
}

#endif // ASAN_STACK_H
#endif // ASAN_STACK_H

@ -49,12 +49,8 @@ void AsanStats::Print() {
(mmaped-munmaped)>>20, mmaped>>20, munmaped>>20,
mmaps, munmaps);

PrintMallocStatsArray(" mmaps by size class: ", mmaped_by_size);
PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size);
PrintMallocStatsArray(" frees by size class: ", freed_by_size);
PrintMallocStatsArray(" rfrees by size class: ", really_freed_by_size);
Printf("Stats: malloc large: %zu small slow: %zu\n",
malloc_large, malloc_small_slow);
Printf("Stats: malloc large: %zu\n", malloc_large);
}

void AsanStats::MergeFrom(const AsanStats *stats) {

@ -159,8 +155,7 @@ uptr __sanitizer_get_free_bytes() {
GetAccumulatedStats(&stats);
uptr total_free = stats.mmaped
- stats.munmaped
+ stats.really_freed
+ stats.really_freed_redzones;
+ stats.really_freed;
uptr total_used = stats.malloced
+ stats.malloced_redzones;
// Return sane value if total_free < total_used due to racy

@ -30,20 +30,14 @@ struct AsanStats {
uptr freed;
uptr real_frees;
uptr really_freed;
uptr really_freed_redzones;
uptr reallocs;
uptr realloced;
uptr mmaps;
uptr mmaped;
uptr munmaps;
uptr munmaped;
uptr mmaped_by_size[kNumberOfSizeClasses];
uptr malloced_by_size[kNumberOfSizeClasses];
uptr freed_by_size[kNumberOfSizeClasses];
uptr really_freed_by_size[kNumberOfSizeClasses];

uptr malloc_large;
uptr malloc_small_slow;
uptr malloced_by_size[kNumberOfSizeClasses];

// Ctor for global AsanStats (accumulated stats for dead threads).
explicit AsanStats(LinkerInitialized) { }
108
libsanitizer/asan/asan_suppressions.cc
Normal file
@ -0,0 +1,108 @@
//===-- asan_suppressions.cc ----------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Issue suppression and suppression-related functions.
//===----------------------------------------------------------------------===//

#include "asan_suppressions.h"

#include "asan_stack.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

namespace __asan {

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kInterceptorName[] = "interceptor_name";
static const char kInterceptorViaFunction[] = "interceptor_via_fun";
static const char kInterceptorViaLibrary[] = "interceptor_via_lib";
static const char kODRViolation[] = "odr_violation";
static const char *kSuppressionTypes[] = {
kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary,
kODRViolation};

extern "C" {
#if SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__asan_default_suppressions();
#else
// No weak hooks, provide an empty implementation.
const char *__asan_default_suppressions() { return ""; }
#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
} // extern "C"

void InitializeSuppressions() {
CHECK_EQ(nullptr, suppression_ctx);
suppression_ctx = new (suppression_placeholder) // NOLINT
SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
suppression_ctx->ParseFromFile(flags()->suppressions);
if (&__asan_default_suppressions)
suppression_ctx->Parse(__asan_default_suppressions());
}

bool IsInterceptorSuppressed(const char *interceptor_name) {
CHECK(suppression_ctx);
Suppression *s;
// Match "interceptor_name" suppressions.
return suppression_ctx->Match(interceptor_name, kInterceptorName, &s);
}

bool HaveStackTraceBasedSuppressions() {
CHECK(suppression_ctx);
return suppression_ctx->HasSuppressionType(kInterceptorViaFunction) ||
suppression_ctx->HasSuppressionType(kInterceptorViaLibrary);
}

bool IsODRViolationSuppressed(const char *global_var_name) {
CHECK(suppression_ctx);
Suppression *s;
// Match "odr_violation" suppressions.
return suppression_ctx->Match(global_var_name, kODRViolation, &s);
}

bool IsStackTraceSuppressed(const StackTrace *stack) {
if (!HaveStackTraceBasedSuppressions())
return false;

CHECK(suppression_ctx);
Symbolizer *symbolizer = Symbolizer::GetOrInit();
Suppression *s;
for (uptr i = 0; i < stack->size && stack->trace[i]; i++) {
uptr addr = stack->trace[i];

if (suppression_ctx->HasSuppressionType(kInterceptorViaLibrary)) {
// Match "interceptor_via_lib" suppressions.
if (const char *module_name = symbolizer->GetModuleNameForPc(addr))
if (suppression_ctx->Match(module_name, kInterceptorViaLibrary, &s))
return true;
}

if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) {
SymbolizedStack *frames = symbolizer->SymbolizePC(addr);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
const char *function_name = cur->info.function;
if (!function_name) {
continue;
}
// Match "interceptor_via_fun" suppressions.
if (suppression_ctx->Match(function_name, kInterceptorViaFunction,
&s)) {
frames->ClearAll();
return true;
}
}
frames->ClearAll();
}
}
return false;
}

} // namespace __asan
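From the user's side, these suppression types are fed in either through ASAN_OPTIONS=suppressions=<file> or through the weak __asan_default_suppressions() hook declared above. A minimal example of the hook; the rule format is type:pattern, one per line, and the function and library names here are purely illustrative:

// Link into a program built with -fsanitize=address; InitializeSuppressions()
// picks this up at startup.
extern "C" const char *__asan_default_suppressions() {
  return "interceptor_via_fun:crc32_on_garbage\n"  // mute reports from one function
         "interceptor_via_lib:libcrufty.so\n"      // ...or from a whole library
         "odr_violation:some_global\n";            // tolerate a known ODR clash
}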
28
libsanitizer/asan/asan_suppressions.h
Normal file
@ -0,0 +1,28 @@
//===-- asan_suppressions.h -------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_suppressions.cc.
//===----------------------------------------------------------------------===//
#ifndef ASAN_SUPPRESSIONS_H
#define ASAN_SUPPRESSIONS_H

#include "asan_internal.h"
#include "sanitizer_common/sanitizer_stacktrace.h"

namespace __asan {

void InitializeSuppressions();
bool IsInterceptorSuppressed(const char *interceptor_name);
bool HaveStackTraceBasedSuppressions();
bool IsStackTraceSuppressed(const StackTrace *stack);
bool IsODRViolationSuppressed(const char *global_var_name);

} // namespace __asan

#endif // ASAN_SUPPRESSIONS_H

@ -25,6 +25,11 @@ namespace __asan {

// AsanThreadContext implementation.

struct CreateThreadContextArgs {
AsanThread *thread;
StackTrace *stack;
};

void AsanThreadContext::OnCreated(void *arg) {
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
if (args->stack)

@ -35,7 +40,7 @@ void AsanThreadContext::OnCreated(void *arg) {

void AsanThreadContext::OnFinished() {
// Drop the link to the AsanThread object.
thread = 0;
thread = nullptr;
}

// MIPS requires aligned address

@ -73,13 +78,17 @@ AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {

// AsanThread implementation.

AsanThread *AsanThread::Create(thread_callback_t start_routine,
void *arg) {
AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
u32 parent_tid, StackTrace *stack,
bool detached) {
uptr PageSize = GetPageSizeCached();
uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
CreateThreadContextArgs args = { thread, stack };
asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
parent_tid, &args);

return thread;
}
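Create now takes everything registration needs (parent tid, creation stack, detached flag) and enters the thread in the registry immediately, while ThreadStart — extended in a later hunk with an optional signal_thread_is_registered atomic — marks it running and lets the spawning interceptor wait for that to happen. A simplified sketch of that registration handshake using std::atomic instead of the runtime's types:

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>

static std::atomic<uintptr_t> thread_is_registered{0};

void ChildStart() {
  // ...register the thread in the registry here...
  thread_is_registered.store(1, std::memory_order_release);  // announce it
  // ...then run the user's start routine...
}

int main() {
  std::thread t(ChildStart);
  while (!thread_is_registered.load(std::memory_order_acquire)) {
    // parent spins until the child is visible in the registry
  }
  printf("child registered\n");
  t.join();
}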

@ -114,7 +123,7 @@ void AsanThread::Destroy() {
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
uptr stack_size = this->stack_size();
if (stack_size == 0) // stack_size is not yet available, don't use FakeStack.
return 0;
return nullptr;
uptr old_val = 0;
// fake_stack_ has 3 states:
// 0 -- not initialized

@ -135,11 +144,11 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
SetTLSFakeStack(fake_stack_);
return fake_stack_;
}
return 0;
return nullptr;
}

void AsanThread::Init() {
fake_stack_ = 0; // Will be initialized lazily if needed.
fake_stack_ = nullptr; // Will be initialized lazily if needed.
CHECK_EQ(this->stack_size(), 0U);
SetThreadStackAndTls();
CHECK_GT(this->stack_size(), 0U);

@ -150,12 +159,15 @@ void AsanThread::Init() {
VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
(void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
&local);
AsanPlatformThreadInit();
}

thread_return_t AsanThread::ThreadStart(uptr os_id) {
thread_return_t AsanThread::ThreadStart(
uptr os_id, atomic_uintptr_t *signal_thread_is_registered) {
Init();
asanThreadRegistry().StartThread(tid(), os_id, 0);
asanThreadRegistry().StartThread(tid(), os_id, nullptr);
if (signal_thread_is_registered)
atomic_store(signal_thread_is_registered, 1, memory_order_release);

if (common_flags()->use_sigaltstack) SetAlternateSignalStack();

if (!start_routine_) {

@ -262,7 +274,7 @@ AsanThread *GetCurrentThread() {
return tctx->thread;
}
}
return 0;
return nullptr;
}
return context->thread;
}

@ -287,7 +299,7 @@ AsanThread *FindThreadByStackAddress(uptr addr) {
AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
(void *)addr));
return tctx ? tctx->thread : 0;
return tctx ? tctx->thread : nullptr;
}

void EnsureMainThreadIDIsCorrect() {

@ -300,10 +312,10 @@ void EnsureMainThreadIDIsCorrect() {
__asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {
__asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
__asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
if (!context) return 0;
if (!context) return nullptr;
return context->thread;
}
} // namespace __asan
} // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {

@ -340,4 +352,4 @@ void UnlockThreadRegistry() {
void EnsureMainThreadIDIsCorrect() {
__asan::EnsureMainThreadIDIsCorrect();
}
} // namespace __lsan
} // namespace __lsan

@ -9,6 +9,7 @@
//
// ASan-private header for asan_thread.cc.
//===----------------------------------------------------------------------===//

#ifndef ASAN_THREAD_H
#define ASAN_THREAD_H

@ -32,19 +33,16 @@ class AsanThread;
class AsanThreadContext : public ThreadContextBase {
public:
explicit AsanThreadContext(int tid)
: ThreadContextBase(tid),
announced(false),
destructor_iterations(kPthreadDestructorIterations),
stack_id(0),
thread(0) {
}
: ThreadContextBase(tid), announced(false),
destructor_iterations(GetPthreadDestructorIterations()), stack_id(0),
thread(nullptr) {}
bool announced;
u8 destructor_iterations;
u32 stack_id;
AsanThread *thread;

void OnCreated(void *arg);
void OnFinished();
void OnCreated(void *arg) override;
void OnFinished() override;
};

// AsanThreadContext objects are never freed, so we need many of them.

@ -53,12 +51,14 @@ COMPILER_CHECK(sizeof(AsanThreadContext) <= 256);
// AsanThread are stored in TSD and destroyed when the thread dies.
class AsanThread {
public:
static AsanThread *Create(thread_callback_t start_routine, void *arg);
static AsanThread *Create(thread_callback_t start_routine, void *arg,
u32 parent_tid, StackTrace *stack, bool detached);
static void TSDDtor(void *tsd);
void Destroy();

void Init(); // Should be called from the thread itself.
thread_return_t ThreadStart(uptr os_id);
thread_return_t ThreadStart(uptr os_id,
atomic_uintptr_t *signal_thread_is_registered);

uptr stack_top() { return stack_top_; }
uptr stack_bottom() { return stack_bottom_; }

@ -83,8 +83,8 @@ class AsanThread {
void DeleteFakeStack(int tid) {
if (!fake_stack_) return;
FakeStack *t = fake_stack_;
fake_stack_ = 0;
SetTLSFakeStack(0);
fake_stack_ = nullptr;
SetTLSFakeStack(nullptr);
t->Destroy(tid);
}

@ -94,7 +94,7 @@ class AsanThread {

FakeStack *fake_stack() {
if (!__asan_option_detect_stack_use_after_return)
return 0;
return nullptr;
if (!has_fake_stack())
return AsyncSignalSafeLazyInitFakeStack();
return fake_stack_;

@ -164,11 +164,6 @@ class ScopedDeadlySignal {
AsanThread *thread;
};

struct CreateThreadContextArgs {
AsanThread *thread;
StackTrace *stack;
};

// Returns a single instance of registry.
ThreadRegistry &asanThreadRegistry();

@ -183,6 +178,6 @@ AsanThread *FindThreadByStackAddress(uptr addr);

// Used to handle fork().
void EnsureMainThreadIDIsCorrect();
} // namespace __asan
} // namespace __asan

#endif // ASAN_THREAD_H
#endif // ASAN_THREAD_H

@ -14,27 +14,139 @@
#if SANITIZER_WINDOWS
#include <windows.h>

#include <dbghelp.h>
#include <stdlib.h>

#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"

using namespace __asan; // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
int __asan_should_detect_stack_use_after_return() {
__asan_init();
return __asan_option_detect_stack_use_after_return;
}
SANITIZER_INTERFACE_ATTRIBUTE
int __asan_should_detect_stack_use_after_return() {
__asan_init();
return __asan_option_detect_stack_use_after_return;
}

// -------------------- A workaround for the absence of weak symbols ----- {{{
// We don't have a direct equivalent of weak symbols when using MSVC, but we can
// use the /alternatename directive to tell the linker to default a specific
// symbol to a specific value, which works nicely for allocator hooks and
// __asan_default_options().
void __sanitizer_default_malloc_hook(void *ptr, uptr size) { }
void __sanitizer_default_free_hook(void *ptr) { }
const char* __asan_default_default_options() { return ""; }
const char* __asan_default_default_suppressions() { return ""; }
void __asan_default_on_error() {}
#pragma comment(linker, "/alternatename:___sanitizer_malloc_hook=___sanitizer_default_malloc_hook") // NOLINT
#pragma comment(linker, "/alternatename:___sanitizer_free_hook=___sanitizer_default_free_hook") // NOLINT
#pragma comment(linker, "/alternatename:___asan_default_options=___asan_default_default_options") // NOLINT
#pragma comment(linker, "/alternatename:___asan_default_suppressions=___asan_default_default_suppressions") // NOLINT
#pragma comment(linker, "/alternatename:___asan_on_error=___asan_default_on_error") // NOLINT
// }}}
} // extern "C"
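The practical effect of those /alternatename defaults is that defining the symbol yourself anywhere in the program overrides the fallback, just as a strong definition overrides a weak one on ELF targets. For example, baking in default options (the option string is only an illustration; ASAN_OPTIONS still wins at runtime):

// In any translation unit of a program built with ASan on Windows:
extern "C" const char *__asan_default_options() {
  return "check_initialization_order=1:detect_stack_use_after_return=1";
}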

// ---------------------- Windows-specific interceptors ---------------- {{{
INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
CHECK(REAL(RaiseException));
__asan_handle_no_return();
REAL(RaiseException)(a, b, c, d);
}

INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) {
CHECK(REAL(_except_handler3));
__asan_handle_no_return();
return REAL(_except_handler3)(a, b, c, d);
}

#if ASAN_DYNAMIC
// This handler is named differently in -MT and -MD CRTs.
#define _except_handler4 _except_handler4_common
#endif
INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
CHECK(REAL(_except_handler4));
__asan_handle_no_return();
return REAL(_except_handler4)(a, b, c, d);
}

static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread*)arg;
SetCurrentThread(t);
return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr);
}

INTERCEPTOR_WINAPI(DWORD, CreateThread,
void* security, uptr stack_size,
DWORD (__stdcall *start_routine)(void*), void* arg,
DWORD thr_flags, void* tid) {
// Strict init-order checking is thread-hostile.
if (flags()->strict_init_order)
StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
// FIXME: The CreateThread interceptor is not the same as a pthread_create
// one. This is a bandaid fix for PR22025.
bool detached = false; // FIXME: how can we determine it on Windows?
u32 current_tid = GetCurrentTidOrInvalid();
AsanThread *t =
AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
return REAL(CreateThread)(security, stack_size,
asan_thread_start, t, thr_flags, tid);
}

namespace {
BlockingMutex mu_for_thread_tracking(LINKER_INITIALIZED);

void EnsureWorkerThreadRegistered() {
// FIXME: GetCurrentThread relies on TSD, which might not play well with
// system thread pools. We might want to use something like reference
// counting to zero out GetCurrentThread() underlying storage when the last
// work item finishes? Or can we disable reclaiming of threads in the pool?
BlockingMutexLock l(&mu_for_thread_tracking);
if (__asan::GetCurrentThread())
return;

AsanThread *t = AsanThread::Create(
/* start_routine */ nullptr, /* arg */ nullptr,
/* parent_tid */ -1, /* stack */ nullptr, /* detached */ true);
t->Init();
asanThreadRegistry().StartThread(t->tid(), 0, 0);
SetCurrentThread(t);
}
} // namespace

INTERCEPTOR_WINAPI(DWORD, NtWaitForWorkViaWorkerFactory, DWORD a, DWORD b) {
// NtWaitForWorkViaWorkerFactory is called from system worker pool threads to
// query work scheduled by BindIoCompletionCallback, QueueUserWorkItem, etc.
// System worker pool threads are created at an arbitrary point in time and
// without using CreateThread, so we wrap NtWaitForWorkViaWorkerFactory
// instead and don't register a specific parent_tid/stack.
EnsureWorkerThreadRegistered();
return REAL(NtWaitForWorkViaWorkerFactory)(a, b);
}

// }}}

namespace __asan {

// ---------------------- TSD ---------------- {{{1
void InitializePlatformInterceptors() {
ASAN_INTERCEPT_FUNC(CreateThread);
ASAN_INTERCEPT_FUNC(RaiseException);
ASAN_INTERCEPT_FUNC(_except_handler3);
ASAN_INTERCEPT_FUNC(_except_handler4);

// NtWaitForWorkViaWorkerFactory is always linked dynamically.
CHECK(::__interception::OverrideFunction(
"NtWaitForWorkViaWorkerFactory",
(uptr)WRAP(NtWaitForWorkViaWorkerFactory),
(uptr *)&REAL(NtWaitForWorkViaWorkerFactory)));
}

// ---------------------- TSD ---------------- {{{
static bool tsd_key_inited = false;

static __declspec(thread) void *fake_tsd = 0;

@ -57,7 +169,13 @@ void AsanTSDSet(void *tsd) {
void PlatformTSDDtor(void *tsd) {
AsanThread::TSDDtor(tsd);
}
// ---------------------- Various stuff ---------------- {{{1
// }}}

// ---------------------- Various stuff ---------------- {{{
void DisableReexec() {
// No need to re-exec on Windows.
}

void MaybeReexec() {
// No need to re-exec on Windows.
}

@ -73,15 +191,11 @@ void AsanCheckDynamicRTPrereqs() {}

void AsanCheckIncompatibleRT() {}

void AsanPlatformThreadInit() {
// Nothing here for now.
}

void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}

void AsanOnSIGSEGV(int, void *siginfo, void *context) {
void AsanOnDeadlySignal(int, void *siginfo, void *context) {
UNIMPLEMENTED();
}

@ -90,12 +204,6 @@ static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
CONTEXT *context = info->ContextRecord;
uptr pc = (uptr)exception_record->ExceptionAddress;
#ifdef _WIN64
uptr bp = (uptr)context->Rbp, sp = (uptr)context->Rsp;
#else
uptr bp = (uptr)context->Ebp, sp = (uptr)context->Esp;
#endif

if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) {

@ -103,8 +211,8 @@ static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
(exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
? "access-violation"
: "in-page-error";
uptr access_addr = exception_record->ExceptionInformation[1];
ReportSIGSEGV(description, pc, sp, bp, context, access_addr);
SignalContext sig = SignalContext::Create(exception_record, context);
ReportDeadlySignal(description, sig);
}

// FIXME: Handle EXCEPTION_STACK_OVERFLOW here.

@ -142,10 +250,10 @@ int __asan_set_seh_filter() {
// Put a pointer to __asan_set_seh_filter at the end of the global list
// of C initializers, after the default EH is set by the CRT.
#pragma section(".CRT$XIZ", long, read) // NOLINT
static __declspec(allocate(".CRT$XIZ"))
__declspec(allocate(".CRT$XIZ"))
int (*__intercept_seh)() = __asan_set_seh_filter;
#endif

// }}}
} // namespace __asan

#endif // _WIN32

@ -19,7 +19,7 @@
// simplifies the build procedure.
#ifdef ASAN_DLL_THUNK
#include "asan_init_version.h"
#include "sanitizer_common/sanitizer_interception.h"
#include "interception/interception.h"

// ---------- Function interception helper functions and macros ----------- {{{1
extern "C" {

@ -28,8 +28,9 @@ void *__stdcall GetProcAddress(void *module, const char *proc_name);
void abort();
}

static void *getRealProcAddressOrDie(const char *name) {
void *ret = GetProcAddress(GetModuleHandleA(0), name);
static uptr getRealProcAddressOrDie(const char *name) {
uptr ret =
__interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
if (!ret)
abort();
return ret;

@ -60,13 +61,12 @@ struct FunctionInterceptor<0> {
};

#define INTERCEPT_WHEN_POSSIBLE(main_function, dll_function) \
template<> struct FunctionInterceptor<__LINE__> { \
template <> struct FunctionInterceptor<__LINE__> { \
static void Execute() { \
void *wrapper = getRealProcAddressOrDie(main_function); \
if (!__interception::OverrideFunction((uptr)dll_function, \
(uptr)wrapper, 0)) \
uptr wrapper = getRealProcAddressOrDie(main_function); \
if (!__interception::OverrideFunction((uptr)dll_function, wrapper, 0)) \
abort(); \
FunctionInterceptor<__LINE__-1>::Execute(); \
FunctionInterceptor<__LINE__ - 1>::Execute(); \
} \
};
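The macro builds a compile-time chain: each INTERCEPT_WHEN_POSSIBLE use specializes FunctionInterceptor<__LINE__> and tail-calls <__LINE__ - 1>, lines without a registration fall through to the empty primary template, and <0> terminates, so a single Execute() visits every registered hook. The trick in isolation, with hook bodies that just print instead of patching functions:

#include <cstdio>

template <int Line> struct FunctionInterceptor {
  static void Execute() { FunctionInterceptor<Line - 1>::Execute(); }
};
template <> struct FunctionInterceptor<0> {  // terminates the chain
  static void Execute() {}
};

#define REGISTER_HOOK(msg)                              \
  template <> struct FunctionInterceptor<__LINE__> {    \
    static void Execute() {                             \
      puts(msg);                                        \
      FunctionInterceptor<__LINE__ - 1>::Execute();     \
    }                                                   \
  };

REGISTER_HOOK("hook A")
REGISTER_HOOK("hook B")

int main() { FunctionInterceptor<__LINE__>::Execute(); }  // prints B, then A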

@ -208,7 +208,7 @@ extern "C" {
// __asan_init is expected to be called by only one thread.
if (fn) return;

fn = (fntype)getRealProcAddressOrDie(__asan_init_name);
fn = (fntype)getRealProcAddressOrDie("__asan_init");
fn();
__asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0);

@ -217,6 +217,10 @@ extern "C" {
}
}

extern "C" void __asan_version_mismatch_check() {
// Do nothing.
}

INTERFACE_FUNCTION(__asan_handle_no_return)

INTERFACE_FUNCTION(__asan_report_store1)

@ -292,7 +296,45 @@ INTERFACE_FUNCTION(__asan_stack_free_8)
INTERFACE_FUNCTION(__asan_stack_free_9)
INTERFACE_FUNCTION(__asan_stack_free_10)

// FIXME: we might want to have a sanitizer_win_dll_thunk?
INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_cov)
INTERFACE_FUNCTION(__sanitizer_cov_dump)
INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
INTERFACE_FUNCTION(__sanitizer_cov_init)
INTERFACE_FUNCTION(__sanitizer_cov_module_init)
INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
INTERFACE_FUNCTION(__sanitizer_cov_trace_cmp)
INTERFACE_FUNCTION(__sanitizer_cov_trace_switch)
INTERFACE_FUNCTION(__sanitizer_cov_with_check)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
INTERFACE_FUNCTION(__sanitizer_get_heap_size)
INTERFACE_FUNCTION(__sanitizer_get_ownership)
INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
INTERFACE_FUNCTION(__sanitizer_ptr_sub)
INTERFACE_FUNCTION(__sanitizer_report_error_summary)
INTERFACE_FUNCTION(__sanitizer_reset_coverage)
INTERFACE_FUNCTION(__sanitizer_get_number_of_counters)
INTERFACE_FUNCTION(__sanitizer_update_counter_bitset_and_clear_counters)
INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify)
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
INTERFACE_FUNCTION(__sanitizer_set_report_path)
INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)

// TODO(timurrrr): Add more interface functions on the as-needed basis.

@ -342,11 +384,15 @@ INTERCEPT_LIBRARY_FUNCTION(strcat); // NOLINT
INTERCEPT_LIBRARY_FUNCTION(strchr);
INTERCEPT_LIBRARY_FUNCTION(strcmp);
INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT
INTERCEPT_LIBRARY_FUNCTION(strcspn);
INTERCEPT_LIBRARY_FUNCTION(strlen);
INTERCEPT_LIBRARY_FUNCTION(strncat);
INTERCEPT_LIBRARY_FUNCTION(strncmp);
INTERCEPT_LIBRARY_FUNCTION(strncpy);
INTERCEPT_LIBRARY_FUNCTION(strnlen);
INTERCEPT_LIBRARY_FUNCTION(strpbrk);
INTERCEPT_LIBRARY_FUNCTION(strspn);
INTERCEPT_LIBRARY_FUNCTION(strstr);
INTERCEPT_LIBRARY_FUNCTION(strtol);
INTERCEPT_LIBRARY_FUNCTION(wcslen);

@ -13,7 +13,8 @@
//
// This includes:
// - forwarding the detect_stack_use_after_return runtime option
// - installing a custom SEH handler
// - working around deficiencies of the MD runtime
// - installing a custom SEH handler
//
//===----------------------------------------------------------------------===//

@ -21,10 +22,15 @@
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
extern "C" {
__declspec(dllimport) int __asan_set_seh_filter();
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
#include <windows.h>

// First, declare CRT sections we'll be using in this file
#pragma section(".CRT$XID", long, read) // NOLINT
#pragma section(".CRT$XIZ", long, read) // NOLINT
#pragma section(".CRT$XTW", long, read) // NOLINT
#pragma section(".CRT$XTY", long, read) // NOLINT

////////////////////////////////////////////////////////////////////////////////
// Define a copy of __asan_option_detect_stack_use_after_return that should be
// used when linking an MD runtime with a set of object files on Windows.
//

@ -35,16 +41,55 @@ __declspec(dllimport) int __asan_should_detect_stack_use_after_return();
// with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows
// just to work around this issue, let's clone a variable that is
// constant after initialization anyways.
extern "C" {
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
int __asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return();
}

////////////////////////////////////////////////////////////////////////////////
// For some reason, the MD CRT doesn't call the C/C++ terminators during DLL
// unload or on exit. ASan relies on LLVM global_dtors to call
// __asan_unregister_globals on these events, which unfortunately doesn't work
// with the MD runtime, see PR22545 for the details.
// To work around this, for each DLL we schedule a call to UnregisterGlobals
// using atexit() that calls a small subset of C terminators
// where LLVM global_dtors is placed. Fingers crossed, no other C terminators
// are there.
extern "C" void __cdecl _initterm(void *a, void *b);

namespace {
__declspec(allocate(".CRT$XTW")) void* before_global_dtors = 0;
__declspec(allocate(".CRT$XTY")) void* after_global_dtors = 0;

void UnregisterGlobals() {
_initterm(&before_global_dtors, &after_global_dtors);
}

int ScheduleUnregisterGlobals() {
return atexit(UnregisterGlobals);
}

// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after
// atexit() is initialized (.CRT$XIC). As this is executed before C++
// initializers (think ctors for globals), UnregisterGlobals gets executed after
// dtors for C++ globals.
__declspec(allocate(".CRT$XID"))
int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;

} // namespace
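The section placement is doing all the work here: C initializers in .CRT$XI* run in lexical section order, so a pointer planted in .CRT$XID executes after atexit() becomes usable (.CRT$XIC) but before any C++ constructor, and whatever it registers with atexit() therefore fires after the C++ global destructors. A standalone MSVC-only sketch that prints the resulting order (with aggressive linker dead-stripping the pointer may additionally need /INCLUDE to survive):

#include <cstdio>
#include <cstdlib>

static void LateAtexit() { puts("atexit callback (after C++ dtors)"); }

static int ScheduleEarly() {  // runs during C initialization
  puts("CRT$XID initializer");
  atexit(LateAtexit);         // registered before any C++ ctor, so it runs last
  return 0;
}

#pragma section(".CRT$XID", long, read)
__declspec(allocate(".CRT$XID")) int (*schedule_early)() = ScheduleEarly;

struct Global {
  Global() { puts("C++ ctor"); }
  ~Global() { puts("C++ dtor"); }
} g;

int main() { puts("main"); }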
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// ASan SEH handling.
|
||||
// We need to set the ASan-specific SEH handler at the end of CRT initialization
|
||||
// of each module (see also asan_win.cc).
|
||||
extern "C" {
|
||||
__declspec(dllimport) int __asan_set_seh_filter();
|
||||
static int SetSEHFilter() { return __asan_set_seh_filter(); }
|
||||
|
||||
// Set the ASan-specific SEH handler at the end of CRT initialization of each
|
||||
// module (see asan_win.cc for the details).
|
||||
//
|
||||
// Unfortunately, putting a pointer to __asan_set_seh_filter into
|
||||
// __asan_intercept_seh gets optimized out, so we have to use an extra function.
|
||||
static int SetSEHFilter() { return __asan_set_seh_filter(); }
|
||||
#pragma section(".CRT$XIZ", long, read) // NOLINT
|
||||
__declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter;
|
||||
}
|
||||
|
||||
#endif // ASAN_DYNAMIC_RUNTIME_THUNK
|
||||
|
|
|
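The `.CRT$X??` machinery above is just the MSVC CRT walking arrays of function pointers in section-name order. A minimal standalone sketch of the same registration trick, independent of ASan (MSVC-only; the callback name and section choice are illustrative, and treating a non-zero return as startup failure follows the CRT's `_initterm_e` convention):

// Sketch: registering an early-init callback through a .CRT$XI* section,
// the same mechanism __asan_schedule_unregister_globals uses via .CRT$XID.
#include <stdio.h>

#pragma section(".CRT$XIG", long, read)  // NOLINT

static int EarlyInit() {
  // Runs as a C initializer: after the CRT is up (.CRT$XIA) but before any
  // C++ constructors. A non-zero return is treated as a startup failure.
  puts("early init");
  return 0;
}

// The CRT walks every pointer placed between .CRT$XIA and .CRT$XIZ in
// section-name order and calls it during startup.
__declspec(allocate(".CRT$XIG")) int (*early_init_ptr)(void) = EarlyInit;

int main() { return 0; }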
@@ -3,4 +3,4 @@
# a separate file so that version updates don't involve re-running
# automake.
# CURRENT:REVISION:AGE
2:0:0
3:0:0
73  libsanitizer/configure  vendored
@@ -616,6 +616,8 @@ BACKTRACE_SUPPORTED
FORMAT_FILE
SANITIZER_SUPPORTED_FALSE
SANITIZER_SUPPORTED_TRUE
USE_CXX_ABI_FLAG_FALSE
USE_CXX_ABI_FLAG_TRUE
USING_MAC_INTERPOSE_FALSE
USING_MAC_INTERPOSE_TRUE
link_liblsan

@@ -12027,7 +12029,7 @@ else
  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
  lt_status=$lt_dlunknown
  cat > conftest.$ac_ext <<_LT_EOF
#line 12030 "configure"
#line 12032 "configure"
#include "confdefs.h"

#if HAVE_DLFCN_H

@@ -12133,7 +12135,7 @@ else
  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
  lt_status=$lt_dlunknown
  cat > conftest.$ac_ext <<_LT_EOF
#line 12136 "configure"
#line 12138 "configure"
#include "confdefs.h"

#if HAVE_DLFCN_H

@@ -15514,7 +15516,7 @@ done

# Common libraries that we need to link against for all sanitizer libs.
link_sanitizer_common='-lpthread -ldl -lm'
link_sanitizer_common='-lrt -lpthread -ldl -lm'

# Set up the set of additional libraries that we need to link against for libasan.
link_libasan=$link_sanitizer_common

@@ -15532,58 +15534,9 @@ link_libubsan=$link_sanitizer_common
link_liblsan=$link_sanitizer_common

# At least for glibc, clock_gettime is in librt.  But don't pull that
# in if it still doesn't give us the function we want.  This
# test is copied from libgomp.
if test $ac_cv_func_clock_gettime = no; then
  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for clock_gettime in -lrt" >&5
$as_echo_n "checking for clock_gettime in -lrt... " >&6; }
if test "${ac_cv_lib_rt_clock_gettime+set}" = set; then :
  $as_echo_n "(cached) " >&6
else
  ac_check_lib_save_LIBS=$LIBS
LIBS="-lrt $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h.  */

/* Override any GCC internal prototype to avoid an error.
   Use char because int might match the return type of a GCC
   builtin and then its argument prototype would still apply.  */
#ifdef __cplusplus
extern "C"
#endif
char clock_gettime ();
int
main ()
{
return clock_gettime ();
  ;
  return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
  ac_cv_lib_rt_clock_gettime=yes
else
  ac_cv_lib_rt_clock_gettime=no
fi
rm -f core conftest.err conftest.$ac_objext \
    conftest$ac_exeext conftest.$ac_ext
LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_rt_clock_gettime" >&5
$as_echo "$ac_cv_lib_rt_clock_gettime" >&6; }
if test "x$ac_cv_lib_rt_clock_gettime" = x""yes; then :
  link_libasan="-lrt $link_libasan"
link_libtsan="-lrt $link_libtsan"
# Other sanitizers do not override clock_* API

fi

fi

case "$host" in
  *-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ;;
  *) MAC_INTERPOSE=false ;;
  *-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ; CXX_ABI_NEEDED=true ;;
  *) MAC_INTERPOSE=false ; CXX_ABI_NEEDED=false ;;
esac
if $MAC_INTERPOSE; then
  USING_MAC_INTERPOSE_TRUE=

@@ -15593,6 +15546,14 @@ else
  USING_MAC_INTERPOSE_FALSE=
fi

if $CXX_ABI_NEEDED; then
  USE_CXX_ABI_FLAG_TRUE=
  USE_CXX_ABI_FLAG_FALSE='#'
else
  USE_CXX_ABI_FLAG_TRUE='#'
  USE_CXX_ABI_FLAG_FALSE=
fi

backtrace_supported=yes

@@ -16550,6 +16511,10 @@ if test -z "${USING_MAC_INTERPOSE_TRUE}" && test -z "${USING_MAC_INTERPOSE_FALSE
  as_fn_error "conditional \"USING_MAC_INTERPOSE\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${USE_CXX_ABI_FLAG_TRUE}" && test -z "${USE_CXX_ABI_FLAG_FALSE}"; then
  as_fn_error "conditional \"USE_CXX_ABI_FLAG\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${SANITIZER_SUPPORTED_TRUE}" && test -z "${SANITIZER_SUPPORTED_FALSE}"; then
  as_fn_error "conditional \"SANITIZER_SUPPORTED\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -96,7 +96,7 @@ AM_CONDITIONAL(LSAN_SUPPORTED, [test "x$LSAN_SUPPORTED" = "xyes"])
AC_CHECK_FUNCS(clock_getres clock_gettime clock_settime)

# Common libraries that we need to link against for all sanitizer libs.
link_sanitizer_common='-lpthread -ldl -lm'
link_sanitizer_common='-lrt -lpthread -ldl -lm'

# Set up the set of additional libraries that we need to link against for libasan.
link_libasan=$link_sanitizer_common

@@ -114,22 +114,12 @@ AC_SUBST(link_libubsan)
link_liblsan=$link_sanitizer_common
AC_SUBST(link_liblsan)

# At least for glibc, clock_gettime is in librt.  But don't pull that
# in if it still doesn't give us the function we want.  This
# test is copied from libgomp.
if test $ac_cv_func_clock_gettime = no; then
  AC_CHECK_LIB(rt, clock_gettime,
    [link_libasan="-lrt $link_libasan"
     link_libtsan="-lrt $link_libtsan"
     # Other sanitizers do not override clock_* API
    ])
fi

case "$host" in
  *-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ;;
  *) MAC_INTERPOSE=false ;;
  *-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ; CXX_ABI_NEEDED=true ;;
  *) MAC_INTERPOSE=false ; CXX_ABI_NEEDED=false ;;
esac
AM_CONDITIONAL(USING_MAC_INTERPOSE, $MAC_INTERPOSE)
AM_CONDITIONAL(USE_CXX_ABI_FLAG, $CXX_ABI_NEEDED)

backtrace_supported=yes
@@ -35,6 +35,9 @@ case "${target}" in
  arm*-*-linux*)
	;;
  aarch64*-*-linux*)
	if test x$ac_cv_sizeof_void_p = x8; then
		TSAN_SUPPORTED=yes
	fi
	;;
  x86_64-*-darwin[1]* | i?86-*-darwin[1]*)
	TSAN_SUPPORTED=no
@@ -108,12 +108,7 @@ extern "C" {
  void __asan_report_error(void *pc, void *bp, void *sp,
                           void *addr, int is_write, size_t access_size);

  // Sets the exit code to use when reporting an error.
  // Returns the old value.
  int __asan_set_error_exit_code(int exit_code);

  // Sets the callback to be called right before death on error.
  // Passing 0 will unset the callback.
  // Deprecated. Call __sanitizer_set_death_callback instead.
  void __asan_set_death_callback(void (*callback)(void));

  void __asan_set_error_report_callback(void (*callback)(const char*));
@@ -60,15 +60,6 @@ extern "C" {
  void __sanitizer_unaligned_store32(void *p, uint32_t x);
  void __sanitizer_unaligned_store64(void *p, uint64_t x);

  // Initialize coverage.
  void __sanitizer_cov_init();
  // Record and dump coverage info.
  void __sanitizer_cov_dump();
  // Open <name>.sancov.packed in the coverage directory and return the file
  // descriptor. Returns -1 on failure, or if coverage dumping is disabled.
  // This is intended for use by sandboxing code.
  intptr_t __sanitizer_maybe_open_cov_file(const char *name);

  // Annotate the current state of a contiguous container, such as
  // std::vector, std::string or similar.
  // A contiguous container is a container that keeps all of its elements

@@ -115,6 +106,20 @@ extern "C" {
  // Print the stack trace leading to this call. Useful for debugging user code.
  void __sanitizer_print_stack_trace();

  // Sets the callback to be called right before death on error.
  // Passing 0 will unset the callback.
  void __sanitizer_set_death_callback(void (*callback)(void));

  // Interceptor hooks.
  // Whenever a libc function interceptor is called it checks if the
  // corresponding weak hook is defined, and if so -- calls it.
  // The primary use case is data-flow-guided fuzzing, where the fuzzer needs
  // to know what is being passed to libc functions, e.g. memcmp.
  // FIXME: implement more hooks.
  void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
                                    const void *s2, size_t n);
  void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
                                     const char *s2, size_t n);
#ifdef __cplusplus
}  // extern "C"
#endif
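Because the hooks are weak, a tool can observe every intercepted memcmp simply by defining the symbol; a minimal sketch (the logging is illustrative -- a real fuzzer would feed the operands back into its mutator):

// Sketch: defining the weak memcmp hook so a sanitizer's interceptor calls us.
// Build into a binary instrumented with a sanitizer whose interceptors check
// these hooks; if the symbol stays undefined, the interceptor just skips it.
#include <sanitizer/common_interface_defs.h>
#include <stdio.h>

extern "C" void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
                                             const void *s2, size_t n) {
  // Called before the real memcmp runs; record the comparison so a
  // data-flow-guided fuzzer can learn the bytes the target expects.
  fprintf(stderr, "memcmp at pc=%p, n=%zu\n", called_pc, n);
  (void)s1;
  (void)s2;
}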
61  libsanitizer/include/sanitizer/coverage_interface.h  Normal file

@@ -0,0 +1,61 @@
//===-- sanitizer/coverage_interface.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Public interface for sanitizer coverage.
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_COVERAG_INTERFACE_H
#define SANITIZER_COVERAG_INTERFACE_H

#include <sanitizer/common_interface_defs.h>

#ifdef __cplusplus
extern "C" {
#endif

  // Initialize coverage.
  void __sanitizer_cov_init();
  // Record and dump coverage info.
  void __sanitizer_cov_dump();
  // Open <name>.sancov.packed in the coverage directory and return the file
  // descriptor. Returns -1 on failure, or if coverage dumping is disabled.
  // This is intended for use by sandboxing code.
  intptr_t __sanitizer_maybe_open_cov_file(const char *name);
  // Get the number of total unique covered entities (blocks, edges, calls).
  // This can be useful for coverage-directed in-process fuzzers.
  uintptr_t __sanitizer_get_total_unique_coverage();

  // Reset the basic-block (edge) coverage to the initial state.
  // Useful for in-process fuzzing to start collecting coverage from scratch.
  // Experimental, will likely not work for multi-threaded processes.
  void __sanitizer_reset_coverage();
  // Set *data to the array of covered PCs and return the size of that array.
  // Some of the entries in *data will be zero.
  uintptr_t __sanitizer_get_coverage_guards(uintptr_t **data);

  // The coverage instrumentation may optionally provide imprecise counters.
  // Rather than exposing the counter values to the user we instead map
  // the counters to a bitset.
  // Every counter is associated with 8 bits in the bitset.
  // We define 8 value ranges: 1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+
  // The i-th bit is set to 1 if the counter value is in the i-th range.
  // This counter-based coverage implementation is *not* thread-safe.

  // Returns the number of registered coverage counters.
  uintptr_t __sanitizer_get_number_of_counters();
  // Updates the counter 'bitset', clears the counters and returns the number of
  // new bits in 'bitset'.
  // If 'bitset' is nullptr, only clears the counters.
  // Otherwise 'bitset' should be at least
  // __sanitizer_get_number_of_counters bytes long and 8-aligned.
  uintptr_t
  __sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset);
#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // SANITIZER_COVERAG_INTERFACE_H
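A hedged sketch of how an in-process fuzzer might drive the counter bitset, assuming the target is built with the 8-bit-counters coverage instrumentation; the harness functions are hypothetical stand-ins:

// Sketch: coverage-guided loop using the counter bitset from this header.
#include <sanitizer/coverage_interface.h>
#include <stdint.h>
#include <stdlib.h>

static void MutateInput() {}        // hypothetical: mutate the current input
static void RunOneInput() {}        // hypothetical: feed the input to the target
static void SaveInputToCorpus() {}  // hypothetical: persist interesting inputs

void FuzzLoop() {
  uintptr_t n = __sanitizer_get_number_of_counters();
  uint8_t *bitset = (uint8_t *)calloc(n, 1);  // calloc gives >= 8-byte alignment
  for (int iter = 0; iter < 100000; iter++) {
    MutateInput();
    RunOneInput();
    // New bits mean the last input drove some counter into a value range
    // never seen before, i.e. it exercised new behavior.
    uintptr_t new_bits =
        __sanitizer_update_counter_bitset_and_clear_counters(bitset);
    if (new_bits) SaveInputToCorpus();
  }
  free(bitset);
}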
@@ -83,6 +83,24 @@ size_t dfsan_get_label_count(void);
/// callback executes.  Pass in NULL to remove any callback.
void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);

/// Writes the labels currently used by the program to the given file
/// descriptor. The lines of the output have the following format:
///
/// <label> <parent label 1> <parent label 2> <label description if any>
void dfsan_dump_labels(int fd);

/// Interceptor hooks.
/// Whenever a dfsan custom function is called, the corresponding hook is
/// called if it is non-zero. The hooks should be defined by the user.
/// The primary use case is taint-guided fuzzing, where the fuzzer
/// needs to see the parameters of the function and the labels.
/// FIXME: implement more hooks.
void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2,
                            size_t n, dfsan_label s1_label,
                            dfsan_label s2_label, dfsan_label n_label);
void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
                             size_t n, dfsan_label s1_label,
                             dfsan_label s2_label, dfsan_label n_label);
#ifdef __cplusplus
}  // extern "C"
@@ -39,14 +39,25 @@ extern "C" {
  void __lsan_register_root_region(const void *p, size_t size);
  void __lsan_unregister_root_region(const void *p, size_t size);

  // Calling this function makes LSan enter the leak checking phase immediately.
  // Use this if normal end-of-process leak checking happens too late (e.g. if
  // you have intentional memory leaks in your shutdown code). Calling this
  // function overrides end-of-process leak checking; it must be called at
  // most once per process. This function will terminate the process if there
  // are memory leaks and the exit_code flag is non-zero.
  // Check for leaks now. This function behaves identically to the default
  // end-of-process leak check. In particular, it will terminate the process if
  // leaks are found and the exitcode runtime flag is non-zero.
  // Subsequent calls to this function will have no effect and end-of-process
  // leak check will not run. Effectively, end-of-process leak check is moved to
  // the time of first invocation of this function.
  // By calling this function early during process shutdown, you can instruct
  // LSan to ignore shutdown-only leaks which happen later on.
  void __lsan_do_leak_check();

  // Check for leaks now. Returns zero if no leaks have been found or if leak
  // detection is disabled, non-zero otherwise.
  // This function may be called repeatedly, e.g. to periodically check a
  // long-running process. It prints a leak report if appropriate, but does not
  // terminate the process. It does not affect the behavior of
  // __lsan_do_leak_check() or the end-of-process leak check, and is not
  // affected by them.
  int __lsan_do_recoverable_leak_check();

  // The user may optionally provide this function to disallow leak checking
  // for the program it is linked into (if the return value is non-zero). This
  // function must be defined as returning a constant value; any behavior beyond
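A minimal sketch of how a long-running service might use the new recoverable check (the loop structure and timing are hypothetical):

// Sketch: periodic leak checking in a daemon, without killing the process.
// Requires linking with LSan (e.g. -fsanitize=leak).
#include <sanitizer/lsan_interface.h>
#include <stdio.h>
#include <unistd.h>

int main() {
  for (;;) {
    sleep(60);  // hypothetical: real service work would go here
    // Prints a report if leaks are found, but never terminates the process;
    // contrast with __lsan_do_leak_check(), which honors the exitcode flag
    // and disables the end-of-process check.
    if (__lsan_do_recoverable_leak_check())
      fprintf(stderr, "leaks detected, see report above\n");
  }
}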
@@ -23,6 +23,11 @@ extern "C" {
  /* Get raw origin for an address. */
  uint32_t __msan_get_origin(const volatile void *a);

  /* Test that this_id is a descendant of prev_id (or they are simply equal).
   * "descendant" here means they are part of the same chain, created with
   * __msan_chain_origin. */
  int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id);

  /* Returns non-zero if tracking origins. */
  int __msan_get_track_origins();

@@ -36,7 +41,9 @@ extern "C" {
     contents). */
  void __msan_unpoison_string(const volatile char *a);

  /* Make memory region fully uninitialized (without changing its contents). */
  /* Make memory region fully uninitialized (without changing its contents).
     This is a legacy interface that does not update origin information. Use
     __msan_allocated_memory() instead. */
  void __msan_poison(const volatile void *a, size_t size);

  /* Make memory region partially uninitialized (without changing its contents).

@@ -52,10 +59,6 @@ extern "C" {
   * is not. */
  void __msan_check_mem_is_initialized(const volatile void *x, size_t size);

  /* Set exit code when error(s) were detected.
     Value of 0 means don't change the program exit code. */
  void __msan_set_exit_code(int exit_code);

  /* For testing:
     __msan_set_expect_umr(1);
     ... some buggy code ...

@@ -83,14 +86,22 @@ extern "C" {
     Memory will be marked uninitialized, with origin at the call site. */
  void __msan_allocated_memory(const volatile void* data, size_t size);

  /* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
  void __sanitizer_dtor_callback(const volatile void* data, size_t size);

  /* This function may be optionally provided by user and should return
     a string containing Msan runtime options. See msan_flags.h for details. */
  const char* __msan_default_options();

  /* Sets the callback to be called right before death on error.
     Passing 0 will unset the callback. */
  /* Deprecated. Call __sanitizer_set_death_callback instead. */
  void __msan_set_death_callback(void (*callback)(void));

  /* Update shadow for the application copy of size bytes from src to dst.
     Src and dst are application addresses. This function does not copy the
     actual application memory, it only updates shadow and origin for such
     copy. Source and destination regions can overlap. */
  void __msan_copy_shadow(const volatile void *dst, const volatile void *src,
                          size_t size);
#ifdef __cplusplus
}  // extern "C"
#endif
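Where a program moves bytes in a way MSan's interceptors cannot observe (a custom allocator, shared memory, a device-assisted copy), __msan_copy_shadow keeps shadow and origins consistent. A hedged sketch, assuming a build with -fsanitize=memory; the out-of-band copy here is a hypothetical stand-in:

/* Sketch: keeping MSan shadow consistent across a copy MSan cannot see. */
#include <sanitizer/msan_interface.h>
#include <stddef.h>

/* Hypothetical stand-in for a copy that bypasses libc (e.g. a device or
   kernel-assisted transfer), so MSan's memcpy interceptor never runs. */
static void device_copy(void *dst, const void *src, size_t n) {
  unsigned char *d = (unsigned char *)dst;
  const unsigned char *s = (const unsigned char *)src;
  for (size_t i = 0; i < n; i++) d[i] = s[i];
}

void copy_with_shadow(void *dst, const void *src, size_t n) {
  device_copy(dst, src, n);
  /* Propagate initialized-ness (and origins) from src to dst so later reads
     of dst don't trigger false use-of-uninitialized-value reports. */
  __msan_copy_shadow(dst, src, n);
}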
@@ -217,7 +217,6 @@ const interpose_substitution substitution_##func_name[] \
    namespace __interception { \
      FUNC_TYPE(func) PTR_TO_REAL(func); \
    } \
    DECLARE_WRAPPER_WINAPI(ret_type, func, __VA_ARGS__) \
    extern "C" \
    INTERCEPTOR_ATTRIBUTE \
    ret_type __stdcall WRAP(func)(__VA_ARGS__)
@@ -33,12 +33,12 @@ void *GetFuncAddrVer(const char *func_name, const char *ver);
     (::__interception::uptr) & WRAP(func))

#if !defined(__ANDROID__)  // android does not have dlvsym
# define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
    ::__interception::real_##func = (func##_f)(unsigned long) \
        ::__interception::GetFuncAddrVer(#func, symver)
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
  (::__interception::real_##func = (func##_f)( \
      unsigned long)::__interception::GetFuncAddrVer(#func, symver))
#else
# define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
    INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
  INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
#endif  // !defined(__ANDROID__)

#endif  // INTERCEPTION_LINUX_H
@@ -82,6 +82,7 @@ static size_t RoundUpToInstrBoundary(size_t size, char *code) {
        cursor += 2;
        continue;
      case '\xE9':  // E9 XX YY ZZ WW = jmp WWZZYYXX
      case '\xB8':  // B8 XX YY ZZ WW = mov eax, WWZZYYXX
        cursor += 5;
        continue;
    }

@@ -179,11 +180,15 @@ bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
  return true;
}

static const void **InterestingDLLsAvailable() {
  const char *InterestingDLLs[] = {"kernel32.dll",
                                   "msvcr110.dll",  // VS2012
                                   "msvcr120.dll",  // VS2013
                                   NULL};
static void **InterestingDLLsAvailable() {
  const char *InterestingDLLs[] = {
      "kernel32.dll",
      "msvcr110.dll",  // VS2012
      "msvcr120.dll",  // VS2013
      // NTDLL should go last as it exports some functions that we should override
      // in the CRT [presumably only used internally].
      "ntdll.dll", NULL
  };
  static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };
  if (!result[0]) {
    for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {

@@ -191,14 +196,65 @@ static const void **InterestingDLLsAvailable() {
        result[j++] = (void *)h;
    }
  }
  return (const void **)&result[0];
  return &result[0];
}

namespace {
// Utility for reading loaded PE images.
template <typename T> class RVAPtr {
 public:
  RVAPtr(void *module, uptr rva)
      : ptr_(reinterpret_cast<T *>(reinterpret_cast<char *>(module) + rva)) {}
  operator T *() { return ptr_; }
  T *operator->() { return ptr_; }
  T *operator++() { return ++ptr_; }

 private:
  T *ptr_;
};
}  // namespace

// Internal implementation of GetProcAddress. At least since Windows 8,
// GetProcAddress appears to initialize DLLs before returning function pointers
// into them. This is problematic for the sanitizers, because they typically
// want to intercept malloc *before* MSVCRT initializes. Our internal
// implementation walks the export list manually without doing initialization.
uptr InternalGetProcAddress(void *module, const char *func_name) {
  // Check that the module header is full and present.
  RVAPtr<IMAGE_DOS_HEADER> dos_stub(module, 0);
  RVAPtr<IMAGE_NT_HEADERS> headers(module, dos_stub->e_lfanew);
  if (!module || dos_stub->e_magic != IMAGE_DOS_SIGNATURE ||  // "MZ"
      headers->Signature != IMAGE_NT_SIGNATURE ||             // "PE\0\0"
      headers->FileHeader.SizeOfOptionalHeader <
          sizeof(IMAGE_OPTIONAL_HEADER)) {
    return 0;
  }

  IMAGE_DATA_DIRECTORY *export_directory =
      &headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];
  RVAPtr<IMAGE_EXPORT_DIRECTORY> exports(module,
                                         export_directory->VirtualAddress);
  RVAPtr<DWORD> functions(module, exports->AddressOfFunctions);
  RVAPtr<DWORD> names(module, exports->AddressOfNames);
  RVAPtr<WORD> ordinals(module, exports->AddressOfNameOrdinals);

  for (DWORD i = 0; i < exports->NumberOfNames; i++) {
    RVAPtr<char> name(module, names[i]);
    if (!strcmp(func_name, name)) {
      DWORD index = ordinals[i];
      RVAPtr<char> func(module, functions[index]);
      return (uptr)(char *)func;
    }
  }

  return 0;
}

static bool GetFunctionAddressInDLLs(const char *func_name, uptr *func_addr) {
  *func_addr = 0;
  const void **DLLs = InterestingDLLsAvailable();
  void **DLLs = InterestingDLLsAvailable();
  for (size_t i = 0; *func_addr == 0 && DLLs[i]; ++i)
    *func_addr = (uptr)GetProcAddress((HMODULE)DLLs[i], func_name);
    *func_addr = InternalGetProcAddress(DLLs[i], func_name);
  return (*func_addr != 0);
}
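A hedged sketch of what the manual export walk buys over plain GetProcAddress -- resolving an export from a module that has been mapped but deliberately not initialized (Windows-only; the `uptr` typedef and the linkage against the interception library are assumptions of this sketch, not part of the patch):

// Sketch: resolving an export without letting the loader run DllMain,
// mirroring how the interceptors bootstrap before MSVCRT initializes.
#include <windows.h>
#include <stdio.h>

namespace __interception {
typedef unsigned long long uptr;  // assumption: matches sanitizer uptr on Win64
uptr InternalGetProcAddress(void *module, const char *func_name);
}

int main() {
  // DONT_RESOLVE_DLL_REFERENCES maps the image without calling DllMain, so
  // only a manual export-table walk can resolve symbols from it safely.
  HMODULE h = LoadLibraryExW(L"msvcr120.dll", nullptr,
                             DONT_RESOLVE_DLL_REFERENCES);
  if (!h) return 1;
  __interception::uptr addr =
      __interception::InternalGetProcAddress((void *)h, "malloc");
  printf("malloc export at %p\n", (void *)addr);
  return 0;
}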
@@ -28,6 +28,10 @@ bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func = 0);

// Overrides a function in a system DLL or DLL CRT by its exported name.
bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0);

// Windows-only replacement for GetProcAddress. Useful for some sanitizers.
uptr InternalGetProcAddress(void *module, const char *func_name);

}  // namespace __interception

#if defined(INTERCEPTION_DYNAMIC_CRT)
@@ -13,6 +13,7 @@
#include "lsan.h"

#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_allocator.h"
#include "lsan_common.h"

@@ -32,13 +33,44 @@ bool WordIsPoisoned(uptr addr) {

using namespace __lsan;  // NOLINT

static void InitializeFlags() {
  // Set all the default values.
  SetCommonFlagsDefaults();
  {
    CommonFlags cf;
    cf.CopyFrom(*common_flags());
    cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
    cf.malloc_context_size = 30;
    cf.detect_leaks = true;
    cf.exitcode = 23;
    OverrideCommonFlags(cf);
  }

  Flags *f = flags();
  f->SetDefaults();

  FlagParser parser;
  RegisterLsanFlags(&parser, f);
  RegisterCommonFlags(&parser);

  parser.ParseString(GetEnv("LSAN_OPTIONS"));

  SetVerbosity(common_flags()->verbosity);

  if (Verbosity()) ReportUnrecognizedFlags();

  if (common_flags()->help) parser.PrintFlagDescriptions();
}

extern "C" void __lsan_init() {
  CHECK(!lsan_init_is_running);
  if (lsan_inited)
    return;
  lsan_init_is_running = true;
  SanitizerToolName = "LeakSanitizer";
  InitCommonLsan(true);
  CacheBinaryName();
  InitializeFlags();
  InitCommonLsan();
  InitializeAllocator();
  InitTlsSize();
  InitializeInterceptors();

@@ -50,6 +82,9 @@ extern "C" void __lsan_init() {

  if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
    Atexit(DoLeakCheck);

  InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);

  lsan_inited = true;
  lsan_init_is_running = false;
}
@@ -23,19 +23,29 @@ extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {

static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.

struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  u8 allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};

#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
    sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
    PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
    sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,

@@ -45,7 +55,7 @@ static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.Init();
  allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
}

void AllocatorThreadFinish() {

@@ -79,7 +89,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
    return nullptr;
  }
  void *p = allocator.Allocate(&cache, size, alignment, false);
  // Do not rely on the allocator to clear the memory (it's slow).

@@ -102,7 +112,7 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
    return nullptr;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);

@@ -200,7 +210,7 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
    return kIgnoreObjectInvalid;
  }
}
} // namespace __lsan
}  // namespace __lsan

using namespace __lsan;

@@ -229,10 +239,10 @@ SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != 0; }
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}
} // extern "C"
}  // extern "C"
@@ -14,11 +14,11 @@

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"

@@ -34,52 +34,17 @@ bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

static void InitializeFlags(bool standalone) {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_root_regions = true;
  f->use_unaligned = false;
  f->use_poisoned = false;
  f->log_pointers = false;
  f->log_threads = false;
void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers", "");
    ParseFlag(options, &f->use_globals, "use_globals", "");
    ParseFlag(options, &f->use_stacks, "use_stacks", "");
    ParseFlag(options, &f->use_tls, "use_tls", "");
    ParseFlag(options, &f->use_root_regions, "use_root_regions", "");
    ParseFlag(options, &f->use_unaligned, "use_unaligned", "");
    ParseFlag(options, &f->use_poisoned, "use_poisoned", "");
    ParseFlag(options, &f->report_objects, "report_objects", "");
    ParseFlag(options, &f->resolution, "resolution", "");
    CHECK_GE(&f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks", "");
    CHECK_GE(&f->max_leaks, 0);
    ParseFlag(options, &f->log_pointers, "log_pointers", "");
    ParseFlag(options, &f->log_threads, "log_threads", "");
    ParseFlag(options, &f->exitcode, "exitcode", "");
  }

  // Set defaults for common flags (only in standalone mode) and parse
  // them from LSAN_OPTIONS.
  CommonFlags *cf = common_flags();
  if (standalone) {
    SetCommonFlagsDefaults(cf);
    cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
    cf->malloc_context_size = 30;
    cf->detect_leaks = true;
  }
  ParseCommonFlagsFromString(cf, options);
void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

#define LOG_POINTERS(...) \

@@ -92,14 +57,23 @@ static void InitializeFlags(bool standalone) {
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0);

static bool suppressions_inited = false;
ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };

void InitializeSuppressions() {
  CHECK(!suppressions_inited);
  SuppressionContext::InitIfNecessary();
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder) // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    SuppressionContext::Get()->Parse(__lsan_default_suppressions());
  suppressions_inited = true;
    suppression_ctx->Parse(__lsan_default_suppressions());
}

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

struct RootRegion {

@@ -115,8 +89,7 @@ void InitializeRootRegions() {
  root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}

void InitCommonLsan(bool standalone) {
  InitializeFlags(standalone);
void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if

@@ -139,9 +112,11 @@ static inline bool CanBeAHeapPointer(uptr p) {
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#else
  return true;
#endif

@@ -149,13 +124,14 @@ static inline bool CanBeAHeapPointer(uptr p) {

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;

@@ -169,9 +145,7 @@ void ScanRangeForPointers(uptr begin, uptr end,
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {

@@ -267,8 +241,8 @@ static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ 0, /*filename*/ 0, /*filename_size*/ 0,
                        &prot)) {
                        /*offset*/ nullptr, /*filename*/ nullptr,
                        /*filename_size*/ 0, &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;

@@ -310,7 +284,7 @@ static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

@@ -320,8 +294,11 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored)
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.

@@ -329,26 +306,33 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(1);

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  CHECK_EQ(0, frontier.size());
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  LOG_POINTERS("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb, &frontier);
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {

@@ -365,7 +349,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    uptr resolution = flags()->resolution;
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());

@@ -381,7 +365,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) {

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  SuppressionContext::Get()->GetMatched(&matched);
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";

@@ -389,40 +373,38 @@ static void PrintMatchedSuppressions() {
  Printf("Suppressions used:\n");
  Printf(" count bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
           matched[i]->weight, matched[i]->templ);
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct DoLeakCheckParam {
struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

void DoLeakCheck() {
  EnsureMainThreadIDIsCorrect();
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return;

  DoLeakCheckParam param;
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  DoStopTheWorld(CheckForLeaksCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

@@ -446,39 +428,51 @@ void DoLeakCheck() {
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    if (flags()->exitcode) {
      if (common_flags()->coverage)
        __sanitizer_cov_dump();
      internal__exit(flags()->exitcode);
    }
    return true;
  }
  return false;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  bool have_leaks = CheckForLeaks();
  if (!have_leaks) {
    return;
  }
  if (common_flags()->exitcode) {
    Die();
  }
}

static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s;
  Suppression *s = nullptr;

  // Suppress by module name.
  const char *module_name;
  uptr module_offset;
  if (Symbolizer::GetOrInit()
          ->GetModuleNameAndOffsetForPC(addr, &module_name, &module_offset) &&
      SuppressionContext::Get()->Match(module_name, SuppressionLeak, &s))
    return s;
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  static const uptr kMaxAddrFrames = 16;
  InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
  for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
  uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
      addr, addr_frames.data(), kMaxAddrFrames);
  for (uptr i = 0; i < addr_frames_num; i++) {
    if (SuppressionContext::Get()->Match(addr_frames[i].function,
                                         SuppressionLeak, &s) ||
        SuppressionContext::Get()->Match(addr_frames[i].file, SuppressionLeak,
                                         &s))
      return s;
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  return 0;
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {

@@ -488,7 +482,7 @@ static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return 0;
  return nullptr;
}

///// LeakReport implementation. /////

@@ -591,10 +585,9 @@ void LeakReport::PrintSummary() {
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedBuffer<char> summary(kMaxSummaryLength);
  internal_snprintf(summary.data(), summary.size(),
                    "%zu byte(s) leaked in %zu allocation(s).", bytes,
                    allocations);
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

@@ -603,7 +596,8 @@ void LeakReport::ApplySuppressions() {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      s->hit_count += leaks_[i].hit_count;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                           leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }

@@ -616,8 +610,8 @@ uptr LeakReport::UnsuppressedLeakCount() {
  return result;
}

} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

@@ -638,7 +632,7 @@ void __lsan_ignore_object(const void *p) {
          "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE

@@ -649,7 +643,7 @@ void __lsan_register_root_region(const void *begin, uptr size) {
  RootRegion region = {begin, size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE

@@ -676,7 +670,7 @@ void __lsan_unregister_root_region(const void *begin, uptr size) {
           begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE

@@ -702,7 +696,16 @@ void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
  return 0;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS

@@ -711,4 +714,4 @@ int __lsan_is_turned_off() {
  return 0;
}
#endif
} // extern "C"
}  // extern "C"
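The suppression machinery above matches each frame's module, function, and source file against user rules of type "leak". A small example file, passed via the suppressions runtime flag (file name and patterns are illustrative):

# leaks.supp -- use with LSAN_OPTIONS=suppressions=leaks.supp
# One rule per line: "leak:<substring>", matched against the module name,
# the function name, and the file name of every frame in the leak's stack.
leak:libthirdparty.so
leak:LegacyInitOnce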
@@ -17,14 +17,20 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

#if SANITIZER_LINUX && defined(__x86_64__) && (SANITIZER_WORDSIZE == 64)
#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips64)) \
    && (SANITIZER_WORDSIZE == 64)
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
#endif

namespace __sanitizer {
class FlagParser;
}

namespace __lsan {

// Chunk tags.

@@ -36,44 +42,19 @@ enum ChunkTag {
};

struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
#undef LSAN_FLAG

  void SetDefaults();
  uptr pointer_alignment() const {
    return use_unaligned ? 1 : sizeof(uptr);
  }

  // Print addresses of leaked objects after main leak report.
  bool report_objects;
  // Aggregate two objects into one leak if this many stack frames match. If
  // zero, the entire stack trace must match.
  int resolution;
  // The number of leaks reported.
  int max_leaks;
  // If nonzero kill the process with this exit code upon finding leaks.
  int exitcode;

  // Flags controlling the root set of reachable memory.
  // Global variables (.data and .bss).
  bool use_globals;
  // Thread stacks.
  bool use_stacks;
  // Thread registers.
  bool use_registers;
  // TLS and thread-specific storage.
  bool use_tls;
  // Regions added via __lsan_register_root_region().
  bool use_root_regions;

  // Consider unaligned pointers valid.
  bool use_unaligned;
  // Consider pointers found in poisoned memory to be valid.
  bool use_poisoned;

  // Debug logging.
  bool log_pointers;
  bool log_threads;
};

extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
void RegisterLsanFlags(FlagParser *parser, Flags *f);

struct Leak {
  u32 id;

@@ -117,6 +98,8 @@ typedef InternalMmapVector<uptr> Frontier;
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);
// Run stoptheworld while holding any platform-specific locks.
void DoStopTheWorld(StopTheWorldCallback callback, void* argument);

void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,

@@ -129,7 +112,7 @@ enum IgnoreObjectResult {
};

// Functions called from the parent tool.
void InitCommonLsan(bool standalone);
void InitCommonLsan();
void DoLeakCheck();
bool DisabledInThisThread();
@@ -27,7 +27,7 @@ static const char kLinkerName[] = "ld";
// We request 2 modules matching "ld", so we can print a warning if there's more
// than one match. But only the first one is actually used.
static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = 0;
static LoadedModule *linker = nullptr;

static bool IsLinker(const char* full_name) {
  return LibraryNameIs(full_name, kLinkerName);

@@ -47,7 +47,7 @@ void InitializePlatformSpecificModules() {
  else if (num_matches > 1)
    VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
            "TLS will not be handled correctly.\n", kLinkerName);
  linker = 0;
  linker = nullptr;
}

static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,

@@ -83,10 +83,6 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
  if (!flags()->use_globals) return;
  // FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
  // deadlocking by running this under StopTheWorld. However, the lock is
  // reentrant, so we should be able to fix this by acquiring the lock before
  // suspending threads.
  dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}

@@ -112,7 +108,7 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
      reinterpret_cast<ProcessPlatformAllocParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)

@@ -151,5 +147,31 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
  ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}

} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX
struct DoStopTheWorldParam {
  StopTheWorldCallback callback;
  void *argument;
};

static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
                                  void *data) {
  DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
  StopTheWorld(param->callback, param->argument);
  return 1;
}

// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one
// of the threads is frozen while holding the libdl lock, the tracer will hang
// in dl_iterate_phdr() forever.
// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
// tracer task and the thread that spawned it. Thus, if we run the tracer task
// while holding the libdl lock in the parent thread, we can safely reenter it
// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
// callback in the parent thread.
void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
  DoStopTheWorldParam param = {callback, argument};
  dl_iterate_phdr(DoStopTheWorldCallback, &param);
}

}  // namespace __lsan

#endif  // CAN_SANITIZE_LEAKS && SANITIZER_LINUX
41
libsanitizer/lsan/lsan_flags.inc
Normal file
41
libsanitizer/lsan/lsan_flags.inc
Normal file
|
@@ -0,0 +1,41 @@
+//===-- lsan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// LSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LSAN_FLAG
+# error "Define LSAN_FLAG prior to including this file!"
+#endif
+
+// LSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+LSAN_FLAG(bool, report_objects, false,
+          "Print addresses of leaked objects after main leak report.")
+LSAN_FLAG(
+    int, resolution, 0,
+    "Aggregate two objects into one leak if this many stack frames match. If "
+    "zero, the entire stack trace must match.")
+LSAN_FLAG(int, max_leaks, 0, "The number of leaks reported.")
+
+// Flags controlling the root set of reachable memory.
+LSAN_FLAG(bool, use_globals, true,
+          "Root set: include global variables (.data and .bss)")
+LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
+LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
+LSAN_FLAG(bool, use_tls, true,
+          "Root set: include TLS and thread-specific storage")
+LSAN_FLAG(bool, use_root_regions, true,
+          "Root set: include regions added via __lsan_register_root_region().")
+
+LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
+LSAN_FLAG(bool, use_poisoned, false,
+          "Consider pointers found in poisoned memory to be valid.")
+LSAN_FLAG(bool, log_pointers, false, "Debug logging")
+LSAN_FLAG(bool, log_threads, false, "Debug logging")
+LSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
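Aside: the file above is consumed with the X-macro pattern. A sketch of the expansion, assuming the usual shape of the sanitizer flag headers (the real struct and registration live in lsan_flags.h and sanitizer_flag_parser.h):

struct Flags {
  // Declares one field per flag: `bool report_objects;`, `int resolution;`, ...
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
#undef LSAN_FLAG

  void SetDefaults() {
    // Expands to `report_objects = false; resolution = 0; ...`
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
  }
};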
libsanitizer/lsan/lsan_interceptors.cc

@@ -10,11 +10,11 @@
 //
 //===----------------------------------------------------------------------===//

+#include "interception/interception.h"
 #include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_atomic.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_flags.h"
-#include "sanitizer_common/sanitizer_interception.h"
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_linux.h"
 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
@@ -69,7 +69,7 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
     CHECK(allocated < kCallocPoolSize);
     return mem;
   }
-  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
+  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return nullptr;
   ENSURE_LSAN_INITED;
   GET_STACK_TRACE_MALLOC;
   size *= nmemb;
@@ -162,9 +162,9 @@ void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
   Deallocate(ptr);

 INTERCEPTOR_ATTRIBUTE
-void operator delete(void *ptr) throw() { OPERATOR_DELETE_BODY; }
+void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
 INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr) throw() { OPERATOR_DELETE_BODY; }
+void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
 INTERCEPTOR_ATTRIBUTE
 void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
 INTERCEPTOR_ATTRIBUTE
@@ -206,16 +206,16 @@ extern "C" void *__lsan_thread_start_func(void *arg) {
   // Wait until the last iteration to maximize the chance that we are the last
   // destructor to run.
   if (pthread_setspecific(g_thread_finalize_key,
-                          (void*)kPthreadDestructorIterations)) {
+                          (void*)GetPthreadDestructorIterations())) {
     Report("LeakSanitizer: failed to set thread key.\n");
     Die();
   }
   int tid = 0;
   while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
     internal_sched_yield();
-  atomic_store(&p->tid, 0, memory_order_release);
   SetCurrentThread(tid);
   ThreadStart(tid, GetTid());
+  atomic_store(&p->tid, 0, memory_order_release);
   return callback(param);
 }

@@ -224,7 +224,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
   ENSURE_LSAN_INITED;
   EnsureMainThreadIDIsCorrect();
   __sanitizer_pthread_attr_t myattr;
-  if (attr == 0) {
+  if (!attr) {
     pthread_attr_init(&myattr);
     attr = &myattr;
   }
@@ -282,4 +282,4 @@ void InitializeInterceptors() {
   }
 }

-}  // namespace __lsan
+} // namespace __lsan
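Aside: the calloc interceptor above leans on CallocShouldReturnNullDueToOverflow() to reject nmemb * size products that would wrap. The check reduces to one division, shown here as a standalone sketch:

#include <stdint.h>

typedef uintptr_t uptr;

// True iff size * n overflows uptr; mirrors the (max / size) < n test used
// in sanitizer_allocator.cc.
bool CallocOverflows(uptr size, uptr n) {
  if (size == 0) return false;
  uptr max = (uptr)-1;
  return (max / size) < n;
}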
libsanitizer/lsan/lsan_thread.cc

@@ -77,7 +77,7 @@ void ThreadContext::OnFinished() {

 u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
   return thread_registry->CreateThread(user_id, detached, parent_tid,
-                                       /* arg */ 0);
+                                       /* arg */ nullptr);
 }

 void ThreadStart(u32 tid, uptr os_id) {
@@ -97,9 +97,9 @@ void ThreadFinish() {
 }

 ThreadContext *CurrentThreadContext() {
-  if (!thread_registry) return 0;
+  if (!thread_registry) return nullptr;
   if (GetCurrentThread() == kInvalidTid)
-    return 0;
+    return nullptr;
   // No lock needed when getting current thread.
   return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
 }
@@ -118,7 +118,7 @@ u32 ThreadTid(uptr uid) {

 void ThreadJoin(u32 tid) {
   CHECK_NE(tid, kInvalidTid);
-  thread_registry->JoinThread(tid, /* arg */0);
+  thread_registry->JoinThread(tid, /* arg */nullptr);
 }

 void EnsureMainThreadIDIsCorrect() {
@@ -155,4 +155,4 @@ void UnlockThreadRegistry() {
   thread_registry->Unlock();
 }

-}  // namespace __lsan
+} // namespace __lsan
libsanitizer/lsan/lsan_thread.h

@@ -20,8 +20,8 @@ namespace __lsan {
 class ThreadContext : public ThreadContextBase {
  public:
   explicit ThreadContext(int tid);
-  void OnStarted(void *arg);
-  void OnFinished();
+  void OnStarted(void *arg) override;
+  void OnFinished() override;
   uptr stack_begin() { return stack_begin_; }
   uptr stack_end() { return stack_end_; }
   uptr tls_begin() { return tls_begin_; }
libsanitizer/sanitizer_common/Makefile.am

@@ -27,6 +27,7 @@ sanitizer_common_files = \
 	sanitizer_deadlock_detector1.cc \
 	sanitizer_deadlock_detector2.cc \
 	sanitizer_flags.cc \
+	sanitizer_flag_parser.cc \
 	sanitizer_libc.cc \
 	sanitizer_libignore.cc \
 	sanitizer_linux.cc \
@@ -45,6 +46,7 @@ sanitizer_common_files = \
 	sanitizer_stackdepot.cc \
 	sanitizer_stacktrace.cc \
 	sanitizer_stacktrace_libcdep.cc \
+	sanitizer_symbolizer_mac.cc \
 	sanitizer_stacktrace_printer.cc \
 	sanitizer_stoptheworld_linux_libcdep.cc \
 	sanitizer_suppressions.cc \
@@ -55,7 +57,7 @@ sanitizer_common_files = \
 	sanitizer_symbolizer_win.cc \
 	sanitizer_thread_registry.cc \
 	sanitizer_tls_get_addr.cc \
-	sanitizer_unwind_posix_libcdep.cc \
+	sanitizer_unwind_linux_libcdep.cc \
 	sanitizer_win.cc
libsanitizer/sanitizer_common/Makefile.in

@@ -85,7 +85,8 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
 	sanitizer_coverage_mapping_libcdep.lo \
 	sanitizer_deadlock_detector1.lo \
 	sanitizer_deadlock_detector2.lo sanitizer_flags.lo \
-	sanitizer_libc.lo sanitizer_libignore.lo sanitizer_linux.lo \
+	sanitizer_flag_parser.lo sanitizer_libc.lo \
+	sanitizer_libignore.lo sanitizer_linux.lo \
 	sanitizer_linux_libcdep.lo sanitizer_mac.lo \
 	sanitizer_persistent_allocator.lo \
 	sanitizer_platform_limits_linux.lo \
@@ -94,7 +95,7 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
 	sanitizer_procmaps_common.lo sanitizer_procmaps_freebsd.lo \
 	sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \
 	sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
-	sanitizer_stacktrace_libcdep.lo \
+	sanitizer_stacktrace_libcdep.lo sanitizer_symbolizer_mac.lo \
 	sanitizer_stacktrace_printer.lo \
 	sanitizer_stoptheworld_linux_libcdep.lo \
 	sanitizer_suppressions.lo sanitizer_symbolizer.lo \
@@ -102,7 +103,7 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
 	sanitizer_symbolizer_libcdep.lo \
 	sanitizer_symbolizer_posix_libcdep.lo \
 	sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \
-	sanitizer_tls_get_addr.lo sanitizer_unwind_posix_libcdep.lo \
+	sanitizer_tls_get_addr.lo sanitizer_unwind_linux_libcdep.lo \
 	sanitizer_win.lo
 am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
 libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
@@ -290,6 +291,7 @@ sanitizer_common_files = \
 	sanitizer_deadlock_detector1.cc \
 	sanitizer_deadlock_detector2.cc \
 	sanitizer_flags.cc \
+	sanitizer_flag_parser.cc \
 	sanitizer_libc.cc \
 	sanitizer_libignore.cc \
 	sanitizer_linux.cc \
@@ -308,6 +310,7 @@ sanitizer_common_files = \
 	sanitizer_stackdepot.cc \
 	sanitizer_stacktrace.cc \
 	sanitizer_stacktrace_libcdep.cc \
+	sanitizer_symbolizer_mac.cc \
 	sanitizer_stacktrace_printer.cc \
 	sanitizer_stoptheworld_linux_libcdep.cc \
 	sanitizer_suppressions.cc \
@@ -318,7 +321,7 @@ sanitizer_common_files = \
 	sanitizer_symbolizer_win.cc \
 	sanitizer_thread_registry.cc \
 	sanitizer_tls_get_addr.cc \
-	sanitizer_unwind_posix_libcdep.cc \
+	sanitizer_unwind_linux_libcdep.cc \
 	sanitizer_win.cc

 libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
@@ -421,6 +424,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage_mapping_libcdep.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector1.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector2.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flag_parser.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libignore.Plo@am__quote@
@@ -446,11 +450,12 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libbacktrace.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_mac.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_posix_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_linux_libcdep.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@

 .cc.o:
libsanitizer/sanitizer_common/sanitizer_addrhashmap.h

@@ -141,7 +141,7 @@ bool AddrHashMap<T, kSize>::Handle::created() const {

 template<typename T, uptr kSize>
 bool AddrHashMap<T, kSize>::Handle::exists() const {
-  return cell_ != 0;
+  return cell_ != nullptr;
 }

 template<typename T, uptr kSize>
@@ -158,7 +158,7 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) {
   h->created_ = false;
   h->addidx_ = -1U;
   h->bucket_ = b;
-  h->cell_ = 0;
+  h->cell_ = nullptr;

   // If we want to remove the element, we need exclusive access to the bucket,
   // so skip the lock-free phase.
@@ -248,7 +248,7 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) {
   }

   // Store in the add cells.
-  if (add == 0) {
+  if (!add) {
     // Allocate a new add array.
     const uptr kInitSize = 64;
     add = (AddBucket*)InternalAlloc(kInitSize);
@@ -280,7 +280,7 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) {

 template<typename T, uptr kSize>
 void AddrHashMap<T, kSize>::release(Handle *h) {
-  if (h->cell_ == 0)
+  if (!h->cell_)
     return;
   Bucket *b = h->bucket_;
   Cell *c = h->cell_;
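Aside: the acquire()/release() changes above are easiest to read against the map's handle protocol. A hedged usage sketch (the Handle constructor and created() accessor follow the header's public surface; the payload type is illustrative):

#include "sanitizer_addrhashmap.h"

namespace __sanitizer {

struct Info { int refcount; };
typedef AddrHashMap<Info, 11> InfoMap;
static InfoMap g_map;

void Register(uptr addr) {
  InfoMap::Handle h(&g_map, addr);  // locks the cell for addr
  if (h.created())
    h->refcount = 0;                // fresh cell: initialize the payload
  h->refcount++;
}                                   // cell released when h goes out of scope

} // namespace __sanitizer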
libsanitizer/sanitizer_common/sanitizer_allocator.cc

@@ -9,10 +9,10 @@
 // run-time libraries.
 // This allocator is used inside run-times.
 //===----------------------------------------------------------------------===//

 #include "sanitizer_allocator.h"
+#include "sanitizer_allocator_internal.h"
 #include "sanitizer_common.h"
-#include "sanitizer_flags.h"

 namespace __sanitizer {

@@ -43,7 +43,7 @@ InternalAllocator *internal_allocator() {
   return 0;
 }

-#else  // SANITIZER_GO
+#else // SANITIZER_GO

 static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
 static atomic_uint8_t internal_allocator_initialized;
@@ -59,7 +59,7 @@ InternalAllocator *internal_allocator() {
     SpinMutexLock l(&internal_alloc_init_mu);
     if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
         0) {
-      internal_allocator_instance->Init();
+      internal_allocator_instance->Init(/* may_return_null*/ false);
       atomic_store(&internal_allocator_initialized, 1, memory_order_release);
     }
   }
@@ -76,29 +76,29 @@ static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
 }

 static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
-  if (cache == 0) {
+  if (!cache) {
     SpinMutexLock l(&internal_allocator_cache_mu);
     return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
   }
   internal_allocator()->Deallocate(cache, ptr);
 }

-#endif  // SANITIZER_GO
+#endif // SANITIZER_GO

 const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;

 void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
   if (size + sizeof(u64) < size)
-    return 0;
+    return nullptr;
   void *p = RawInternalAlloc(size + sizeof(u64), cache);
-  if (p == 0)
-    return 0;
+  if (!p)
+    return nullptr;
   ((u64*)p)[0] = kBlockMagic;
   return (char*)p + sizeof(u64);
 }

 void InternalFree(void *addr, InternalAllocatorCache *cache) {
-  if (addr == 0)
+  if (!addr)
     return;
   addr = (char*)addr - sizeof(u64);
   CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
@@ -138,14 +138,12 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
   return (max / size) < n;
 }

-void *AllocatorReturnNull() {
-  if (common_flags()->allocator_may_return_null)
-    return 0;
+void NORETURN ReportAllocatorCannotReturnNull() {
   Report("%s's allocator is terminating the process instead of returning 0\n",
          SanitizerToolName);
   Report("If you don't like this behavior set allocator_may_return_null=1\n");
   CHECK(0);
-  return 0;
+  Die();
 }

-}  // namespace __sanitizer
+} // namespace __sanitizer
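Aside: InternalAlloc/InternalFree above use a magic header word to catch frees of foreign pointers. The scheme in isolation (standalone sketch, with malloc standing in for the raw allocator):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static const uint64_t kMagic = 0x6A6CB03ABCEBC041ull;

void *TaggedAlloc(size_t size) {
  if (size + sizeof(uint64_t) < size) return nullptr;  // size overflow
  uint64_t *p = (uint64_t *)malloc(size + sizeof(uint64_t));
  if (!p) return nullptr;
  p[0] = kMagic;          // tag the block
  return p + 1;           // hand out the payload past the tag
}

void TaggedFree(void *addr) {
  if (!addr) return;
  uint64_t *p = (uint64_t *)addr - 1;
  assert(p[0] == kMagic && "freeing a pointer we did not allocate");
  free(p);
}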
libsanitizer/sanitizer_common/sanitizer_allocator.h

@@ -21,8 +21,8 @@

 namespace __sanitizer {

-// Depending on allocator_may_return_null either return 0 or crash.
-void *AllocatorReturnNull();
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull();

 // SizeClassMap maps allocation sizes into size classes and back.
 // Class 0 corresponds to size 0.
@@ -209,6 +209,7 @@ class AllocatorStats {
   void Init() {
     internal_memset(this, 0, sizeof(*this));
   }
+  void InitLinkerInitialized() {}

   void Add(AllocatorStat i, uptr v) {
     v += atomic_load(&stats_[i], memory_order_relaxed);
@@ -238,11 +239,14 @@ class AllocatorStats {
 // Global stats, used for aggregation and querying.
 class AllocatorGlobalStats : public AllocatorStats {
  public:
-  void Init() {
-    internal_memset(this, 0, sizeof(*this));
+  void InitLinkerInitialized() {
     next_ = this;
     prev_ = this;
   }
+  void Init() {
+    internal_memset(this, 0, sizeof(*this));
+    InitLinkerInitialized();
+  }

   void Register(AllocatorStats *s) {
     SpinMutexLock l(&mu_);
@@ -317,7 +321,7 @@ class SizeClassAllocator64 {

   void Init() {
     CHECK_EQ(kSpaceBeg,
-             reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
+             reinterpret_cast<uptr>(MmapNoAccess(kSpaceBeg, kSpaceSize)));
     MapWithCallback(kSpaceEnd, AdditionalSize());
   }

@@ -341,7 +345,7 @@ class SizeClassAllocator64 {
     CHECK_LT(class_id, kNumClasses);
     RegionInfo *region = GetRegionInfo(class_id);
     Batch *b = region->free_list.Pop();
-    if (b == 0)
+    if (!b)
       b = PopulateFreeList(stat, c, class_id, region);
     region->n_allocated += b->count;
     return b;
@@ -365,16 +369,16 @@ class SizeClassAllocator64 {
   void *GetBlockBegin(const void *p) {
     uptr class_id = GetSizeClass(p);
     uptr size = SizeClassMap::Size(class_id);
-    if (!size) return 0;
+    if (!size) return nullptr;
     uptr chunk_idx = GetChunkIdx((uptr)p, size);
     uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
     uptr beg = chunk_idx * size;
     uptr next_beg = beg + size;
-    if (class_id >= kNumClasses) return 0;
+    if (class_id >= kNumClasses) return nullptr;
     RegionInfo *region = GetRegionInfo(class_id);
     if (region->mapped_user >= next_beg)
       return reinterpret_cast<void*>(reg_beg + beg);
-    return 0;
+    return nullptr;
   }

   static uptr GetActuallyAllocatedSize(void *p) {
@@ -603,6 +607,7 @@ class TwoLevelByteMap {
     internal_memset(map1_, 0, sizeof(map1_));
     mu_.Init();
   }
+
   void TestOnlyUnmap() {
     for (uptr i = 0; i < kSize1; i++) {
       u8 *p = Get(i);
@@ -816,6 +821,10 @@ class SizeClassAllocator32 {
   void PrintStats() {
   }

+  static uptr AdditionalSize() {
+    return 0;
+  }
+
   typedef SizeClassMap SizeClassMapT;
   static const uptr kNumClasses = SizeClassMap::kNumClasses;

@@ -862,9 +871,9 @@ class SizeClassAllocator32 {
     uptr reg = AllocateRegion(stat, class_id);
     uptr n_chunks = kRegionSize / (size + kMetadataSize);
     uptr max_count = SizeClassMap::MaxCached(class_id);
-    Batch *b = 0;
+    Batch *b = nullptr;
     for (uptr i = reg; i < reg + n_chunks * size; i += size) {
-      if (b == 0) {
+      if (!b) {
         if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
           b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
         else
@@ -875,7 +884,7 @@ class SizeClassAllocator32 {
       if (b->count == max_count) {
         CHECK_GT(b->count, 0);
         sci->free_list.push_back(b);
-        b = 0;
+        b = nullptr;
       }
     }
     if (b) {
@@ -1000,9 +1009,14 @@ struct SizeClassAllocatorLocalCache {
 template <class MapUnmapCallback = NoOpMapUnmapCallback>
 class LargeMmapAllocator {
  public:
-  void Init() {
-    internal_memset(this, 0, sizeof(*this));
+  void InitLinkerInitialized(bool may_return_null) {
     page_size_ = GetPageSizeCached();
+    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+  }
+
+  void Init(bool may_return_null) {
+    internal_memset(this, 0, sizeof(*this));
+    InitLinkerInitialized(may_return_null);
   }

   void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@@ -1010,7 +1024,9 @@ class LargeMmapAllocator {
     uptr map_size = RoundUpMapSize(size);
     if (alignment > page_size_)
       map_size += alignment;
-    if (map_size < size) return AllocatorReturnNull();  // Overflow.
+    // Overflow.
+    if (map_size < size)
+      return ReturnNullOrDie();
     uptr map_beg = reinterpret_cast<uptr>(
         MmapOrDie(map_size, "LargeMmapAllocator"));
     CHECK(IsAligned(map_beg, page_size_));
@@ -1046,6 +1062,16 @@ class LargeMmapAllocator {
     return reinterpret_cast<void*>(res);
   }

+  void *ReturnNullOrDie() {
+    if (atomic_load(&may_return_null_, memory_order_acquire))
+      return nullptr;
+    ReportAllocatorCannotReturnNull();
+  }
+
+  void SetMayReturnNull(bool may_return_null) {
+    atomic_store(&may_return_null_, may_return_null, memory_order_release);
+  }
+
   void Deallocate(AllocatorStats *stat, void *p) {
     Header *h = GetHeader(p);
     {
@@ -1078,7 +1104,7 @@ class LargeMmapAllocator {
   }

   bool PointerIsMine(const void *p) {
-    return GetBlockBegin(p) != 0;
+    return GetBlockBegin(p) != nullptr;
   }

   uptr GetActuallyAllocatedSize(void *p) {
@@ -1107,13 +1133,13 @@ class LargeMmapAllocator {
         nearest_chunk = ch;
     }
     if (!nearest_chunk)
-      return 0;
+      return nullptr;
     Header *h = reinterpret_cast<Header *>(nearest_chunk);
     CHECK_GE(nearest_chunk, h->map_beg);
     CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
     CHECK_LE(nearest_chunk, p);
     if (h->map_beg + h->map_size <= p)
-      return 0;
+      return nullptr;
     return GetUser(h);
   }

@@ -1123,7 +1149,7 @@ class LargeMmapAllocator {
     mutex_.CheckLocked();
     uptr p = reinterpret_cast<uptr>(ptr);
     uptr n = n_chunks_;
-    if (!n) return 0;
+    if (!n) return nullptr;
     if (!chunks_sorted_) {
       // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
       SortArray(reinterpret_cast<uptr*>(chunks_), n);
@@ -1135,7 +1161,7 @@ class LargeMmapAllocator {
                    chunks_[n - 1]->map_size;
     }
     if (p < min_mmap_ || p >= max_mmap_)
-      return 0;
+      return nullptr;
     uptr beg = 0, end = n - 1;
     // This loop is a log(n) lower_bound. It does not check for the exact match
     // to avoid expensive cache-thrashing loads.
@@ -1156,7 +1182,7 @@ class LargeMmapAllocator {

     Header *h = chunks_[beg];
     if (h->map_beg + h->map_size <= p || p < h->map_beg)
-      return 0;
+      return nullptr;
     return GetUser(h);
   }

@@ -1224,6 +1250,7 @@ class LargeMmapAllocator {
   struct Stats {
     uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
   } stats;
+  atomic_uint8_t may_return_null_;
   SpinMutex mutex_;
 };

@@ -1237,19 +1264,32 @@ template <class PrimaryAllocator, class AllocatorCache,
           class SecondaryAllocator>  // NOLINT
 class CombinedAllocator {
  public:
-  void Init() {
+  void InitCommon(bool may_return_null) {
     primary_.Init();
-    secondary_.Init();
+    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+  }
+
+  void InitLinkerInitialized(bool may_return_null) {
+    secondary_.InitLinkerInitialized(may_return_null);
+    stats_.InitLinkerInitialized();
+    InitCommon(may_return_null);
+  }
+
+  void Init(bool may_return_null) {
+    secondary_.Init(may_return_null);
+    stats_.Init();
+    InitCommon(may_return_null);
   }

   void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
-                 bool cleared = false) {
+                 bool cleared = false, bool check_rss_limit = false) {
     // Returning 0 on malloc(0) may break a lot of code.
     if (size == 0)
       size = 1;
     if (size + alignment < size)
-      return AllocatorReturnNull();
+      return ReturnNullOrDie();
+    if (check_rss_limit && RssLimitIsExceeded())
+      return ReturnNullOrDie();
     if (alignment > 8)
       size = RoundUpTo(size, alignment);
     void *res;
@@ -1265,6 +1305,30 @@ class CombinedAllocator {
     return res;
   }

+  bool MayReturnNull() const {
+    return atomic_load(&may_return_null_, memory_order_acquire);
+  }
+
+  void *ReturnNullOrDie() {
+    if (MayReturnNull())
+      return nullptr;
+    ReportAllocatorCannotReturnNull();
+  }
+
+  void SetMayReturnNull(bool may_return_null) {
+    secondary_.SetMayReturnNull(may_return_null);
+    atomic_store(&may_return_null_, may_return_null, memory_order_release);
+  }
+
+  bool RssLimitIsExceeded() {
+    return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
+  }
+
+  void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
+    atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
+                 memory_order_release);
+  }
+
   void Deallocate(AllocatorCache *cache, void *p) {
     if (!p) return;
     if (primary_.PointerIsMine(p))
@@ -1279,7 +1343,7 @@ class CombinedAllocator {
       return Allocate(cache, new_size, alignment);
     if (!new_size) {
       Deallocate(cache, p);
-      return 0;
+      return nullptr;
     }
     CHECK(PointerIsMine(p));
     uptr old_size = GetActuallyAllocatedSize(p);
@@ -1377,11 +1441,13 @@ class CombinedAllocator {
   PrimaryAllocator primary_;
   SecondaryAllocator secondary_;
   AllocatorGlobalStats stats_;
+  atomic_uint8_t may_return_null_;
+  atomic_uint8_t rss_limit_is_exceeded_;
 };

 // Returns true if calloc(size, n) should return 0 due to overflow in size*n.
 bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);

-}  // namespace __sanitizer
+} // namespace __sanitizer

-#endif  // SANITIZER_ALLOCATOR_H
+#endif // SANITIZER_ALLOCATOR_H
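Aside: the new OOM policy threaded through the allocators above is one atomic flag consulted on every failure path. Its shape, as a standalone sketch (stdio/abort standing in for Report/Die):

#include <atomic>
#include <cstdio>
#include <cstdlib>

static std::atomic<bool> may_return_null{false};

[[noreturn]] static void ReportCannotReturnNull() {
  std::fprintf(stderr,
               "allocator is terminating the process instead of returning 0\n");
  std::abort();
}

void *ReturnNullOrDie() {
  if (may_return_null.load(std::memory_order_acquire))
    return nullptr;              // caller sees a plain allocation failure
  ReportCannotReturnNull();      // otherwise this is a hard error
}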
libsanitizer/sanitizer_common/sanitizer_allocator_internal.h

@@ -1,4 +1,4 @@
-//===-- sanitizer_allocator_internal.h -------------------------- C++ -----===//
+//===-- sanitizer_allocator_internal.h --------------------------*- C++ -*-===//
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
@@ -43,10 +43,19 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
 typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
                           LargeMmapAllocator<> > InternalAllocator;

-void *InternalAlloc(uptr size, InternalAllocatorCache *cache = 0);
-void InternalFree(void *p, InternalAllocatorCache *cache = 0);
+void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr);
+void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
 InternalAllocator *internal_allocator();

-}  // namespace __sanitizer
+enum InternalAllocEnum {
+  INTERNAL_ALLOC
+};

-#endif  // SANITIZER_ALLOCATOR_INTERNAL_H
+} // namespace __sanitizer
+
+inline void *operator new(__sanitizer::operator_new_size_type size,
+                          InternalAllocEnum) {
+  return InternalAlloc(size);
+}
+
+#endif // SANITIZER_ALLOCATOR_INTERNAL_H
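Aside: the tag-type operator new added above enables `new(INTERNAL_ALLOC) T(...)` for runtime-internal objects. The idiom in a self-contained form (malloc standing in for InternalAlloc; names are illustrative):

#include <cstddef>
#include <cstdlib>

enum MyAllocEnum { MY_ALLOC };

static void *MyInternalAlloc(size_t size) { return std::malloc(size); }

// Tag-dispatched placement form: selected by the MY_ALLOC argument.
inline void *operator new(size_t size, MyAllocEnum) {
  return MyInternalAlloc(size);
}

struct Node {
  int value;
  explicit Node(int v) : value(v) {}
};

Node *MakeNode(int v) {
  return new (MY_ALLOC) Node(v);  // allocates via MyInternalAlloc
}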
libsanitizer/sanitizer_common/sanitizer_atomic.h

@@ -53,7 +53,7 @@ struct atomic_uintptr_t {

 }  // namespace __sanitizer

-#if defined(__GNUC__)
+#if defined(__clang__) || defined(__GNUC__)
 # include "sanitizer_atomic_clang.h"
 #elif defined(_MSC_VER)
 # include "sanitizer_atomic_msvc.h"
@@ -61,4 +61,20 @@ struct atomic_uintptr_t {
 # error "Unsupported compiler"
 #endif

+namespace __sanitizer {
+
+// Clutter-reducing helpers.
+
+template<typename T>
+INLINE typename T::Type atomic_load_relaxed(const volatile T *a) {
+  return atomic_load(a, memory_order_relaxed);
+}
+
+template<typename T>
+INLINE void atomic_store_relaxed(volatile T *a, typename T::Type v) {
+  atomic_store(a, v, memory_order_relaxed);
+}
+
+} // namespace __sanitizer
+
 #endif  // SANITIZER_ATOMIC_H
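Aside: what the two helpers buy at call sites, shown with the runtime's own atomic types (assumes sanitizer_atomic.h is on the include path):

#include "sanitizer_atomic.h"

namespace __sanitizer {

static atomic_uint32_t counter;

u32 BumpVerbose() {  // before: spell out the memory order every time
  u32 v = atomic_load(&counter, memory_order_relaxed);
  atomic_store(&counter, v + 1, memory_order_relaxed);
  return v;
}

u32 BumpTerse() {    // after: relaxed order is implied by the helper
  u32 v = atomic_load_relaxed(&counter);
  atomic_store_relaxed(&counter, v + 1);
  return v;
}

} // namespace __sanitizer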
libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h

@@ -19,6 +19,15 @@ extern "C" void _mm_mfence();
 #pragma intrinsic(_mm_mfence)
 extern "C" void _mm_pause();
 #pragma intrinsic(_mm_pause)
+extern "C" char _InterlockedExchange8(   // NOLINT
+    char volatile *Addend, char Value);  // NOLINT
+#pragma intrinsic(_InterlockedExchange8)
+extern "C" short _InterlockedExchange16(   // NOLINT
+    short volatile *Addend, short Value);  // NOLINT
+#pragma intrinsic(_InterlockedExchange16)
+extern "C" long _InterlockedExchange(    // NOLINT
+    long volatile *Addend, long Value);  // NOLINT
+#pragma intrinsic(_InterlockedExchange)
 extern "C" long _InterlockedExchangeAdd(  // NOLINT
     long volatile * Addend, long Value);  // NOLINT
 #pragma intrinsic(_InterlockedExchangeAdd)
@@ -143,28 +152,25 @@ INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
                           u8 v, memory_order mo) {
   (void)mo;
   DCHECK(!((uptr)a % sizeof(*a)));
-  __asm {
-    mov eax, a
-    mov cl, v
-    xchg [eax], cl  // NOLINT
-    mov v, cl
-  }
-  return v;
+  return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v);
 }

 INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
                            u16 v, memory_order mo) {
   (void)mo;
   DCHECK(!((uptr)a % sizeof(*a)));
-  __asm {
-    mov eax, a
-    mov cx, v
-    xchg [eax], cx  // NOLINT
-    mov v, cx
-  }
-  return v;
+  return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v);
 }

 INLINE u32 atomic_exchange(volatile atomic_uint32_t *a,
                            u32 v, memory_order mo) {
   (void)mo;
   DCHECK(!((uptr)a % sizeof(*a)));
   return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);
 }

 #ifndef _WIN64

 INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
                                            u8 *cmp,
                                            u8 xchgv,
@@ -186,6 +192,8 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
   return false;
 }

+#endif
+
 INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
                                            uptr *cmp,
                                            uptr xchg,
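Aside: the point of switching from __asm blocks to _Interlocked* intrinsics is that the intrinsics also compile for x64 MSVC, where inline assembly is unavailable. The same primitive outside the runtime, as a minimal test-and-set lock (sketch for MSVC):

#include <intrin.h>

#pragma intrinsic(_InterlockedExchange)
#pragma intrinsic(_mm_pause)

struct SpinLock {
  volatile long state_;  // 0 = free, 1 = held
  void Lock() {
    // _InterlockedExchange returns the previous value with full-barrier
    // semantics; looping until it returns 0 acquires the lock.
    while (_InterlockedExchange(&state_, 1) != 0)
      _mm_pause();
  }
  void Unlock() { _InterlockedExchange(&state_, 0); }
};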
libsanitizer/sanitizer_common/sanitizer_common.cc

@@ -10,13 +10,19 @@
 //===----------------------------------------------------------------------===//

 #include "sanitizer_common.h"
+#include "sanitizer_allocator_internal.h"
 #include "sanitizer_flags.h"
 #include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_stacktrace_printer.h"
+#include "sanitizer_symbolizer.h"

 namespace __sanitizer {

 const char *SanitizerToolName = "SanitizerTool";

+atomic_uint32_t current_verbosity;
+
 uptr GetPageSizeCached() {
   static uptr PageSize;
   if (!PageSize)
@@ -24,19 +30,71 @@ uptr GetPageSizeCached() {
   return PageSize;
 }

-// By default, dump to stderr. If |log_to_file| is true and |report_fd_pid|
-// isn't equal to the current PID, try to obtain file descriptor by opening
-// file "report_path_prefix.<PID>".
-fd_t report_fd = kStderrFd;
-
-// Set via __sanitizer_set_report_path.
-bool log_to_file = false;
-char report_path_prefix[sizeof(report_path_prefix)];
-
-// PID of process that opened |report_fd|. If a fork() occurs, the PID of the
-// child thread will be different from |report_fd_pid|.
-uptr report_fd_pid = 0;
+StaticSpinMutex report_file_mu;
+ReportFile report_file = {&report_file_mu, kStderrFd, "", "", 0};
+
+void RawWrite(const char *buffer) {
+  report_file.Write(buffer, internal_strlen(buffer));
+}
+
+void ReportFile::ReopenIfNecessary() {
+  mu->CheckLocked();
+  if (fd == kStdoutFd || fd == kStderrFd) return;
+
+  uptr pid = internal_getpid();
+  // If in tracer, use the parent's file.
+  if (pid == stoptheworld_tracer_pid)
+    pid = stoptheworld_tracer_ppid;
+  if (fd != kInvalidFd) {
+    // If the report file is already opened by the current process,
+    // do nothing. Otherwise the report file was opened by the parent
+    // process, close it now.
+    if (fd_pid == pid)
+      return;
+    else
+      CloseFile(fd);
+  }
+
+  const char *exe_name = GetProcessName();
+  if (common_flags()->log_exe_name && exe_name) {
+    internal_snprintf(full_path, kMaxPathLength, "%s.%s.%zu", path_prefix,
+                      exe_name, pid);
+  } else {
+    internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid);
+  }
+  fd = OpenFile(full_path, WrOnly);
+  if (fd == kInvalidFd) {
+    const char *ErrorMsgPrefix = "ERROR: Can't open file: ";
+    WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
+    WriteToFile(kStderrFd, full_path, internal_strlen(full_path));
+    Die();
+  }
+  fd_pid = pid;
+}
+
+void ReportFile::SetReportPath(const char *path) {
+  if (!path)
+    return;
+  uptr len = internal_strlen(path);
+  if (len > sizeof(path_prefix) - 100) {
+    Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
+           path[0], path[1], path[2], path[3],
+           path[4], path[5], path[6], path[7]);
+    Die();
+  }
+
+  SpinMutexLock l(mu);
+  if (fd != kStdoutFd && fd != kStderrFd && fd != kInvalidFd)
+    CloseFile(fd);
+  fd = kInvalidFd;
+  if (internal_strcmp(path, "stdout") == 0) {
+    fd = kStdoutFd;
+  } else if (internal_strcmp(path, "stderr") == 0) {
+    fd = kStderrFd;
+  } else {
+    internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
+  }
+}

 // PID of the tracer task in StopTheWorld. It shares the address space with the
 // main process, but has a different PID and thus requires special handling.
@@ -45,20 +103,47 @@ uptr stoptheworld_tracer_pid = 0;
 // writing to the same log file.
 uptr stoptheworld_tracer_ppid = 0;

-static DieCallbackType DieCallback;
-void SetDieCallback(DieCallbackType callback) {
-  DieCallback = callback;
+static const int kMaxNumOfInternalDieCallbacks = 5;
+static DieCallbackType InternalDieCallbacks[kMaxNumOfInternalDieCallbacks];
+
+bool AddDieCallback(DieCallbackType callback) {
+  for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
+    if (InternalDieCallbacks[i] == nullptr) {
+      InternalDieCallbacks[i] = callback;
+      return true;
+    }
+  }
+  return false;
 }

-DieCallbackType GetDieCallback() {
-  return DieCallback;
+bool RemoveDieCallback(DieCallbackType callback) {
+  for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
+    if (InternalDieCallbacks[i] == callback) {
+      internal_memmove(&InternalDieCallbacks[i], &InternalDieCallbacks[i + 1],
+                       sizeof(InternalDieCallbacks[0]) *
+                           (kMaxNumOfInternalDieCallbacks - i - 1));
+      InternalDieCallbacks[kMaxNumOfInternalDieCallbacks - 1] = nullptr;
+      return true;
+    }
+  }
+  return false;
+}
+
+static DieCallbackType UserDieCallback;
+void SetUserDieCallback(DieCallbackType callback) {
+  UserDieCallback = callback;
 }

 void NORETURN Die() {
-  if (DieCallback) {
-    DieCallback();
+  if (UserDieCallback)
+    UserDieCallback();
+  for (int i = kMaxNumOfInternalDieCallbacks - 1; i >= 0; i--) {
+    if (InternalDieCallbacks[i])
+      InternalDieCallbacks[i]();
   }
-  internal__exit(1);
+  if (common_flags()->abort_on_error)
+    Abort();
+  internal__exit(common_flags()->exitcode);
 }

 static CheckFailedCallbackType CheckFailedCallback;
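Aside: a tool-side view of the new callback chain (sketch; MyToolFlush is illustrative). Callbacks registered here run from Die() in reverse registration order, after any user callback installed via SetUserDieCallback():

#include "sanitizer_common.h"

namespace {

void MyToolFlush() {
  // flush tool state before the process exits
}

void InitMyTool() {
  // Register during single-threaded initialization, as the comments in
  // sanitizer_common.h recommend.
  __sanitizer::AddDieCallback(MyToolFlush);
}

} // namespace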
@@ -76,37 +161,57 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
   Die();
 }

-uptr ReadFileToBuffer(const char *file_name, char **buff,
-                      uptr *buff_size, uptr max_len) {
+void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
+                                      error_t err) {
+  static int recursion_count;
+  if (recursion_count) {
+    // The Report() and CHECK calls below may call mmap recursively and fail.
+    // If we went into recursion, just die.
+    RawWrite("ERROR: Failed to mmap\n");
+    Die();
+  }
+  recursion_count++;
+  Report("ERROR: %s failed to "
+         "allocate 0x%zx (%zd) bytes of %s (error code: %d)\n",
+         SanitizerToolName, size, size, mem_type, err);
+  DumpProcessMap();
+  UNREACHABLE("unable to mmap");
+}
+
+bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+                      uptr *read_len, uptr max_len, error_t *errno_p) {
   uptr PageSize = GetPageSizeCached();
   uptr kMinFileLen = PageSize;
-  uptr read_len = 0;
-  *buff = 0;
+  *buff = nullptr;
   *buff_size = 0;
+  *read_len = 0;
   // The files we usually open are not seekable, so try different buffer sizes.
   for (uptr size = kMinFileLen; size <= max_len; size *= 2) {
-    uptr openrv = OpenFile(file_name, /*write*/ false);
-    if (internal_iserror(openrv)) return 0;
-    fd_t fd = openrv;
+    fd_t fd = OpenFile(file_name, RdOnly, errno_p);
+    if (fd == kInvalidFd) return false;
     UnmapOrDie(*buff, *buff_size);
     *buff = (char*)MmapOrDie(size, __func__);
     *buff_size = size;
+    *read_len = 0;
     // Read up to one page at a time.
-    read_len = 0;
     bool reached_eof = false;
-    while (read_len + PageSize <= size) {
-      uptr just_read = internal_read(fd, *buff + read_len, PageSize);
+    while (*read_len + PageSize <= size) {
+      uptr just_read;
+      if (!ReadFromFile(fd, *buff + *read_len, PageSize, &just_read, errno_p)) {
+        UnmapOrDie(*buff, *buff_size);
+        return false;
+      }
      if (just_read == 0) {
        reached_eof = true;
        break;
      }
-      read_len += just_read;
+      *read_len += just_read;
    }
-    internal_close(fd);
+    CloseFile(fd);
    if (reached_eof)  // We've read the whole file.
      break;
  }
-  return read_len;
+  return true;
 }

 typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
|
|||
|
||||
const char *StripPathPrefix(const char *filepath,
|
||||
const char *strip_path_prefix) {
|
||||
if (filepath == 0) return 0;
|
||||
if (strip_path_prefix == 0) return filepath;
|
||||
const char *pos = internal_strstr(filepath, strip_path_prefix);
|
||||
if (pos == 0) return filepath;
|
||||
pos += internal_strlen(strip_path_prefix);
|
||||
if (pos[0] == '.' && pos[1] == '/')
|
||||
pos += 2;
|
||||
return pos;
|
||||
if (!filepath) return nullptr;
|
||||
if (!strip_path_prefix) return filepath;
|
||||
const char *res = filepath;
|
||||
if (const char *pos = internal_strstr(filepath, strip_path_prefix))
|
||||
res = pos + internal_strlen(strip_path_prefix);
|
||||
if (res[0] == '.' && res[1] == '/')
|
||||
res += 2;
|
||||
return res;
|
||||
}
|
||||
|
||||
const char *StripModuleName(const char *module) {
|
||||
if (module == 0)
|
||||
return 0;
|
||||
if (const char *slash_pos = internal_strrchr(module, '/'))
|
||||
if (!module)
|
||||
return nullptr;
|
||||
if (SANITIZER_WINDOWS) {
|
||||
// On Windows, both slash and backslash are possible.
|
||||
// Pick the one that goes last.
|
||||
if (const char *bslash_pos = internal_strrchr(module, '\\'))
|
||||
return StripModuleName(bslash_pos + 1);
|
||||
}
|
||||
if (const char *slash_pos = internal_strrchr(module, '/')) {
|
||||
return slash_pos + 1;
|
||||
}
|
||||
return module;
|
||||
}
|
||||
|
||||
void ReportErrorSummary(const char *error_message) {
|
||||
if (!common_flags()->print_summary)
|
||||
return;
|
||||
InternalScopedBuffer<char> buff(kMaxSummaryLength);
|
||||
internal_snprintf(buff.data(), buff.size(),
|
||||
"SUMMARY: %s: %s", SanitizerToolName, error_message);
|
||||
InternalScopedString buff(kMaxSummaryLength);
|
||||
buff.append("SUMMARY: %s: %s", SanitizerToolName, error_message);
|
||||
__sanitizer_report_error_summary(buff.data());
|
||||
}
|
||||
|
||||
void ReportErrorSummary(const char *error_type, const char *file,
|
||||
int line, const char *function) {
|
||||
#ifndef SANITIZER_GO
|
||||
void ReportErrorSummary(const char *error_type, const AddressInfo &info) {
|
||||
if (!common_flags()->print_summary)
|
||||
return;
|
||||
InternalScopedBuffer<char> buff(kMaxSummaryLength);
|
||||
internal_snprintf(
|
||||
buff.data(), buff.size(), "%s %s:%d %s", error_type,
|
||||
file ? StripPathPrefix(file, common_flags()->strip_path_prefix) : "??",
|
||||
line, function ? function : "??");
|
||||
InternalScopedString buff(kMaxSummaryLength);
|
||||
buff.append("%s ", error_type);
|
||||
RenderFrame(&buff, "%L %F", 0, info, common_flags()->symbolize_vs_style,
|
||||
common_flags()->strip_path_prefix);
|
||||
ReportErrorSummary(buff.data());
|
||||
}
|
||||
#endif
|
||||
|
||||
LoadedModule::LoadedModule(const char *module_name, uptr base_address) {
|
||||
void LoadedModule::set(const char *module_name, uptr base_address) {
|
||||
clear();
|
||||
full_name_ = internal_strdup(module_name);
|
||||
base_address_ = base_address;
|
||||
n_ranges_ = 0;
|
||||
}
|
||||
|
||||
void LoadedModule::clear() {
|
||||
InternalFree(full_name_);
|
||||
full_name_ = nullptr;
|
||||
while (!ranges_.empty()) {
|
||||
AddressRange *r = ranges_.front();
|
||||
ranges_.pop_front();
|
||||
InternalFree(r);
|
||||
}
|
||||
}
|
||||
|
||||
void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) {
|
||||
CHECK_LT(n_ranges_, kMaxNumberOfAddressRanges);
|
||||
ranges_[n_ranges_].beg = beg;
|
||||
ranges_[n_ranges_].end = end;
|
||||
exec_[n_ranges_] = executable;
|
||||
n_ranges_++;
|
||||
void *mem = InternalAlloc(sizeof(AddressRange));
|
||||
AddressRange *r = new(mem) AddressRange(beg, end, executable);
|
||||
ranges_.push_back(r);
|
||||
}
|
||||
|
||||
bool LoadedModule::containsAddress(uptr address) const {
|
||||
for (uptr i = 0; i < n_ranges_; i++) {
|
||||
if (ranges_[i].beg <= address && address < ranges_[i].end)
|
||||
for (Iterator iter = ranges(); iter.hasNext();) {
|
||||
const AddressRange *r = iter.next();
|
||||
if (r->beg <= address && address < r->end)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
|
@ -210,12 +330,9 @@ void IncreaseTotalMmap(uptr size) {
|
|||
if (!common_flags()->mmap_limit_mb) return;
|
||||
uptr total_mmaped =
|
||||
atomic_fetch_add(&g_total_mmaped, size, memory_order_relaxed) + size;
|
||||
if ((total_mmaped >> 20) > common_flags()->mmap_limit_mb) {
|
||||
// Since for now mmap_limit_mb is not a user-facing flag, just CHECK.
|
||||
uptr mmap_limit_mb = common_flags()->mmap_limit_mb;
|
||||
common_flags()->mmap_limit_mb = 0; // Allow mmap in CHECK.
|
||||
RAW_CHECK(total_mmaped >> 20 < mmap_limit_mb);
|
||||
}
|
||||
// Since for now mmap_limit_mb is not a user-facing flag, just kill
|
||||
// a program. Use RAW_CHECK to avoid extra mmaps in reporting.
|
||||
RAW_CHECK((total_mmaped >> 20) < common_flags()->mmap_limit_mb);
|
||||
}
|
||||
|
||||
void DecreaseTotalMmap(uptr size) {
|
||||
|
@@ -223,39 +340,130 @@ void DecreaseTotalMmap(uptr size) {
   atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed);
 }

-}  // namespace __sanitizer
+bool TemplateMatch(const char *templ, const char *str) {
+  if ((!str) || str[0] == 0)
+    return false;
+  bool start = false;
+  if (templ && templ[0] == '^') {
+    start = true;
+    templ++;
+  }
+  bool asterisk = false;
+  while (templ && templ[0]) {
+    if (templ[0] == '*') {
+      templ++;
+      start = false;
+      asterisk = true;
+      continue;
+    }
+    if (templ[0] == '$')
+      return str[0] == 0 || asterisk;
+    if (str[0] == 0)
+      return false;
+    char *tpos = (char*)internal_strchr(templ, '*');
+    char *tpos1 = (char*)internal_strchr(templ, '$');
+    if ((!tpos) || (tpos1 && tpos1 < tpos))
+      tpos = tpos1;
+    if (tpos)
+      tpos[0] = 0;
+    const char *str0 = str;
+    const char *spos = internal_strstr(str, templ);
+    str = spos + internal_strlen(templ);
+    templ = tpos;
+    if (tpos)
+      tpos[0] = tpos == tpos1 ? '$' : '*';
+    if (!spos)
+      return false;
+    if (start && spos != str0)
+      return false;
+    start = false;
+    asterisk = false;
+  }
+  return true;
+}
+
+static const char kPathSeparator = SANITIZER_WINDOWS ? ';' : ':';
+
+char *FindPathToBinary(const char *name) {
+  const char *path = GetEnv("PATH");
+  if (!path)
+    return nullptr;
+  uptr name_len = internal_strlen(name);
+  InternalScopedBuffer<char> buffer(kMaxPathLength);
+  const char *beg = path;
+  while (true) {
+    const char *end = internal_strchrnul(beg, kPathSeparator);
+    uptr prefix_len = end - beg;
+    if (prefix_len + name_len + 2 <= kMaxPathLength) {
+      internal_memcpy(buffer.data(), beg, prefix_len);
+      buffer[prefix_len] = '/';
+      internal_memcpy(&buffer[prefix_len + 1], name, name_len);
+      buffer[prefix_len + 1 + name_len] = '\0';
+      if (FileExists(buffer.data()))
+        return internal_strdup(buffer.data());
+    }
+    if (*end == '\0') break;
+    beg = end + 1;
+  }
+  return nullptr;
+}
+
+static char binary_name_cache_str[kMaxPathLength];
+static char process_name_cache_str[kMaxPathLength];
+
+const char *GetProcessName() {
+  return process_name_cache_str;
+}
+
+static uptr ReadProcessName(/*out*/ char *buf, uptr buf_len) {
+  ReadLongProcessName(buf, buf_len);
+  char *s = const_cast<char *>(StripModuleName(buf));
+  uptr len = internal_strlen(s);
+  if (s != buf) {
+    internal_memmove(buf, s, len);
+    buf[len] = '\0';
+  }
+  return len;
+}
+
+void UpdateProcessName() {
+  ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str));
+}
+
+// Call once to make sure that binary_name_cache_str is initialized
+void CacheBinaryName() {
+  if (binary_name_cache_str[0] != '\0')
+    return;
+  ReadBinaryName(binary_name_cache_str, sizeof(binary_name_cache_str));
+  ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str));
+}
+
+uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len) {
+  CacheBinaryName();
+  uptr name_len = internal_strlen(binary_name_cache_str);
+  name_len = (name_len < buf_len - 1) ? name_len : buf_len - 1;
+  if (buf_len == 0)
+    return 0;
+  internal_memcpy(buf, binary_name_cache_str, name_len);
+  buf[name_len] = '\0';
+  return name_len;
+}
+
+} // namespace __sanitizer

 using namespace __sanitizer;  // NOLINT

 extern "C" {
 void __sanitizer_set_report_path(const char *path) {
-  if (!path)
-    return;
-  uptr len = internal_strlen(path);
-  if (len > sizeof(report_path_prefix) - 100) {
-    Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
-           path[0], path[1], path[2], path[3],
-           path[4], path[5], path[6], path[7]);
-    Die();
-  }
-  if (report_fd != kStdoutFd &&
-      report_fd != kStderrFd &&
-      report_fd != kInvalidFd)
-    internal_close(report_fd);
-  report_fd = kInvalidFd;
-  log_to_file = false;
-  if (internal_strcmp(path, "stdout") == 0) {
-    report_fd = kStdoutFd;
-  } else if (internal_strcmp(path, "stderr") == 0) {
-    report_fd = kStderrFd;
-  } else {
-    internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix));
-    report_path_prefix[len] = '\0';
-    log_to_file = true;
-  }
+  report_file.SetReportPath(path);
 }

 void __sanitizer_report_error_summary(const char *error_summary) {
   Printf("%s\n", error_summary);
 }
-}  // extern "C"
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_set_death_callback(void (*callback)(void)) {
+  SetUserDieCallback(callback);
+}
+} // extern "C"
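Aside: TemplateMatch() above implements the pattern language used by sanitizer suppressions: '*' is a wildcard, '^' anchors at the start, '$' anchors at the end. Expected results, as a small sketch:

#include "sanitizer_common.h"

void TemplateMatchExamples() {
  using __sanitizer::TemplateMatch;
  bool a = TemplateMatch("^foo*bar$", "foobazbar");  // true: anchored match
  bool b = TemplateMatch("foo", "myfoolib");         // true: substring match
  bool c = TemplateMatch("^foo", "myfoolib");        // false: not at start
  (void)a; (void)b; (void)c;
}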
libsanitizer/sanitizer_common/sanitizer_common.h

@@ -5,8 +5,8 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file is shared between AddressSanitizer and ThreadSanitizer
-// run-time libraries.
+// This file is shared between run-time libraries of sanitizers.
+//
 // It declares common functions and classes that are used in both runtimes.
 // Implementation of some functions are provided in sanitizer_common, while
 // others must be defined by run-time library itself.
@@ -14,13 +14,21 @@
 #ifndef SANITIZER_COMMON_H
 #define SANITIZER_COMMON_H

+#include "sanitizer_flags.h"
 #include "sanitizer_interface_internal.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_libc.h"
 #include "sanitizer_list.h"
 #include "sanitizer_mutex.h"
-#include "sanitizer_flags.h"
+
+#ifdef _MSC_VER
+extern "C" void _ReadWriteBarrier();
+#pragma intrinsic(_ReadWriteBarrier)
+#endif

 namespace __sanitizer {
 struct StackTrace;
+struct AddressInfo;

 // Constants.
 const uptr kWordSize = SANITIZER_WORDSIZE / 8;
@@ -32,12 +40,27 @@ const uptr kWordSizeInBits = 8 * kWordSize;
 const uptr kCacheLineSize = 64;
 #endif

-const uptr kMaxPathLength = 512;
+const uptr kMaxPathLength = 4096;
+
+// 16K loaded modules should be enough for everyone.
+static const uptr kMaxNumberOfModules = 1 << 14;

 const uptr kMaxThreadStackSize = 1 << 30;  // 1Gb

+// Denotes fake PC values that come from JIT/JAVA/etc.
+// For such PC values __tsan_symbolize_external() will be called.
+const u64 kExternalPCBit = 1ULL << 60;
+
 extern const char *SanitizerToolName;  // Can be changed by the tool.

+extern atomic_uint32_t current_verbosity;
+INLINE void SetVerbosity(int verbosity) {
+  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
+}
+INLINE int Verbosity() {
+  return atomic_load(&current_verbosity, memory_order_relaxed);
+}
+
 uptr GetPageSize();
 uptr GetPageSizeCached();
 uptr GetMmapGranularity();
@@ -53,17 +76,27 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
 // Memory management
 void *MmapOrDie(uptr size, const char *mem_type);
 void UnmapOrDie(void *addr, uptr size);
-void *MmapFixedNoReserve(uptr fixed_addr, uptr size);
+void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
+                         const char *name = nullptr);
 void *MmapNoReserveOrDie(uptr size, const char *mem_type);
 void *MmapFixedOrDie(uptr fixed_addr, uptr size);
-void *Mprotect(uptr fixed_addr, uptr size);
+void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
 // Map aligned chunk of address space; size and alignment are powers of two.
 void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
+// Disallow access to a memory range. Use MmapNoAccess to allocate an
+// unaccessible memory.
+bool MprotectNoAccess(uptr addr, uptr size);
+
 // Used to check if we can map shadow memory to a fixed location.
 bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
 void FlushUnneededShadowMemory(uptr addr, uptr size);
 void IncreaseTotalMmap(uptr size);
 void DecreaseTotalMmap(uptr size);
 uptr GetRSS();
 void NoHugePagesInRegion(uptr addr, uptr length);
 void DontDumpShadowMemory(uptr addr, uptr length);
+// Check if the built VMA size matches the runtime one.
+void CheckVMASize();

 // InternalScopedBuffer can be used instead of large stack arrays to
 // keep frame size low.
@@ -126,44 +159,93 @@ void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

 // IO
 void RawWrite(const char *buffer);
-bool PrintsToTty();
-// Caching version of PrintsToTty(). Not thread-safe.
-bool PrintsToTtyCached();
 bool ColorizeReports();
 void Printf(const char *format, ...);
 void Report(const char *format, ...);
 void SetPrintfAndReportCallback(void (*callback)(const char *));
 #define VReport(level, ...) \
   do { \
-    if ((uptr)common_flags()->verbosity >= (level)) Report(__VA_ARGS__); \
+    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
   } while (0)
 #define VPrintf(level, ...) \
   do { \
-    if ((uptr)common_flags()->verbosity >= (level)) Printf(__VA_ARGS__); \
+    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
   } while (0)

 // Can be used to prevent mixing error reports from different sanitizers.
 extern StaticSpinMutex CommonSanitizerReportMutex;
-void MaybeOpenReportFile();
-extern fd_t report_fd;
-extern bool log_to_file;
-extern char report_path_prefix[4096];
-extern uptr report_fd_pid;
+
+struct ReportFile {
+  void Write(const char *buffer, uptr length);
+  bool SupportsColors();
+  void SetReportPath(const char *path);
+
+  // Don't use fields directly. They are only declared public to allow
+  // aggregate initialization.
+
+  // Protects fields below.
+  StaticSpinMutex *mu;
+  // Opened file descriptor. Defaults to stderr. It may be equal to
+  // kInvalidFd, in which case new file will be opened when necessary.
+  fd_t fd;
+  // Path prefix of report file, set via __sanitizer_set_report_path.
+  char path_prefix[kMaxPathLength];
+  // Full path to report, obtained as <path_prefix>.PID
+  char full_path[kMaxPathLength];
+  // PID of the process that opened fd. If a fork() occurs,
+  // the PID of child will be different from fd_pid.
+  uptr fd_pid;
+
+ private:
+  void ReopenIfNecessary();
+};
+extern ReportFile report_file;

 extern uptr stoptheworld_tracer_pid;
 extern uptr stoptheworld_tracer_ppid;

-uptr OpenFile(const char *filename, bool write);
+enum FileAccessMode {
+  RdOnly,
+  WrOnly,
+  RdWr
+};
+
+// Returns kInvalidFd on error.
+fd_t OpenFile(const char *filename, FileAccessMode mode,
+              error_t *errno_p = nullptr);
+void CloseFile(fd_t);
+
+// Return true on success, false on error.
+bool ReadFromFile(fd_t fd, void *buff, uptr buff_size,
+                  uptr *bytes_read = nullptr, error_t *error_p = nullptr);
+bool WriteToFile(fd_t fd, const void *buff, uptr buff_size,
+                 uptr *bytes_written = nullptr, error_t *error_p = nullptr);
+
+bool RenameFile(const char *oldpath, const char *newpath,
+                error_t *error_p = nullptr);
+
+// Scoped file handle closer.
+struct FileCloser {
+  explicit FileCloser(fd_t fd) : fd(fd) {}
+  ~FileCloser() { CloseFile(fd); }
+  fd_t fd;
+};
+
+bool SupportsColoredOutput(fd_t fd);
+
 // Opens the file 'file_name' and reads up to 'max_len' bytes.
 // The resulting buffer is mmaped and stored in '*buff'.
-// The size of the mmaped region is stored in '*buff_size',
-// Returns the number of read bytes or 0 if file can not be opened.
-uptr ReadFileToBuffer(const char *file_name, char **buff,
-                      uptr *buff_size, uptr max_len);
+// The size of the mmaped region is stored in '*buff_size'.
+// The total number of read bytes is stored in '*read_len'.
+// Returns true if file was successfully opened and read.
+bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+                      uptr *read_len, uptr max_len = 1 << 26,
+                      error_t *errno_p = nullptr);
 // Maps given file to virtual memory, and returns pointer to it
-// (or NULL if the mapping failes). Stores the size of mmaped region
+// (or NULL if mapping fails). Stores the size of mmaped region
 // in '*buff_size'.
 void *MapFileToMemory(const char *file_name, uptr *buff_size);
-void *MapWritableFileToMemory(void *addr, uptr size, uptr fd, uptr offset);
+void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);

 bool IsAccessibleMemoryRange(uptr beg, uptr size);
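Aside: the fd_t-based file API declared above, end to end (sketch composed only of functions from this header; the buffer size is arbitrary):

#include "sanitizer_common.h"

namespace __sanitizer {

bool CopyFirstChunk(const char *from, const char *to) {
  fd_t in = OpenFile(from, RdOnly);
  if (in == kInvalidFd) return false;
  FileCloser close_in(in);               // scoped CloseFile
  char buf[4096];
  uptr nread = 0;
  if (!ReadFromFile(in, buf, sizeof(buf), &nread)) return false;
  fd_t out = OpenFile(to, WrOnly);
  if (out == kInvalidFd) return false;
  FileCloser close_out(out);
  return WriteToFile(out, buf, nread);
}

} // namespace __sanitizer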
@@ -174,6 +256,12 @@ const char *StripPathPrefix(const char *filepath,
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
bool FileExists(const char *filename);

@@ -181,6 +269,9 @@ const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
const char *GetPwd();
char *FindPathToBinary(const char *name);
bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);

u32 GetUid();
void ReExec();
bool StackSizeIsUnlimited();

@@ -192,10 +283,13 @@ void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void CovUpdateMapping(uptr caller_pc = 0);
void CoverageUpdateMapping();
void CovBeforeFork();
void CovAfterFork(int child_pid);

void InitializeCoverage(bool enabled, const char *coverage_dir);
void ReInitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

@@ -205,12 +299,15 @@ void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      error_t err);

// Set the name of the current thread to 'name', return true on succees.
// The name may be truncated to a system-dependent limit.

@@ -222,12 +319,26 @@ bool SanitizerGetThreadName(char *name, int max_len);
// Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do tool-specific job.
typedef void (*DieCallbackType)(void);
void SetDieCallback(DieCallbackType);
DieCallbackType GetDieCallback();

// It's possible to add several callbacks that would be run when "Die" is
// called. The callbacks will be run in the opposite order. The tools are
// strongly recommended to setup all callbacks during initialization, when there
// is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                        u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);

// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
// (exceeded==false).
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
bool IsDeadlySignal(int signum);

@@ -243,9 +354,9 @@ const int kMaxSummaryLength = 1024;
// and pass it to __sanitizer_report_error_summary.
void ReportErrorSummary(const char *error_message);
// Same as above, but construct error_message as:
//   error_type file:line function
void ReportErrorSummary(const char *error_type, const char *file,
                        int line, const char *function);
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, StackTrace *trace);

// Math

@@ -264,7 +375,11 @@ INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else

@@ -277,7 +392,11 @@ INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else

@@ -297,7 +416,7 @@ INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  uptr up = MostSignificantSetBitIndex(size);
  CHECK(size < (1ULL << (up + 1)));
  CHECK(size > (1ULL << up));
  return 1UL << (up + 1);
  return 1ULL << (up + 1);
}

INLINE uptr RoundUpTo(uptr size, uptr boundary) {

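A concrete trace of the helpers in these hunks: x = 40 is 0b101000, so MostSignificantSetBitIndex(40) == 5, LeastSignificantSetBitIndex(40) == 3, and RoundUpToPowerOfTwo(40) == 1ULL << 6 == 64. The change from 1UL to 1ULL is not cosmetic: on LLP64 targets such as 64-bit Windows, unsigned long is 32 bits wide while uptr is 64, so 1UL << (up + 1) would overflow once up + 1 reaches 32.
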
@@ -315,17 +434,7 @@ INLINE bool IsAligned(uptr a, uptr alignment) {

INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
  return __builtin_ctzl(x);
#elif defined(_WIN64)
  unsigned long ret;  // NOLINT
  _BitScanForward64(&ret, x);
  return ret;
#else
  unsigned long ret;  // NOLINT
  _BitScanForward(&ret, x);
  return ret;
#endif
  return LeastSignificantSetBitIndex(x);
}

// Don't use std::min, std::max or std::swap, to minimize dependency

@@ -354,14 +463,14 @@ INLINE int ToLower(int c) {
// small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVector {
class InternalMmapVectorNoCtor {
 public:
  explicit InternalMmapVector(uptr initial_capacity) {
  void Initialize(uptr initial_capacity) {
    capacity_ = Max(initial_capacity, (uptr)1);
    size_ = 0;
    data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVector");
    data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
  }
  ~InternalMmapVector() {
  void Destroy() {
    UnmapOrDie(data_, capacity_ * sizeof(T));
  }
  T &operator[](uptr i) {

@@ -394,11 +503,15 @@ class InternalMmapVector {
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const {
    return capacity_;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

 private:
  void Resize(uptr new_capacity) {

@@ -412,15 +525,24 @@ class InternalMmapVector {
    UnmapOrDie(old_data, capacity_ * sizeof(T));
    capacity_ = new_capacity;
  }
  // Disallow evil constructors.
  InternalMmapVector(const InternalMmapVector&);
  void operator=(const InternalMmapVector&);

  T *data_;
  uptr capacity_;
  uptr size_;
};

template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  explicit InternalMmapVector(uptr initial_capacity) {
    InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow evil constructors.
  InternalMmapVector(const InternalMmapVector&);
  void operator=(const InternalMmapVector&);
};

// HeapSort for arrays and InternalMmapVector.
template<class Container, class Compare>
void InternalSort(Container *v, uptr size, Compare comp) {

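The point of the NoCtor split in this hunk is that a vector can now live in global storage without a static constructor: the base class stays inert until Initialize() is called, while the derived InternalMmapVector keeps the old RAII interface. A minimal sketch of the intended pattern (the names g_guard_arrays and ToolInit are illustrative, not from the merge):

// Global instance: zero-initialized at load time, no static ctor runs.
static InternalMmapVectorNoCtor<s32 *> g_guard_arrays;

void ToolInit() {
  g_guard_arrays.Initialize(16);      // explicit, deterministically ordered
  g_guard_arrays.push_back(nullptr);  // usable like the RAII variant afterwards
}

void ToolShutdown() {
  g_guard_arrays.Destroy();           // paired explicit teardown
}

This is the shape the coverage runtime later in this patch relies on for its guard_array_vec and module_name_vec members.
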
@@ -478,29 +600,32 @@ uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule(const char *module_name, uptr base_address);
  LoadedModule() : full_name_(nullptr), base_address_(0) { ranges_.clear(); }
  void set(const char *module_name, uptr base_address);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }

  uptr n_ranges() const { return n_ranges_; }
  uptr address_range_start(int i) const { return ranges_[i].beg; }
  uptr address_range_end(int i) const { return ranges_[i].end; }
  bool address_range_executable(int i) const { return exec_[i]; }

 private:
  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;

    AddressRange(uptr beg, uptr end, bool executable)
        : next(nullptr), beg(beg), end(end), executable(executable) {}
  };
  char *full_name_;

  typedef IntrusiveList<AddressRange>::ConstIterator Iterator;
  Iterator ranges() const { return Iterator(&ranges_); }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  static const uptr kMaxNumberOfAddressRanges = 6;
  AddressRange ranges_[kMaxNumberOfAddressRanges];
  bool exec_[kMaxNumberOfAddressRanges];
  uptr n_ranges_;
  IntrusiveList<AddressRange> ranges_;
};

// OS-dependent function that fills array with descriptions of at most

@@ -511,45 +636,80 @@ typedef bool (*string_predicate_t)(const char *);
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
                      string_predicate_t filter);

#if SANITIZER_POSIX
const uptr kPthreadDestructorIterations = 4;
#else
// Unused on Windows.
const uptr kPthreadDestructorIterations = 0;
#endif

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

#if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !defined(SANITIZER_GO)
extern uptr indirect_call_wrapper;
void SetIndirectCallWrapper(uptr wrapper);
enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

template <typename F>
F IndirectExternCall(F f) {
  typedef F (*WrapF)(F);
  return indirect_call_wrapper ? ((WrapF)indirect_call_wrapper)(f) : f;
}
#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void WriteToSyslog(const char *buffer);
#else
INLINE void SetIndirectCallWrapper(uptr wrapper) {}
template <typename F>
F IndirectExternCall(F f) {
  return f;
}
INLINE void AndroidLogInit() {}
INLINE void WriteToSyslog(const char *buffer) {}
#endif

#if SANITIZER_ANDROID
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void AndroidLogWrite(const char *buffer);
void GetExtraActivationFlags(char *buf, uptr size);
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogInit() {}
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; }
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
// Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if _MSC_VER && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}

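A sketch of the documented usage of SanitizerBreakOptimization (the loop is illustrative; only the placement of the call matters):

static void zero_bytes(char *p, uptr n) {
  for (uptr i = 0; i < n; i++) {
    p[i] = 0;
    // An opaque use of the pointer inside the loop keeps the optimizer from
    // recognizing the idiom and emitting a call to the intercepted memset.
    SanitizerBreakOptimization(&p[i]);
  }
}
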
struct SignalContext {
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;

  SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp) :
      context(context), addr(addr), pc(pc), sp(sp), bp(bp) {
  }

  // Creates signal context in a platform-specific manner.
  static SignalContext Create(void *siginfo, void *context);
};

void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,

(File diff suppressed because it is too large.)

@@ -11,6 +11,7 @@
// with a few common GNU extensions.
//
//===----------------------------------------------------------------------===//

#include <stdarg.h>

static const char *parse_number(const char *p, int *out) {

@@ -189,7 +190,7 @@ static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
    continue;
  }
  if (*p == '\0') {
    return 0;
    return nullptr;
  }
  // %n$
  p = maybe_parse_param_index(p, &dir->argIdx);

@@ -204,7 +205,7 @@ static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
    p = parse_number(p, &dir->fieldWidth);
    CHECK(p);
    if (dir->fieldWidth <= 0)  // Width if at all must be non-zero
      return 0;
      return nullptr;
  }
  // m
  if (*p == 'm') {

@@ -224,8 +225,8 @@ static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
    while (*p && *p != ']')
      ++p;
    if (*p == 0)
      return 0; // unexpected end of string
    // Consume the closing ']'.
      return nullptr; // unexpected end of string
    // Consume the closing ']'.
    ++p;
  }
  // This is unfortunately ambiguous between old GNU extension

@@ -249,7 +250,7 @@ static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
    while (*q && *q != ']' && *q != '%')
      ++q;
    if (*q == 0 || *q == '%')
      return 0;
      return nullptr;
    p = q + 1; // Consume the closing ']'.
    dir->maybeGnuMalloc = true;
  }

@@ -393,7 +394,7 @@ static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
    continue;
  }
  if (*p == '\0') {
    return 0;
    return nullptr;
  }
  // %n$
  p = maybe_parse_param_index(p, &dir->precisionIdx);

@@ -406,7 +407,7 @@ static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
  p = maybe_parse_number_or_star(p, &dir->fieldWidth,
                                 &dir->starredWidth);
  if (!p)
    return 0;
    return nullptr;
  // Precision
  if (*p == '.') {
    ++p;

@@ -414,7 +415,7 @@ static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
  p = maybe_parse_number_or_star(p, &dir->fieldPrecision,
                                 &dir->starredPrecision);
  if (!p)
    return 0;
    return nullptr;
  // m$
  if (dir->starredPrecision) {
    p = maybe_parse_param_index(p, &dir->precisionIdx);

@@ -554,4 +555,4 @@ static void printf_common(void *ctx, const char *format, va_list aq) {
  }
}

#endif  // SANITIZER_INTERCEPT_PRINTF
#endif  // SANITIZER_INTERCEPT_PRINTF

@@ -518,7 +518,7 @@ static const ioctl_desc *ioctl_table_lookup(unsigned req) {
  if (left == right && ioctl_table[left].req == req)
    return ioctl_table + left;
  else
    return 0;
    return nullptr;
}

static bool ioctl_decode(unsigned req, ioctl_desc *desc) {

@@ -565,7 +565,7 @@ static const ioctl_desc *ioctl_lookup(unsigned req) {
      (desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE ||
       desc->type == ioctl_desc::READ))
    return desc;
  return 0;
  return nullptr;
}

static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,

@@ -576,14 +576,10 @@ static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
  }
  if (desc->type != ioctl_desc::CUSTOM)
    return;
  switch (request) {
    case 0x00008912: {  // SIOCGIFCONF
      struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
      COMMON_INTERCEPTOR_READ_RANGE(ctx, &ifc->ifc_len, sizeof(ifc->ifc_len));
      break;
    }
  if (request == IOCTL_SIOCGIFCONF) {
    struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
    COMMON_INTERCEPTOR_READ_RANGE(ctx, &ifc->ifc_len, sizeof(ifc->ifc_len));
  }
  return;
}

static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,

@@ -595,12 +591,8 @@ static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,
  }
  if (desc->type != ioctl_desc::CUSTOM)
    return;
  switch (request) {
    case 0x00008912: {  // SIOCGIFCONF
      struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len);
      break;
    }
  if (request == IOCTL_SIOCGIFCONF) {
    struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len);
  }
  return;
}

@@ -11,35 +11,31 @@

#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_stackdepot.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"

#if SANITIZER_POSIX
#include "sanitizer_posix.h"
#endif

namespace __sanitizer {

bool PrintsToTty() {
  MaybeOpenReportFile();
  return internal_isatty(report_fd) != 0;
}

bool PrintsToTtyCached() {
  // FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color
  // printing on Windows.
  if (SANITIZER_WINDOWS)
    return 0;

  static int cached = 0;
  static bool prints_to_tty;
  if (!cached) {  // Not thread-safe.
    prints_to_tty = PrintsToTty();
    cached = 1;
  }
  return prints_to_tty;
bool ReportFile::SupportsColors() {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  return SupportsColoredOutput(fd);
}

bool ColorizeReports() {
  // FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color
  // printing on Windows.
  if (SANITIZER_WINDOWS)
    return false;

  const char *flag = common_flags()->color;
  return internal_strcmp(flag, "always") == 0 ||
         (internal_strcmp(flag, "auto") == 0 && PrintsToTtyCached());
         (internal_strcmp(flag, "auto") == 0 && report_file.SupportsColors());
}

static void (*sandboxing_callback)();

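A note on the flag consulted here: report coloring follows the common runtime flag color, so running with, for example, ASAN_OPTIONS=color=always forces ANSI escape codes even when output is redirected, while the (assumed default) auto setting now asks report_file whether its descriptor supports colored output instead of the old process-wide tty cache.
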
@@ -50,16 +46,82 @@ void SetSandboxingCallback(void (*f)()) {
void ReportErrorSummary(const char *error_type, StackTrace *stack) {
  if (!common_flags()->print_summary)
    return;
  AddressInfo ai;
#if !SANITIZER_GO
  if (stack->size > 0 && Symbolizer::GetOrInit()->CanReturnFileLineInfo()) {
    // Currently, we include the first stack frame into the report summary.
    // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
    uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
    Symbolizer::GetOrInit()->SymbolizePC(pc, &ai, 1);
  if (stack->size == 0) {
    ReportErrorSummary(error_type);
    return;
  }
  // Currently, we include the first stack frame into the report summary.
  // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
  uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
  SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
  ReportErrorSummary(error_type, frame->info);
  frame->ClearAll();
}

static void (*SoftRssLimitExceededCallback)(bool exceeded);
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
  CHECK_EQ(SoftRssLimitExceededCallback, nullptr);
  SoftRssLimitExceededCallback = Callback;
}

void BackgroundThread(void *arg) {
  uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
  uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
  uptr prev_reported_rss = 0;
  uptr prev_reported_stack_depot_size = 0;
  bool reached_soft_rss_limit = false;
  while (true) {
    SleepForMillis(100);
    uptr current_rss_mb = GetRSS() >> 20;
    if (Verbosity()) {
      // If RSS has grown 10% since last time, print some information.
      if (prev_reported_rss * 11 / 10 < current_rss_mb) {
        Printf("%s: RSS: %zdMb\n", SanitizerToolName, current_rss_mb);
        prev_reported_rss = current_rss_mb;
      }
      // If stack depot has grown 10% since last time, print it too.
      StackDepotStats *stack_depot_stats = StackDepotGetStats();
      if (prev_reported_stack_depot_size * 11 / 10 <
          stack_depot_stats->allocated) {
        Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
               SanitizerToolName,
               stack_depot_stats->n_uniq_ids,
               stack_depot_stats->allocated >> 20);
        prev_reported_stack_depot_size = stack_depot_stats->allocated;
      }
    }
    // Check RSS against the limit.
    if (hard_rss_limit_mb && hard_rss_limit_mb < current_rss_mb) {
      Report("%s: hard rss limit exhausted (%zdMb vs %zdMb)\n",
             SanitizerToolName, hard_rss_limit_mb, current_rss_mb);
      DumpProcessMap();
      Die();
    }
    if (soft_rss_limit_mb) {
      if (soft_rss_limit_mb < current_rss_mb && !reached_soft_rss_limit) {
        reached_soft_rss_limit = true;
        Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
               SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
        if (SoftRssLimitExceededCallback)
          SoftRssLimitExceededCallback(true);
      } else if (soft_rss_limit_mb >= current_rss_mb &&
                 reached_soft_rss_limit) {
        reached_soft_rss_limit = false;
        if (SoftRssLimitExceededCallback)
          SoftRssLimitExceededCallback(false);
      }
    }
  }
}

void MaybeStartBackgroudThread() {
#if SANITIZER_LINUX  // Need to implement/test on other platforms.
  // Start the background thread if one of the rss limits is given.
  if (!common_flags()->hard_rss_limit_mb &&
      !common_flags()->soft_rss_limit_mb) return;
  if (!&real_pthread_create) return;  // Can't spawn the thread anyway.
  internal_start_thread(BackgroundThread, nullptr);
#endif
  ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
}

}  // namespace __sanitizer

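Worth noting about the background thread above: it is armed purely by the two common flags it reads at the top, so a run such as ASAN_OPTIONS=hard_rss_limit_mb=4096:soft_rss_limit_mb=2048 (an illustrative invocation) dumps the process map and dies once RSS passes 4096Mb, and fires the registered soft-limit callback in both directions around 2048Mb; with both flags left at their zero defaults the thread is never started.
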
@@ -2297,7 +2297,9 @@ PRE_SYSCALL(ni_syscall)() {}
POST_SYSCALL(ni_syscall)(long res) {}

PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64))
#if !SANITIZER_ANDROID && \
    (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
     defined(__powerpc64__) || defined(__aarch64__))
  if (data) {
    if (request == ptrace_setregs) {
      PRE_READ((void *)data, struct_user_regs_struct_sz);

@@ -2316,7 +2318,9 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
}

POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64))
#if !SANITIZER_ANDROID && \
    (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
     defined(__powerpc64__) || defined(__aarch64__))
  if (res >= 0 && data) {
    // Note that this is different from the interceptor in
    // sanitizer_common_interceptors.inc.

@@ -10,18 +10,24 @@
//
// Compiler instrumentation:
// For every interesting basic block the compiler injects the following code:
// if (*Guard) {
//    __sanitizer_cov();
//    *Guard = 1;
// if (Guard < 0) {
//    __sanitizer_cov(&Guard);
// }
// At the module start up time __sanitizer_cov_module_init sets the guards
// to consecutive negative numbers (-1, -2, -3, ...).
// It's fine to call __sanitizer_cov more than once for a given block.
//
// Run-time:
//  - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC).
//    and atomically set Guard to -Guard.
//  - __sanitizer_cov_dump: dump the coverage data to disk.
//  For every module of the current process that has coverage data
//  this will create a file module_name.PID.sancov. The file format is simple:
//  it's just a sorted sequence of 4-byte offsets in the module.
//  this will create a file module_name.PID.sancov.
//
//  The file format is simple: the first 8 bytes is the magic,
//  one of 0xC0BFFFFFFFFFFF64 and 0xC0BFFFFFFFFFFF32. The last byte of the
//  magic defines the size of the following offsets.
//  The rest of the data is the offsets in the module.
//
// Eventually, this coverage implementation should be obsoleted by a more
// powerful general purpose Clang/LLVM coverage instrumentation.
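Seen as source rather than prose, the new guard scheme described above looks roughly like this (a hand-written sketch; the array name, size and block index are illustrative, not actual compiler output):

// Emitted once per module: guards[0] holds the block count N, and
// __sanitizer_cov_module_init rewrites guards[1..N] to -1, -2, -3, ...
static s32 guards[kNumBlocks + 1];  // kNumBlocks is a placeholder constant

void instrumented_block_42() {
  if (guards[42] < 0)                     // negative means "not visited yet"
    __sanitizer_cov((u32 *)&guards[42]);  // records the PC, negates the guard
  // ... original basic-block body ...
}
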
@@ -39,7 +45,12 @@
#include "sanitizer_symbolizer.h"
#include "sanitizer_flags.h"

atomic_uint32_t dump_once_guard;  // Ensure that CovDump runs only once.
static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL;
static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL;

static atomic_uint32_t dump_once_guard;  // Ensure that CovDump runs only once.

static atomic_uintptr_t coverage_counter;

// pc_array is the array containing the covered PCs.
// To make the pc_array thread- and async-signal-safe it has to be large enough.

@@ -50,29 +61,55 @@ atomic_uint32_t dump_once_guard;  // Ensure that CovDump runs only once.
// dump current memory layout to another file.

static bool cov_sandboxed = false;
static int cov_fd = kInvalidFd;
static fd_t cov_fd = kInvalidFd;
static unsigned int cov_max_block_size = 0;
static bool coverage_enabled = false;
static const char *coverage_dir;

namespace __sanitizer {

class CoverageData {
 public:
  void Init();
  void Enable();
  void Disable();
  void ReInit();
  void BeforeFork();
  void AfterFork(int child_pid);
  void Extend(uptr npcs);
  void Add(uptr pc);
  void Add(uptr pc, u32 *guard);
  void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
                 uptr cache_size);
  void DumpCallerCalleePairs();
  void DumpTrace();
  void DumpAsBitSet();
  void DumpCounters();
  void DumpOffsets();
  void DumpAll();

  ALWAYS_INLINE
  void TraceBasicBlock(s32 *id);

  void InitializeGuardArray(s32 *guards);
  void InitializeGuards(s32 *guards, uptr n, const char *module_name,
                        uptr caller_pc);
  void InitializeCounters(u8 *counters, uptr n);
  void ReinitializeGuards();
  uptr GetNumberOf8bitCounters();
  uptr Update8bitCounterBitsetAndClearCounters(u8 *bitset);

  uptr *data();
  uptr size();

 private:
  void DirectOpen();
  void UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end);

  // Maximal size pc array may ever grow.
  // We MmapNoReserve this space to ensure that the array is contiguous.
  static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
  static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(
      1 << (SANITIZER_ANDROID ? 24 : (SANITIZER_WINDOWS ? 27 : 26)),
      1 << 27);
  // The amount file mapping for the pc array is grown by.
  static const uptr kPcArrayMmapSize = 64 * 1024;

@@ -86,7 +123,27 @@ class CoverageData {
  // Current file mapped size of the pc array.
  uptr pc_array_mapped_size;
  // Descriptor of the file mapped pc array.
  int pc_fd;
  fd_t pc_fd;

  // Vector of coverage guard arrays, protected by mu.
  InternalMmapVectorNoCtor<s32*> guard_array_vec;

  struct NamedPcRange {
    const char *copied_module_name;
    uptr beg, end; // elements [beg,end) in pc_array.
  };

  // Vector of module and compilation unit pc ranges.
  InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec;
  InternalMmapVectorNoCtor<NamedPcRange> module_name_vec;

  struct CounterAndSize {
    u8 *counters;
    uptr n;
  };

  InternalMmapVectorNoCtor<CounterAndSize> counters_vec;
  uptr num_8bit_counters;

  // Caller-Callee (cc) array, size and current index.
  static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);

@@ -94,59 +151,131 @@ class CoverageData {
  atomic_uintptr_t cc_array_index;
  atomic_uintptr_t cc_array_size;

  // Tracing event array, size and current pointer.
  // We record all events (basic block entries) in a global buffer of u32
  // values. Each such value is the index in pc_array.
  // So far the tracing is highly experimental:
  //  - not thread-safe;
  //  - does not support long traces;
  //  - not tuned for performance.
  static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30);
  u32 *tr_event_array;
  uptr tr_event_array_size;
  u32 *tr_event_pointer;
  static const uptr kTrPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);

  StaticSpinMutex mu;

  void DirectOpen();
  void ReInit();
};

static CoverageData coverage_data;

void CovUpdateMapping(const char *path, uptr caller_pc = 0);

void CoverageData::DirectOpen() {
  InternalScopedString path(1024);
  InternalScopedString path(kMaxPathLength);
  internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
                    common_flags()->coverage_dir, internal_getpid());
  pc_fd = OpenFile(path.data(), true);
  if (internal_iserror(pc_fd)) {
    Report(" Coverage: failed to open %s for writing\n", path.data());
                    coverage_dir, internal_getpid());
  pc_fd = OpenFile(path.data(), RdWr);
  if (pc_fd == kInvalidFd) {
    Report("Coverage: failed to open %s for reading/writing\n", path.data());
    Die();
  }

  pc_array_mapped_size = 0;
  CovUpdateMapping();
  CovUpdateMapping(coverage_dir);
}

void CoverageData::Init() {
  pc_fd = kInvalidFd;
}

void CoverageData::Enable() {
  if (pc_array)
    return;
  pc_array = reinterpret_cast<uptr *>(
      MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
  pc_fd = kInvalidFd;
  atomic_store(&pc_array_index, 0, memory_order_relaxed);
  if (common_flags()->coverage_direct) {
    atomic_store(&pc_array_size, 0, memory_order_relaxed);
    atomic_store(&pc_array_index, 0, memory_order_relaxed);
  } else {
    atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
    atomic_store(&pc_array_index, 0, memory_order_relaxed);
  }

  cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
      sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
  atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
  atomic_store(&cc_array_index, 0, memory_order_relaxed);

  // Allocate tr_event_array with a guard page at the end.
  tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie(
      sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(),
      "CovInit::tr_event_array"));
  MprotectNoAccess(
      reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]),
      GetMmapGranularity());
  tr_event_array_size = kTrEventArrayMaxSize;
  tr_event_pointer = tr_event_array;

  num_8bit_counters = 0;
}

void CoverageData::InitializeGuardArray(s32 *guards) {
  Enable();  // Make sure coverage is enabled at this point.
  s32 n = guards[0];
  for (s32 j = 1; j <= n; j++) {
    uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
    guards[j] = -static_cast<s32>(idx + 1);
  }
}

void CoverageData::Disable() {
  if (pc_array) {
    UnmapOrDie(pc_array, sizeof(uptr) * kPcArrayMaxSize);
    pc_array = nullptr;
  }
  if (cc_array) {
    UnmapOrDie(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
    cc_array = nullptr;
  }
  if (tr_event_array) {
    UnmapOrDie(tr_event_array,
               sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
               GetMmapGranularity());
    tr_event_array = nullptr;
    tr_event_pointer = nullptr;
  }
  if (pc_fd != kInvalidFd) {
    CloseFile(pc_fd);
    pc_fd = kInvalidFd;
  }
}

void CoverageData::ReinitializeGuards() {
  // Assuming single thread.
  atomic_store(&pc_array_index, 0, memory_order_relaxed);
  for (uptr i = 0; i < guard_array_vec.size(); i++)
    InitializeGuardArray(guard_array_vec[i]);
}

void CoverageData::ReInit() {
  internal_munmap(pc_array, sizeof(uptr) * kPcArrayMaxSize);
  if (pc_fd != kInvalidFd) internal_close(pc_fd);
  if (common_flags()->coverage_direct) {
    // In memory-mapped mode we must extend the new file to the known array
    // size.
    uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
    Init();
    if (size) Extend(size);
  } else {
    Init();
  Disable();
  if (coverage_enabled) {
    if (common_flags()->coverage_direct) {
      // In memory-mapped mode we must extend the new file to the known array
      // size.
      uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
      uptr npcs = size / sizeof(uptr);
      Enable();
      if (size) Extend(npcs);
      if (coverage_enabled) CovUpdateMapping(coverage_dir);
    } else {
      Enable();
    }
  }
  // Re-initialize the guards.
  // We are single-threaded now, no need to grab any lock.
  CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0);
  ReinitializeGuards();
}

void CoverageData::BeforeFork() {

@@ -164,15 +293,16 @@ void CoverageData::Extend(uptr npcs) {
  if (!common_flags()->coverage_direct) return;
  SpinMutexLock l(&mu);

  if (pc_fd == kInvalidFd) DirectOpen();
  CHECK_NE(pc_fd, kInvalidFd);

  uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
  size += npcs * sizeof(uptr);

  if (size > pc_array_mapped_size) {
  if (coverage_enabled && size > pc_array_mapped_size) {
    if (pc_fd == kInvalidFd) DirectOpen();
    CHECK_NE(pc_fd, kInvalidFd);

    uptr new_mapped_size = pc_array_mapped_size;
    while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
    CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize);

    // Extend the file and map the new space at the end of pc_array.
    uptr res = internal_ftruncate(pc_fd, new_mapped_size);

@@ -181,24 +311,100 @@ void CoverageData::Extend(uptr npcs) {
      Printf("failed to extend raw coverage file: %d\n", err);
      Die();
    }
    void *p = MapWritableFileToMemory(pc_array + pc_array_mapped_size,

    uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size;
    void *p = MapWritableFileToMemory((void *)next_map_base,
                                      new_mapped_size - pc_array_mapped_size,
                                      pc_fd, pc_array_mapped_size);
    CHECK_EQ(p, pc_array + pc_array_mapped_size);
    CHECK_EQ((uptr)p, next_map_base);
    pc_array_mapped_size = new_mapped_size;
  }

  atomic_store(&pc_array_size, size, memory_order_release);
}

// Simply add the pc into the vector under lock. If the function is called more
// than once for a given PC it will be inserted multiple times, which is fine.
void CoverageData::Add(uptr pc) {
void CoverageData::InitializeCounters(u8 *counters, uptr n) {
  if (!counters) return;
  CHECK_EQ(reinterpret_cast<uptr>(counters) % 16, 0);
  n = RoundUpTo(n, 16); // The compiler must ensure that counters is 16-aligned.
  SpinMutexLock l(&mu);
  counters_vec.push_back({counters, n});
  num_8bit_counters += n;
}

void CoverageData::UpdateModuleNameVec(uptr caller_pc, uptr range_beg,
                                       uptr range_end) {
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  const char *module_name = sym->GetModuleNameForPc(caller_pc);
  if (!module_name) return;
  if (module_name_vec.empty() ||
      module_name_vec.back().copied_module_name != module_name)
    module_name_vec.push_back({module_name, range_beg, range_end});
  else
    module_name_vec.back().end = range_end;
}

void CoverageData::InitializeGuards(s32 *guards, uptr n,
                                    const char *comp_unit_name,
                                    uptr caller_pc) {
  // The array 'guards' has n+1 elements, we use the element zero
  // to store 'n'.
  CHECK_LT(n, 1 << 30);
  guards[0] = static_cast<s32>(n);
  InitializeGuardArray(guards);
  SpinMutexLock l(&mu);
  uptr range_end = atomic_load(&pc_array_index, memory_order_relaxed);
  uptr range_beg = range_end - n;
  comp_unit_name_vec.push_back({comp_unit_name, range_beg, range_end});
  guard_array_vec.push_back(guards);
  UpdateModuleNameVec(caller_pc, range_beg, range_end);
}

static const uptr kBundleCounterBits = 16;

// When coverage_order_pcs==true and SANITIZER_WORDSIZE==64
// we insert the global counter into the first 16 bits of the PC.
uptr BundlePcAndCounter(uptr pc, uptr counter) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return pc;
  static const uptr kMaxCounter = (1 << kBundleCounterBits) - 1;
  if (counter > kMaxCounter)
    counter = kMaxCounter;
  CHECK_EQ(0, pc >> (SANITIZER_WORDSIZE - kBundleCounterBits));
  return pc | (counter << (SANITIZER_WORDSIZE - kBundleCounterBits));
}

uptr UnbundlePc(uptr bundle) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return bundle;
  return (bundle << kBundleCounterBits) >> kBundleCounterBits;
}

uptr UnbundleCounter(uptr bundle) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return 0;
  return bundle >> (SANITIZER_WORDSIZE - kBundleCounterBits);
}

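A worked example of the bundling scheme above, assuming a 64-bit target with coverage_order_pcs set: if the seventh coverage event (global counter value 6) records pc == 0x4005d0, BundlePcAndCounter stores 0x4005d0 | (6 << 48) == 0x00060000004005d0 in pc_array, and UnbundlePc / UnbundleCounter recover 0x4005d0 and 6 from it. The CHECK_EQ ensures the trick only engages while the PC fits in the low 48 bits.
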
// If guard is negative, atomically set it to -guard and store the PC in
// pc_array.
void CoverageData::Add(uptr pc, u32 *guard) {
  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
  s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed);
  if (guard_value >= 0) return;

  atomic_store(atomic_guard, -guard_value, memory_order_relaxed);
  if (!pc_array) return;
  uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);

  uptr idx = -guard_value - 1;
  if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
    return;  // May happen after fork when pc_array_index becomes 0.
  CHECK_LT(idx * sizeof(uptr),
           atomic_load(&pc_array_size, memory_order_acquire));
  pc_array[idx] = pc;
  uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
  pc_array[idx] = BundlePcAndCounter(pc, counter);
}

// Registers a pair caller=>callee.

@@ -226,13 +432,73 @@ void CoverageData::IndirCall(uptr caller, uptr callee, uptr callee_cache[],
  for (uptr i = 2; i < cache_size; i++) {
    uptr was = 0;
    if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee,
                                       memory_order_seq_cst))
                                       memory_order_seq_cst)) {
      atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
      return;
    }
    if (was == callee)  // Already have this callee.
      return;
  }
}

uptr CoverageData::GetNumberOf8bitCounters() {
  return num_8bit_counters;
}

// Map every 8bit counter to a 8-bit bitset and clear the counter.
uptr CoverageData::Update8bitCounterBitsetAndClearCounters(u8 *bitset) {
  uptr num_new_bits = 0;
  uptr cur = 0;
  // For better speed we map 8 counters to 8 bytes of bitset at once.
  static const uptr kBatchSize = 8;
  CHECK_EQ(reinterpret_cast<uptr>(bitset) % kBatchSize, 0);
  for (uptr i = 0, len = counters_vec.size(); i < len; i++) {
    u8 *c = counters_vec[i].counters;
    uptr n = counters_vec[i].n;
    CHECK_EQ(n % 16, 0);
    CHECK_EQ(cur % kBatchSize, 0);
    CHECK_EQ(reinterpret_cast<uptr>(c) % kBatchSize, 0);
    if (!bitset) {
      internal_bzero_aligned16(c, n);
      cur += n;
      continue;
    }
    for (uptr j = 0; j < n; j += kBatchSize, cur += kBatchSize) {
      CHECK_LT(cur, num_8bit_counters);
      u64 *pc64 = reinterpret_cast<u64*>(c + j);
      u64 *pb64 = reinterpret_cast<u64*>(bitset + cur);
      u64 c64 = *pc64;
      u64 old_bits_64 = *pb64;
      u64 new_bits_64 = old_bits_64;
      if (c64) {
        *pc64 = 0;
        for (uptr k = 0; k < kBatchSize; k++) {
          u64 x = (c64 >> (8 * k)) & 0xff;
          if (x) {
            u64 bit = 0;
            /**/ if (x >= 128) bit = 128;
            else if (x >= 32) bit = 64;
            else if (x >= 16) bit = 32;
            else if (x >= 8) bit = 16;
            else if (x >= 4) bit = 8;
            else if (x >= 3) bit = 4;
            else if (x >= 2) bit = 2;
            else if (x >= 1) bit = 1;
            u64 mask = bit << (8 * k);
            if (!(new_bits_64 & mask)) {
              num_new_bits++;
              new_bits_64 |= mask;
            }
          }
        }
        *pb64 = new_bits_64;
      }
    }
  }
  CHECK_EQ(cur, num_8bit_counters);
  return num_new_bits;
}

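The if/else ladder above is a coarse log2-style bucketing: counter values 1, 2, 3, 4-7, 8-15, 16-31, 32-127 and 128-255 map to bitset bits 1, 2, 4, 8, 16, 32, 64 and 128 respectively. A single byte of bitset therefore records roughly how often a block executed, not merely whether it executed, and num_new_bits counts only transitions into buckets not seen before.
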
uptr *CoverageData::data() {
  return pc_array;
}

@@ -251,15 +517,15 @@ struct CovHeader {

static void CovWritePacked(int pid, const char *module, const void *blob,
                           unsigned int blob_size) {
  if (cov_fd < 0) return;
  if (cov_fd == kInvalidFd) return;
  unsigned module_name_length = internal_strlen(module);
  CovHeader header = {pid, module_name_length, blob_size};

  if (cov_max_block_size == 0) {
    // Writing to a file. Just go ahead.
    internal_write(cov_fd, &header, sizeof(header));
    internal_write(cov_fd, module, module_name_length);
    internal_write(cov_fd, blob, blob_size);
    WriteToFile(cov_fd, &header, sizeof(header));
    WriteToFile(cov_fd, module, module_name_length);
    WriteToFile(cov_fd, blob, blob_size);
  } else {
    // Writing to a socket. We want to split the data into appropriately sized
    // blocks.

@@ -275,15 +541,14 @@ static void CovWritePacked(int pid, const char *module, const void *blob,
    internal_memcpy(block_pos, module, module_name_length);
    block_pos += module_name_length;
    char *block_data_begin = block_pos;
    char *blob_pos = (char *)blob;
    const char *blob_pos = (const char *)blob;
    while (blob_size > 0) {
      unsigned int payload_size = Min(blob_size, max_payload_size);
      blob_size -= payload_size;
      internal_memcpy(block_data_begin, blob_pos, payload_size);
      blob_pos += payload_size;
      ((CovHeader *)block.data())->data_length = payload_size;
      internal_write(cov_fd, block.data(),
                     header_size_with_module + payload_size);
      WriteToFile(cov_fd, block.data(), header_size_with_module + payload_size);
    }
  }
}

@@ -292,29 +557,77 @@ static void CovWritePacked(int pid, const char *module, const void *blob,
// If packed = true and name == 0: <pid>.<sancov>.<packed>.
// If packed = true and name != 0: <name>.<sancov>.<packed> (name is
// user-supplied).
static int CovOpenFile(bool packed, const char* name) {
  InternalScopedBuffer<char> path(1024);
static fd_t CovOpenFile(InternalScopedString *path, bool packed,
                        const char *name, const char *extension = "sancov") {
  path->clear();
  if (!packed) {
    CHECK(name);
    internal_snprintf((char *)path.data(), path.size(), "%s/%s.%zd.sancov",
                      common_flags()->coverage_dir, name, internal_getpid());
    path->append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(),
                 extension);
  } else {
    if (!name)
      internal_snprintf((char *)path.data(), path.size(),
                        "%s/%zd.sancov.packed", common_flags()->coverage_dir,
                        internal_getpid());
      path->append("%s/%zd.%s.packed", coverage_dir, internal_getpid(),
                   extension);
    else
      internal_snprintf((char *)path.data(), path.size(), "%s/%s.sancov.packed",
                        common_flags()->coverage_dir, name);
  }
  uptr fd = OpenFile(path.data(), true);
  if (internal_iserror(fd)) {
    Report(" SanitizerCoverage: failed to open %s for writing\n", path.data());
    return -1;
      path->append("%s/%s.%s.packed", coverage_dir, name, extension);
  }
  error_t err;
  fd_t fd = OpenFile(path->data(), WrOnly, &err);
  if (fd == kInvalidFd)
    Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n",
           path->data(), err);
  return fd;
}

// Dump trace PCs and trace events into two separate files.
void CoverageData::DumpTrace() {
  uptr max_idx = tr_event_pointer - tr_event_array;
  if (!max_idx) return;
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  InternalScopedString out(32 << 20);
  for (uptr i = 0, n = size(); i < n; i++) {
    const char *module_name = "<unknown>";
    uptr module_address = 0;
    sym->GetModuleNameAndOffsetForPC(UnbundlePc(pc_array[i]), &module_name,
                                     &module_address);
    out.append("%s 0x%zx\n", module_name, module_address);
  }
  InternalScopedString path(kMaxPathLength);
  fd_t fd = CovOpenFile(&path, false, "trace-points");
  if (fd == kInvalidFd) return;
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);

  fd = CovOpenFile(&path, false, "trace-compunits");
  if (fd == kInvalidFd) return;
  out.clear();
  for (uptr i = 0; i < comp_unit_name_vec.size(); i++)
    out.append("%s\n", comp_unit_name_vec[i].copied_module_name);
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);

  fd = CovOpenFile(&path, false, "trace-events");
  if (fd == kInvalidFd) return;
  uptr bytes_to_write = max_idx * sizeof(tr_event_array[0]);
  u8 *event_bytes = reinterpret_cast<u8*>(tr_event_array);
  // The trace file could be huge, and may not be written with a single syscall.
  while (bytes_to_write) {
    uptr actually_written;
    if (WriteToFile(fd, event_bytes, bytes_to_write, &actually_written) &&
        actually_written <= bytes_to_write) {
      bytes_to_write -= actually_written;
      event_bytes += actually_written;
    } else {
      break;
    }
  }
  CloseFile(fd);
  VReport(1, " CovDump: Trace: %zd PCs written\n", size());
  VReport(1, " CovDump: Trace: %zd Events written\n", max_idx);
}

// This function dumps the caller=>callee pairs into a file as a sequence of
// lines like "module_name offset".
void CoverageData::DumpCallerCalleePairs() {

@@ -347,88 +660,166 @@ void CoverageData::DumpCallerCalleePairs() {
                 callee_module_address);
    }
  }
  int fd = CovOpenFile(false, "caller-callee");
  if (fd < 0) return;
  internal_write(fd, out.data(), out.length());
  internal_close(fd);
  InternalScopedString path(kMaxPathLength);
  fd_t fd = CovOpenFile(&path, false, "caller-callee");
  if (fd == kInvalidFd) return;
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);
  VReport(1, " CovDump: %zd caller-callee pairs written\n", total);
}

// Dump the coverage on disk.
|
||||
static void CovDump() {
|
||||
if (!common_flags()->coverage || common_flags()->coverage_direct) return;
|
||||
#if !SANITIZER_WINDOWS
|
||||
if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
|
||||
return;
|
||||
uptr size = coverage_data.size();
|
||||
InternalMmapVector<u32> offsets(size);
|
||||
uptr *vb = coverage_data.data();
|
||||
uptr *ve = vb + size;
|
||||
SortArray(vb, size);
|
||||
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
|
||||
uptr mb, me, off, prot;
|
||||
InternalScopedBuffer<char> module(4096);
|
||||
InternalScopedBuffer<char> path(4096 * 2);
|
||||
for (int i = 0;
|
||||
proc_maps.Next(&mb, &me, &off, module.data(), module.size(), &prot);
|
||||
i++) {
|
||||
if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
|
||||
continue;
|
||||
while (vb < ve && *vb < mb) vb++;
|
||||
if (vb >= ve) break;
|
||||
if (*vb < me) {
|
||||
offsets.clear();
|
||||
const uptr *old_vb = vb;
|
||||
CHECK_LE(off, *vb);
|
||||
for (; vb < ve && *vb < me; vb++) {
|
||||
uptr diff = *vb - (i ? mb : 0) + off;
|
||||
CHECK_LE(diff, 0xffffffffU);
|
||||
offsets.push_back(static_cast<u32>(diff));
|
||||
}
|
||||
const char *module_name = StripModuleName(module.data());
|
||||
if (cov_sandboxed) {
|
||||
if (cov_fd >= 0) {
|
||||
CovWritePacked(internal_getpid(), module_name, offsets.data(),
|
||||
offsets.size() * sizeof(u32));
|
||||
VReport(1, " CovDump: %zd PCs written to packed file\n", vb - old_vb);
|
||||
}
|
||||
} else {
|
||||
// One file per module per process.
|
||||
internal_snprintf((char *)path.data(), path.size(), "%s/%s.%zd.sancov",
|
||||
common_flags()->coverage_dir, module_name,
|
||||
internal_getpid());
|
||||
int fd = CovOpenFile(false /* packed */, module_name);
|
||||
if (fd > 0) {
|
||||
internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
|
||||
internal_close(fd);
|
||||
VReport(1, " CovDump: %s: %zd PCs written\n", path.data(),
|
||||
vb - old_vb);
|
||||
}
|
||||
// Record the current PC into the event buffer.
|
||||
// Every event is a u32 value (index in tr_pc_array_index) so we compute
|
||||
// it once and then cache in the provided 'cache' storage.
|
||||
//
|
||||
// This function will eventually be inlined by the compiler.
|
||||
void CoverageData::TraceBasicBlock(s32 *id) {
|
||||
// Will trap here if
|
||||
// 1. coverage is not enabled at run-time.
|
||||
// 2. The array tr_event_array is full.
|
||||
*tr_event_pointer = static_cast<u32>(*id - 1);
|
||||
tr_event_pointer++;
|
||||
}
|
||||
|
||||
void CoverageData::DumpCounters() {
|
||||
if (!common_flags()->coverage_counters) return;
|
||||
uptr n = coverage_data.GetNumberOf8bitCounters();
|
||||
if (!n) return;
|
||||
InternalScopedBuffer<u8> bitset(n);
|
||||
coverage_data.Update8bitCounterBitsetAndClearCounters(bitset.data());
|
||||
InternalScopedString path(kMaxPathLength);
|
||||
|
||||
for (uptr m = 0; m < module_name_vec.size(); m++) {
|
||||
auto r = module_name_vec[m];
|
||||
CHECK(r.copied_module_name);
|
||||
CHECK_LE(r.beg, r.end);
|
||||
CHECK_LE(r.end, size());
|
||||
const char *base_name = StripModuleName(r.copied_module_name);
|
||||
fd_t fd =
|
||||
CovOpenFile(&path, /* packed */ false, base_name, "counters-sancov");
|
||||
if (fd == kInvalidFd) return;
|
||||
WriteToFile(fd, bitset.data() + r.beg, r.end - r.beg);
|
||||
CloseFile(fd);
|
||||
VReport(1, " CovDump: %zd counters written for '%s'\n", r.end - r.beg,
|
||||
base_name);
|
||||
}
|
||||
}
|
||||
|
||||
void CoverageData::DumpAsBitSet() {
|
||||
if (!common_flags()->coverage_bitset) return;
|
||||
if (!size()) return;
|
||||
InternalScopedBuffer<char> out(size());
|
||||
InternalScopedString path(kMaxPathLength);
|
||||
for (uptr m = 0; m < module_name_vec.size(); m++) {
|
||||
uptr n_set_bits = 0;
|
||||
auto r = module_name_vec[m];
|
||||
CHECK(r.copied_module_name);
|
||||
CHECK_LE(r.beg, r.end);
|
||||
CHECK_LE(r.end, size());
|
||||
for (uptr i = r.beg; i < r.end; i++) {
|
||||
uptr pc = UnbundlePc(pc_array[i]);
|
||||
out[i] = pc ? '1' : '0';
|
||||
if (pc)
|
||||
n_set_bits++;
|
||||
}
|
||||
const char *base_name = StripModuleName(r.copied_module_name);
|
||||
fd_t fd = CovOpenFile(&path, /* packed */false, base_name, "bitset-sancov");
|
||||
if (fd == kInvalidFd) return;
|
||||
WriteToFile(fd, out.data() + r.beg, r.end - r.beg);
|
||||
CloseFile(fd);
|
||||
VReport(1,
|
||||
" CovDump: bitset of %zd bits written for '%s', %zd bits are set\n",
|
||||
r.end - r.beg, base_name, n_set_bits);
|
||||
}
|
||||
}
|
||||
|
||||
void CoverageData::DumpOffsets() {
  auto sym = Symbolizer::GetOrInit();
  if (!common_flags()->coverage_pcs) return;
  CHECK_NE(sym, nullptr);
  InternalMmapVector<uptr> offsets(0);
  InternalScopedString path(kMaxPathLength);
  for (uptr m = 0; m < module_name_vec.size(); m++) {
    offsets.clear();
    uptr num_words_for_magic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
    for (uptr i = 0; i < num_words_for_magic; i++)
      offsets.push_back(0);
    auto r = module_name_vec[m];
    CHECK(r.copied_module_name);
    CHECK_LE(r.beg, r.end);
    CHECK_LE(r.end, size());
    for (uptr i = r.beg; i < r.end; i++) {
      uptr pc = UnbundlePc(pc_array[i]);
      uptr counter = UnbundleCounter(pc_array[i]);
      if (!pc) continue; // Not visited.
      uptr offset = 0;
      sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset);
      offsets.push_back(BundlePcAndCounter(offset, counter));
    }

    CHECK_GE(offsets.size(), num_words_for_magic);
    SortArray(offsets.data(), offsets.size());
    for (uptr i = 0; i < offsets.size(); i++)
      offsets[i] = UnbundlePc(offsets[i]);

    uptr num_offsets = offsets.size() - num_words_for_magic;
    u64 *magic_p = reinterpret_cast<u64*>(offsets.data());
    CHECK_EQ(*magic_p, 0ULL);
    // FIXME: we may want to write 32-bit offsets even in 64-mode
    // if all the offsets are small enough.
    *magic_p = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32;

    const char *module_name = StripModuleName(r.copied_module_name);
    if (cov_sandboxed) {
      if (cov_fd != kInvalidFd) {
        CovWritePacked(internal_getpid(), module_name, offsets.data(),
                       offsets.size() * sizeof(offsets[0]));
        VReport(1, " CovDump: %zd PCs written to packed file\n", num_offsets);
      }
    } else {
      // One file per module per process.
      fd_t fd = CovOpenFile(&path, false /* packed */, module_name);
      if (fd == kInvalidFd) continue;
      WriteToFile(fd, offsets.data(), offsets.size() * sizeof(offsets[0]));
      CloseFile(fd);
      VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), num_offsets);
    }
  }
  if (cov_fd >= 0)
    internal_close(cov_fd);
  coverage_data.DumpCallerCalleePairs();
#endif // !SANITIZER_WINDOWS
  if (cov_fd != kInvalidFd)
    CloseFile(cov_fd);
}

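The resulting .sancov file is binary: the first word (one u64 on 64-bit
targets, two u32 words on 32-bit ones) carries the kMagic64/kMagic32 marker,
and the rest are the sorted module offsets. A hedged reader sketch, assuming
a 64-bit dump (a complete tool would dispatch on the magic value):

#include <cstdint>
#include <cstdio>
#include <vector>

// Read a .sancov file written by CoverageData::DumpOffsets().
int main(int argc, char **argv) {
  if (argc != 2) return 1;
  FILE *f = std::fopen(argv[1], "rb");
  if (!f) return 1;
  uint64_t magic = 0;
  if (std::fread(&magic, sizeof(magic), 1, f) != 1) { std::fclose(f); return 1; }
  std::vector<uint64_t> offsets;  // module offsets of covered PCs
  uint64_t off;
  while (std::fread(&off, sizeof(off), 1, f) == 1)
    offsets.push_back(off);
  std::fclose(f);
  std::printf("%zu covered PCs (module offsets)\n", offsets.size());
}
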
void CoverageData::DumpAll() {
  if (!coverage_enabled || common_flags()->coverage_direct) return;
  if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
    return;
  DumpAsBitSet();
  DumpCounters();
  DumpTrace();
  DumpOffsets();
  DumpCallerCalleePairs();
}

void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
  if (!args) return;
  if (!common_flags()->coverage) return;
  if (!coverage_enabled) return;
  cov_sandboxed = args->coverage_sandboxed;
  if (!cov_sandboxed) return;
  cov_fd = args->coverage_fd;
  cov_max_block_size = args->coverage_max_block_size;
  if (cov_fd < 0)
  if (args->coverage_fd >= 0) {
    cov_fd = (fd_t)args->coverage_fd;
  } else {
    InternalScopedString path(kMaxPathLength);
    // Pre-open the file now. The sandbox won't allow us to do it later.
    cov_fd = CovOpenFile(true /* packed */, 0);
    cov_fd = CovOpenFile(&path, true /* packed */, nullptr);
  }
}

int MaybeOpenCovFile(const char *name) {
fd_t MaybeOpenCovFile(const char *name) {
  CHECK(name);
  if (!common_flags()->coverage) return -1;
  return CovOpenFile(true /* packed */, name);
  if (!coverage_enabled) return kInvalidFd;
  InternalScopedString path(kMaxPathLength);
  return CovOpenFile(&path, true /* packed */, name);
}

void CovBeforeFork() {

@ -439,32 +830,114 @@ void CovAfterFork(int child_pid) {
  coverage_data.AfterFork(child_pid);
}

} // namespace __sanitizer
static void MaybeDumpCoverage() {
  if (common_flags()->coverage)
    __sanitizer_cov_dump();
}

void InitializeCoverage(bool enabled, const char *dir) {
  if (coverage_enabled)
    return;  // May happen if two sanitizers enable coverage in the same process.
  coverage_enabled = enabled;
  coverage_dir = dir;
  coverage_data.Init();
  if (enabled) coverage_data.Enable();
  if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump);
  AddDieCallback(MaybeDumpCoverage);
}

void ReInitializeCoverage(bool enabled, const char *dir) {
  coverage_enabled = enabled;
  coverage_dir = dir;
  coverage_data.ReInit();
}

void CoverageUpdateMapping() {
  if (coverage_enabled)
    CovUpdateMapping(coverage_dir);
}

} // namespace __sanitizer

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov() {
  coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()));
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u32 *guard) {
  coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                    guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) {
  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
  if (static_cast<s32>(
          __sanitizer::atomic_load(atomic_guard, memory_order_relaxed)) < 0)
    __sanitizer_cov(guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
  coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                          callee, callee_cache16, 16);
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
  coverage_enabled = true;
  coverage_dir = common_flags()->coverage_dir;
  coverage_data.Init();
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_module_init(uptr npcs) {
  if (!common_flags()->coverage || !common_flags()->coverage_direct) return;
  if (SANITIZER_ANDROID) {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
  coverage_data.DumpAll();
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
                            const char *comp_unit_name) {
  coverage_data.InitializeGuards(guards, npcs, comp_unit_name, GET_CALLER_PC());
  coverage_data.InitializeCounters(counters, npcs);
  if (!common_flags()->coverage_direct) return;
  if (SANITIZER_ANDROID && coverage_enabled) {
    // dlopen/dlclose interceptors do not work on Android, so we rely on
    // Extend() calls to update .sancov.map.
    CovUpdateMapping(GET_CALLER_PC());
    CovUpdateMapping(coverage_dir, GET_CALLER_PC());
  }
  coverage_data.Extend(npcs);
}
SANITIZER_INTERFACE_ATTRIBUTE
sptr __sanitizer_maybe_open_cov_file(const char *name) {
  return MaybeOpenCovFile(name);
  return (sptr)MaybeOpenCovFile(name);
}
} // extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_total_unique_coverage() {
  return atomic_load(&coverage_counter, memory_order_relaxed);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_func_enter(s32 *id) {
  coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_basic_block(s32 *id) {
  coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_reset_coverage() {
  coverage_data.ReinitializeGuards();
  internal_bzero_aligned16(
      coverage_data.data(),
      RoundUpTo(coverage_data.size() * sizeof(coverage_data.data()[0]), 16));
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_coverage_guards(uptr **data) {
  *data = coverage_data.data();
  return coverage_data.size();
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_number_of_counters() {
  return coverage_data.GetNumberOf8bitCounters();
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
  return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
}
// Default empty implementations (weak). Users should redefine them.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_switch() {}
} // extern "C"

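The negative-value test in __sanitizer_cov_with_check is the fast path: as the
check suggests, a guard cell stays negative until its site is first recorded,
so later executions only pay one relaxed atomic load. A standalone sketch of
that pattern (illustrative only; in the real runtime the guards are laid out
and initialized by __sanitizer_cov_module_init, not by hand):

#include <atomic>
#include <cstdint>

// One cell per instrumented site; negative means "not yet visited".
static std::atomic<int32_t> site_guard{-1};  // hypothetical hand-written guard

static void record_coverage_slow_path() {
  // Bookkeeping (PC recording, guard flip) would happen here.
}

void instrumented_function() {
  // Cheap relaxed load on every call; slow path only while still negative.
  if (site_guard.load(std::memory_order_relaxed) < 0)
    record_coverage_slow_path();
}
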
@ -33,7 +33,6 @@

namespace __sanitizer {

static const uptr kMaxNumberOfModules = 1 << 14;
static const uptr kMaxTextSize = 64 * 1024;

struct CachedMapping {

@ -60,8 +59,8 @@ struct CachedMapping {
static CachedMapping cached_mapping;
static StaticSpinMutex mapping_mu;

void CovUpdateMapping(uptr caller_pc) {
  if (!common_flags()->coverage || !common_flags()->coverage_direct) return;
void CovUpdateMapping(const char *coverage_dir, uptr caller_pc) {
  if (!common_flags()->coverage_direct) return;

  SpinMutexLock l(&mapping_mu);

@ -69,57 +68,58 @@ void CovUpdateMapping(uptr caller_pc) {
    return;

  InternalScopedString text(kMaxTextSize);
  InternalScopedBuffer<char> modules_data(kMaxNumberOfModules *
                                          sizeof(LoadedModule));
  LoadedModule *modules = (LoadedModule *)modules_data.data();
  CHECK(modules);
  int n_modules = GetListOfModules(modules, kMaxNumberOfModules,
                                   /* filter */ 0);

  text.append("%d\n", sizeof(uptr) * 8);
  for (int i = 0; i < n_modules; ++i) {
    const char *module_name = StripModuleName(modules[i].full_name());
    for (unsigned j = 0; j < modules[i].n_ranges(); ++j) {
      if (modules[i].address_range_executable(j)) {
        uptr start = modules[i].address_range_start(j);
        uptr end = modules[i].address_range_end(j);
        uptr base = modules[i].base_address();
        text.append("%zx %zx %zx %s\n", start, end, base, module_name);
        if (caller_pc && caller_pc >= start && caller_pc < end)
          cached_mapping.SetModuleRange(start, end);
  {
    InternalScopedBuffer<LoadedModule> modules(kMaxNumberOfModules);
    CHECK(modules.data());
    int n_modules = GetListOfModules(modules.data(), kMaxNumberOfModules,
                                     /* filter */ nullptr);

    text.append("%d\n", sizeof(uptr) * 8);
    for (int i = 0; i < n_modules; ++i) {
      const char *module_name = StripModuleName(modules[i].full_name());
      uptr base = modules[i].base_address();
      for (auto iter = modules[i].ranges(); iter.hasNext();) {
        const auto *range = iter.next();
        if (range->executable) {
          uptr start = range->beg;
          uptr end = range->end;
          text.append("%zx %zx %zx %s\n", start, end, base, module_name);
          if (caller_pc && caller_pc >= start && caller_pc < end)
            cached_mapping.SetModuleRange(start, end);
        }
      }
      modules[i].clear();
    }
  }

  int err;
  InternalScopedString tmp_path(64 +
                                internal_strlen(common_flags()->coverage_dir));
  error_t err;
  InternalScopedString tmp_path(64 + internal_strlen(coverage_dir));
  uptr res = internal_snprintf((char *)tmp_path.data(), tmp_path.size(),
                               "%s/%zd.sancov.map.tmp", common_flags()->coverage_dir,
                               internal_getpid());
                               "%s/%zd.sancov.map.tmp", coverage_dir,
                               internal_getpid());
  CHECK_LE(res, tmp_path.size());
  uptr map_fd = OpenFile(tmp_path.data(), true);
  if (internal_iserror(map_fd)) {
    Report(" Coverage: failed to open %s for writing\n", tmp_path.data());
  fd_t map_fd = OpenFile(tmp_path.data(), WrOnly, &err);
  if (map_fd == kInvalidFd) {
    Report("Coverage: failed to open %s for writing: %d\n", tmp_path.data(),
           err);
    Die();
  }

  res = internal_write(map_fd, text.data(), text.length());
  if (internal_iserror(res, &err)) {
  if (!WriteToFile(map_fd, text.data(), text.length(), nullptr, &err)) {
    Printf("sancov.map write failed: %d\n", err);
    Die();
  }
  internal_close(map_fd);
  CloseFile(map_fd);

  InternalScopedString path(64 + internal_strlen(common_flags()->coverage_dir));
  InternalScopedString path(64 + internal_strlen(coverage_dir));
  res = internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.map",
                          common_flags()->coverage_dir, internal_getpid());
                          coverage_dir, internal_getpid());
  CHECK_LE(res, path.size());
  res = internal_rename(tmp_path.data(), path.data());
  if (internal_iserror(res, &err)) {
  if (!RenameFile(tmp_path.data(), path.data(), &err)) {
    Printf("sancov.map rename failed: %d\n", err);
    Die();
  }
}

} // namespace __sanitizer
} // namespace __sanitizer

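For reference, the <pid>.sancov.map file this function produces is plain text:
the first line is the pointer width in bits, then one line per executable
range in the form "start end base module", all in hex. An illustrative example
(addresses and names invented):

64
400000 4b6000 400000 mybinary
7f1c00000000 7f1c00040000 7f1c00000000 libfoo.so
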
@ -38,19 +38,20 @@ struct DD : public DDetector {

  explicit DD(const DDFlags *flags);

  DDPhysicalThread* CreatePhysicalThread();
  void DestroyPhysicalThread(DDPhysicalThread *pt);
  DDPhysicalThread *CreatePhysicalThread() override;
  void DestroyPhysicalThread(DDPhysicalThread *pt) override;

  DDLogicalThread* CreateLogicalThread(u64 ctx);
  void DestroyLogicalThread(DDLogicalThread *lt);
  DDLogicalThread *CreateLogicalThread(u64 ctx) override;
  void DestroyLogicalThread(DDLogicalThread *lt) override;

  void MutexInit(DDCallback *cb, DDMutex *m);
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock);
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexDestroy(DDCallback *cb, DDMutex *m);
  void MutexInit(DDCallback *cb, DDMutex *m) override;
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
                      bool trylock) override;
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexDestroy(DDCallback *cb, DDMutex *m) override;

  DDReport *GetReport(DDCallback *cb);
  DDReport *GetReport(DDCallback *cb) override;

  void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
  void ReportDeadlock(DDCallback *cb, DDMutex *m);

@ -68,7 +69,7 @@ DD::DD(const DDFlags *flags)
}

DDPhysicalThread* DD::CreatePhysicalThread() {
  return 0;
  return nullptr;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {

@ -178,10 +179,10 @@ void DD::MutexDestroy(DDCallback *cb,

DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->lt->report_pending)
    return 0;
    return nullptr;
  cb->lt->report_pending = false;
  return &cb->lt->rep;
}

} // namespace __sanitizer
#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
} // namespace __sanitizer
#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1

@ -70,10 +70,10 @@ struct DDCallback {

struct DDetector {
  static DDetector *Create(const DDFlags *flags);

  virtual DDPhysicalThread* CreatePhysicalThread() { return 0; }
  virtual DDPhysicalThread* CreatePhysicalThread() { return nullptr; }
  virtual void DestroyPhysicalThread(DDPhysicalThread *pt) {}

  virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return 0; }
  virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return nullptr; }
  virtual void DestroyLogicalThread(DDLogicalThread *lt) {}

  virtual void MutexInit(DDCallback *cb, DDMutex *m) {}

@ -83,7 +83,7 @@ struct DDetector {
  virtual void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {}
  virtual void MutexDestroy(DDCallback *cb, DDMutex *m) {}

  virtual DDReport *GetReport(DDCallback *cb) { return 0; }
  virtual DDReport *GetReport(DDCallback *cb) { return nullptr; }
};

} // namespace __sanitizer

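A side benefit of the override annotations added above: any drift between DD
and the DDetector base now fails to compile instead of silently introducing a
new virtual. A minimal sketch of the pattern (the stub class is invented for
illustration and is not part of the merge):

// Hypothetical detector stub; relies on the DDetector declaration above.
struct NullDetector : DDetector {
  DDPhysicalThread *CreatePhysicalThread() override { return nullptr; }
  DDLogicalThread *CreateLogicalThread(u64 ctx) override { return nullptr; }
  // Misspelling a signature here, e.g. dropping the u64 parameter, would now
  // be rejected by the compiler thanks to `override`.
  DDReport *GetReport(DDCallback *cb) override { return nullptr; }
};
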
libsanitizer/sanitizer_common/sanitizer_flag_parser.cc (new file, 169 lines)
@ -0,0 +1,169 @@
//===-- sanitizer_flag_parser.cc ------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_flag_parser.h"

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_flags.h"
#include "sanitizer_flag_parser.h"

namespace __sanitizer {

LowLevelAllocator FlagParser::Alloc;

class UnknownFlags {
  static const int kMaxUnknownFlags = 20;
  const char *unknown_flags_[kMaxUnknownFlags];
  int n_unknown_flags_;

 public:
  void Add(const char *name) {
    CHECK_LT(n_unknown_flags_, kMaxUnknownFlags);
    unknown_flags_[n_unknown_flags_++] = name;
  }

  void Report() {
    if (!n_unknown_flags_) return;
    Printf("WARNING: found %d unrecognized flag(s):\n", n_unknown_flags_);
    for (int i = 0; i < n_unknown_flags_; ++i)
      Printf("    %s\n", unknown_flags_[i]);
    n_unknown_flags_ = 0;
  }
};

UnknownFlags unknown_flags;

void ReportUnrecognizedFlags() {
  unknown_flags.Report();
}

char *FlagParser::ll_strndup(const char *s, uptr n) {
  uptr len = internal_strnlen(s, n);
  char *s2 = (char*)Alloc.Allocate(len + 1);
  internal_memcpy(s2, s, len);
  s2[len] = 0;
  return s2;
}

void FlagParser::PrintFlagDescriptions() {
  Printf("Available flags for %s:\n", SanitizerToolName);
  for (int i = 0; i < n_flags_; ++i)
    Printf("\t%s\n\t\t- %s\n", flags_[i].name, flags_[i].desc);
}

void FlagParser::fatal_error(const char *err) {
  Printf("ERROR: %s\n", err);
  Die();
}

bool FlagParser::is_space(char c) {
  return c == ' ' || c == ',' || c == ':' || c == '\n' || c == '\t' ||
         c == '\r';
}

void FlagParser::skip_whitespace() {
  while (is_space(buf_[pos_])) ++pos_;
}

void FlagParser::parse_flag() {
  uptr name_start = pos_;
  while (buf_[pos_] != 0 && buf_[pos_] != '=' && !is_space(buf_[pos_])) ++pos_;
  if (buf_[pos_] != '=') fatal_error("expected '='");
  char *name = ll_strndup(buf_ + name_start, pos_ - name_start);

  uptr value_start = ++pos_;
  char *value;
  if (buf_[pos_] == '\'' || buf_[pos_] == '"') {
    char quote = buf_[pos_++];
    while (buf_[pos_] != 0 && buf_[pos_] != quote) ++pos_;
    if (buf_[pos_] == 0) fatal_error("unterminated string");
    value = ll_strndup(buf_ + value_start + 1, pos_ - value_start - 1);
    ++pos_; // consume the closing quote
  } else {
    while (buf_[pos_] != 0 && !is_space(buf_[pos_])) ++pos_;
    if (buf_[pos_] != 0 && !is_space(buf_[pos_]))
      fatal_error("expected separator or eol");
    value = ll_strndup(buf_ + value_start, pos_ - value_start);
  }

  bool res = run_handler(name, value);
  if (!res) fatal_error("Flag parsing failed.");
}

void FlagParser::parse_flags() {
  while (true) {
    skip_whitespace();
    if (buf_[pos_] == 0) break;
    parse_flag();
  }

  // Do a sanity check for certain flags.
  if (common_flags_dont_use.malloc_context_size < 1)
    common_flags_dont_use.malloc_context_size = 1;
}

void FlagParser::ParseString(const char *s) {
  if (!s) return;
  // Backup current parser state to allow nested ParseString() calls.
  const char *old_buf_ = buf_;
  uptr old_pos_ = pos_;
  buf_ = s;
  pos_ = 0;

  parse_flags();

  buf_ = old_buf_;
  pos_ = old_pos_;
}

bool FlagParser::ParseFile(const char *path, bool ignore_missing) {
  static const uptr kMaxIncludeSize = 1 << 15;
  char *data;
  uptr data_mapped_size;
  error_t err;
  uptr len;
  if (!ReadFileToBuffer(path, &data, &data_mapped_size, &len,
                        Max(kMaxIncludeSize, GetPageSizeCached()), &err)) {
    if (ignore_missing)
      return true;
    Printf("Failed to read options from '%s': error %d\n", path, err);
    return false;
  }
  ParseString(data);
  UnmapOrDie(data, data_mapped_size);
  return true;
}

bool FlagParser::run_handler(const char *name, const char *value) {
  for (int i = 0; i < n_flags_; ++i) {
    if (internal_strcmp(name, flags_[i].name) == 0)
      return flags_[i].handler->Parse(value);
  }
  // Unrecognized flag. This is not a fatal error, we may print a warning later.
  unknown_flags.Add(name);
  return true;
}

void FlagParser::RegisterHandler(const char *name, FlagHandlerBase *handler,
                                 const char *desc) {
  CHECK_LT(n_flags_, kMaxFlags);
  flags_[n_flags_].name = name;
  flags_[n_flags_].desc = desc;
  flags_[n_flags_].handler = handler;
  ++n_flags_;
}

FlagParser::FlagParser() : n_flags_(0), buf_(nullptr), pos_(0) {
  flags_ = (Flag *)Alloc.Allocate(sizeof(Flag) * kMaxFlags);
}

} // namespace __sanitizer

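Reading parse_flag() and is_space() together gives the accepted grammar:
name=value pairs separated by spaces, commas, colons, tabs, or newlines, with
single or double quotes allowed around values that contain separators. An
illustrative options string (values invented) that this parser accepts:

verbosity=2,malloc_context_size=30:log_path='/tmp/my tool.log'
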
libsanitizer/sanitizer_common/sanitizer_flag_parser.h (new file, 120 lines)
@ -0,0 +1,120 @@
//===-- sanitizer_flag_parser.h ---------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_FLAG_REGISTRY_H
#define SANITIZER_FLAG_REGISTRY_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_common.h"

namespace __sanitizer {

class FlagHandlerBase {
 public:
  virtual bool Parse(const char *value) { return false; }
};

template <typename T>
class FlagHandler : public FlagHandlerBase {
  T *t_;

 public:
  explicit FlagHandler(T *t) : t_(t) {}
  bool Parse(const char *value) final;
};

template <>
inline bool FlagHandler<bool>::Parse(const char *value) {
  if (internal_strcmp(value, "0") == 0 ||
      internal_strcmp(value, "no") == 0 ||
      internal_strcmp(value, "false") == 0) {
    *t_ = false;
    return true;
  }
  if (internal_strcmp(value, "1") == 0 ||
      internal_strcmp(value, "yes") == 0 ||
      internal_strcmp(value, "true") == 0) {
    *t_ = true;
    return true;
  }
  Printf("ERROR: Invalid value for bool option: '%s'\n", value);
  return false;
}

template <>
inline bool FlagHandler<const char *>::Parse(const char *value) {
  *t_ = internal_strdup(value);
  return true;
}

template <>
inline bool FlagHandler<int>::Parse(const char *value) {
  char *value_end;
  *t_ = internal_simple_strtoll(value, &value_end, 10);
  bool ok = *value_end == 0;
  if (!ok) Printf("ERROR: Invalid value for int option: '%s'\n", value);
  return ok;
}

template <>
inline bool FlagHandler<uptr>::Parse(const char *value) {
  char *value_end;
  *t_ = internal_simple_strtoll(value, &value_end, 10);
  bool ok = *value_end == 0;
  if (!ok) Printf("ERROR: Invalid value for uptr option: '%s'\n", value);
  return ok;
}

class FlagParser {
  static const int kMaxFlags = 200;
  struct Flag {
    const char *name;
    const char *desc;
    FlagHandlerBase *handler;
  } *flags_;
  int n_flags_;

  const char *buf_;
  uptr pos_;

 public:
  FlagParser();
  void RegisterHandler(const char *name, FlagHandlerBase *handler,
                       const char *desc);
  void ParseString(const char *s);
  bool ParseFile(const char *path, bool ignore_missing);
  void PrintFlagDescriptions();

  static LowLevelAllocator Alloc;

 private:
  void fatal_error(const char *err);
  bool is_space(char c);
  void skip_whitespace();
  void parse_flags();
  void parse_flag();
  bool run_handler(const char *name, const char *value);
  char *ll_strndup(const char *s, uptr n);
};

template <typename T>
static void RegisterFlag(FlagParser *parser, const char *name, const char *desc,
                         T *var) {
  FlagHandler<T> *fh = new (FlagParser::Alloc) FlagHandler<T>(var); // NOLINT
  parser->RegisterHandler(name, fh, desc);
}

void ReportUnrecognizedFlags();

} // namespace __sanitizer

#endif // SANITIZER_FLAG_REGISTRY_H

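Taken together, FlagParser and RegisterFlag() give each runtime a small
type-safe flag registry. A minimal sketch of how a tool might wire up one
flag (the flag name and function are hypothetical, not from the merge):

using namespace __sanitizer;

static bool my_tool_fast_mode;  // hypothetical tool-specific flag

static void SetupMyToolFlags(const char *env_options) {
  FlagParser parser;
  // The FlagHandler<bool> specialization accepts 0/no/false and 1/yes/true.
  RegisterFlag(&parser, "fast_mode", "Enable the fast path.",
               &my_tool_fast_mode);
  parser.ParseString(env_options);  // e.g. "fast_mode=yes"
  ReportUnrecognizedFlags();        // warn about names nobody registered
}
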
@ -14,6 +14,7 @@
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_flag_parser.h"

namespace __sanitizer {

@ -32,274 +33,68 @@ IntrusiveList<FlagDescription> flag_descriptions;
# define SANITIZER_NEEDS_SEGV 1
#endif

void SetCommonFlagsDefaults(CommonFlags *f) {
  f->symbolize = true;
  f->external_symbolizer_path = 0;
  f->allow_addr2line = false;
  f->strip_path_prefix = "";
  f->fast_unwind_on_check = false;
  f->fast_unwind_on_fatal = false;
  f->fast_unwind_on_malloc = true;
  f->handle_ioctl = false;
  f->malloc_context_size = 1;
  f->log_path = "stderr";
  f->verbosity = 0;
  f->detect_leaks = true;
  f->leak_check_at_exit = true;
  f->allocator_may_return_null = false;
  f->print_summary = true;
  f->check_printf = true;
  // TODO(glider): tools may want to set different defaults for handle_segv.
  f->handle_segv = SANITIZER_NEEDS_SEGV;
  f->allow_user_segv_handler = false;
  f->use_sigaltstack = true;
  f->detect_deadlocks = false;
  f->clear_shadow_mmap_threshold = 64 * 1024;
  f->color = "auto";
  f->legacy_pthread_cond = false;
  f->intercept_tls_get_addr = false;
  f->coverage = false;
  f->coverage_direct = SANITIZER_ANDROID;
  f->coverage_dir = ".";
  f->full_address_space = false;
  f->suppressions = "";
  f->print_suppressions = true;
  f->disable_coredump = (SANITIZER_WORDSIZE == 64);
  f->symbolize_inline_frames = true;
  f->stack_trace_format = "DEFAULT";
void CommonFlags::SetDefaults() {
#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "sanitizer_flags.inc"
#undef COMMON_FLAG
}

void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
  ParseFlag(str, &f->symbolize, "symbolize",
      "If set, use the online symbolizer from common sanitizer runtime to turn "
      "virtual addresses to file/line locations.");
  ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path",
      "Path to external symbolizer. If empty, the tool will search $PATH for "
      "the symbolizer.");
  ParseFlag(str, &f->allow_addr2line, "allow_addr2line",
      "If set, allows online symbolizer to run addr2line binary to symbolize "
      "stack traces (addr2line will only be used if llvm-symbolizer binary is "
      "unavailable).");
  ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix",
      "Strips this prefix from file paths in error reports.");
  ParseFlag(str, &f->fast_unwind_on_check, "fast_unwind_on_check",
      "If available, use the fast frame-pointer-based unwinder on "
      "internal CHECK failures.");
  ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal",
      "If available, use the fast frame-pointer-based unwinder on fatal "
      "errors.");
  ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc",
      "If available, use the fast frame-pointer-based unwinder on "
      "malloc/free.");
  ParseFlag(str, &f->handle_ioctl, "handle_ioctl",
      "Intercept and handle ioctl requests.");
  ParseFlag(str, &f->malloc_context_size, "malloc_context_size",
      "Max number of stack frames kept for each allocation/deallocation.");
  ParseFlag(str, &f->log_path, "log_path",
      "Write logs to \"log_path.pid\". The special values are \"stdout\" and "
      "\"stderr\". The default is \"stderr\".");
  ParseFlag(str, &f->verbosity, "verbosity",
      "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).");
  ParseFlag(str, &f->detect_leaks, "detect_leaks",
      "Enable memory leak detection.");
  ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit",
      "Invoke leak checking in an atexit handler. Has no effect if "
      "detect_leaks=false, or if __lsan_do_leak_check() is called before the "
      "handler has a chance to run.");
  ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null",
      "If false, the allocator will crash instead of returning 0 on "
      "out-of-memory.");
  ParseFlag(str, &f->print_summary, "print_summary",
      "If false, disable printing error summaries in addition to error "
      "reports.");
  ParseFlag(str, &f->check_printf, "check_printf",
      "Check printf arguments.");
  ParseFlag(str, &f->handle_segv, "handle_segv",
      "If set, registers the tool's custom SEGV handler (both SIGBUS and "
      "SIGSEGV on OSX).");
  ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler",
      "If set, allows user to register a SEGV handler even if the tool "
      "registers one.");
  ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack",
      "If set, uses alternate stack for signal handling.");
  ParseFlag(str, &f->detect_deadlocks, "detect_deadlocks",
      "If set, deadlock detection is enabled.");
  ParseFlag(str, &f->clear_shadow_mmap_threshold,
      "clear_shadow_mmap_threshold",
      "Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
      "memset(). This is the threshold size in bytes.");
  ParseFlag(str, &f->color, "color",
      "Colorize reports: (always|never|auto).");
  ParseFlag(str, &f->legacy_pthread_cond, "legacy_pthread_cond",
      "Enables support for dynamic libraries linked with libpthread 2.2.5.");
  ParseFlag(str, &f->intercept_tls_get_addr, "intercept_tls_get_addr",
      "Intercept __tls_get_addr.");
  ParseFlag(str, &f->help, "help", "Print the flag descriptions.");
  ParseFlag(str, &f->mmap_limit_mb, "mmap_limit_mb",
      "Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
      "not a user-facing flag, used mostly for testing the tools");
  ParseFlag(str, &f->coverage, "coverage",
      "If set, coverage information will be dumped at program shutdown (if the "
      "coverage instrumentation was enabled at compile time).");
  ParseFlag(str, &f->coverage_direct, "coverage_direct",
      "If set, coverage information will be dumped directly to a memory "
      "mapped file. This way data is not lost even if the process is "
      "suddenly killed.");
  ParseFlag(str, &f->coverage_dir, "coverage_dir",
      "Target directory for coverage dumps. Defaults to the current "
      "directory.");
  ParseFlag(str, &f->full_address_space, "full_address_space",
      "Sanitize complete address space; "
      "by default kernel area on 32-bit platforms will not be sanitized");
  ParseFlag(str, &f->suppressions, "suppressions", "Suppressions file name.");
  ParseFlag(str, &f->print_suppressions, "print_suppressions",
      "Print matched suppressions at exit.");
  ParseFlag(str, &f->disable_coredump, "disable_coredump",
      "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
      "dumping a 16T+ core file. Ignored on OSes that don't dump core by "
      "default and for sanitizers that don't reserve lots of virtual memory.");
  ParseFlag(str, &f->symbolize_inline_frames, "symbolize_inline_frames",
      "Print inlined frames in stacktraces. Defaults to true.");
  ParseFlag(str, &f->stack_trace_format, "stack_trace_format",
      "Format string used to render stack frames. "
      "See sanitizer_stacktrace_printer.h for the format description. "
      "Use DEFAULT to get default format.");

  // Do a sanity check for certain flags.
  if (f->malloc_context_size < 1)
    f->malloc_context_size = 1;
void CommonFlags::CopyFrom(const CommonFlags &other) {
  internal_memcpy(this, &other, sizeof(*this));
}

static bool GetFlagValue(const char *env, const char *name,
                         const char **value, int *value_length) {
  if (env == 0)
    return false;
  const char *pos = 0;
  for (;;) {
    pos = internal_strstr(env, name);
    if (pos == 0)
      return false;
    const char *name_end = pos + internal_strlen(name);
    if ((pos != env &&
         ((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_')) ||
        *name_end != '=') {
      // Seems to be middle of another flag name or value.
      env = pos + 1;
      continue;
// Copy the string from "s" to "out", replacing "%b" with the binary basename.
static void SubstituteBinaryName(const char *s, char *out, uptr out_size) {
  char *out_end = out + out_size;
  while (*s && out < out_end - 1) {
    if (s[0] != '%' || s[1] != 'b') { *out++ = *s++; continue; }
    const char *base = GetProcessName();
    CHECK(base);
    while (*base && out < out_end - 1)
      *out++ = *base++;
    s += 2; // skip "%b"
  }
  *out = '\0';
}

class FlagHandlerInclude : public FlagHandlerBase {
  FlagParser *parser_;
  bool ignore_missing_;

 public:
  explicit FlagHandlerInclude(FlagParser *parser, bool ignore_missing)
      : parser_(parser), ignore_missing_(ignore_missing) {}
  bool Parse(const char *value) final {
    if (internal_strchr(value, '%')) {
      char *buf = (char *)MmapOrDie(kMaxPathLength, "FlagHandlerInclude");
      SubstituteBinaryName(value, buf, kMaxPathLength);
      bool res = parser_->ParseFile(buf, ignore_missing_);
      UnmapOrDie(buf, kMaxPathLength);
      return res;
    }
    pos = name_end;
    break;
    return parser_->ParseFile(value, ignore_missing_);
  }
  const char *end;
  if (pos[0] != '=') {
    end = pos;
  } else {
    pos += 1;
    if (pos[0] == '"') {
      pos += 1;
      end = internal_strchr(pos, '"');
    } else if (pos[0] == '\'') {
      pos += 1;
      end = internal_strchr(pos, '\'');
    } else {
      // Read until the next space or colon.
      end = pos + internal_strcspn(pos, " :");
    }
    if (end == 0)
      end = pos + internal_strlen(pos);
  }
  *value = pos;
  *value_length = end - pos;
  return true;
};

void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf) {
  FlagHandlerInclude *fh_include = new (FlagParser::Alloc) // NOLINT
      FlagHandlerInclude(parser, /*ignore_missing*/ false);
  parser->RegisterHandler("include", fh_include,
                          "read more options from the given file");
  FlagHandlerInclude *fh_include_if_exists = new (FlagParser::Alloc) // NOLINT
      FlagHandlerInclude(parser, /*ignore_missing*/ true);
  parser->RegisterHandler(
      "include_if_exists", fh_include_if_exists,
      "read more options from the given file (if it exists)");
}

static bool StartsWith(const char *flag, int flag_length, const char *value) {
  if (!flag || !value)
    return false;
  int value_length = internal_strlen(value);
  return (flag_length >= value_length) &&
         (0 == internal_strncmp(flag, value, value_length));
}
void RegisterCommonFlags(FlagParser *parser, CommonFlags *cf) {
#define COMMON_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &cf->Name);
#include "sanitizer_flags.inc"
#undef COMMON_FLAG

static LowLevelAllocator allocator_for_flags;

// The linear scan is suboptimal, but the number of flags is relatively small.
bool FlagInDescriptionList(const char *name) {
  IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
  while (it.hasNext()) {
    if (!internal_strcmp(it.next()->name, name)) return true;
  }
  return false;
}

void AddFlagDescription(const char *name, const char *description) {
  if (FlagInDescriptionList(name)) return;
  FlagDescription *new_description = new(allocator_for_flags) FlagDescription;
  new_description->name = name;
  new_description->description = description;
  flag_descriptions.push_back(new_description);
}

// TODO(glider): put the descriptions inside CommonFlags.
void PrintFlagDescriptions() {
  IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
  Printf("Available flags for %s:\n", SanitizerToolName);
  while (it.hasNext()) {
    FlagDescription *descr = it.next();
    Printf("\t%s\n\t\t- %s\n", descr->name, descr->description);
  }
}

void ParseFlag(const char *env, bool *flag,
               const char *name, const char *descr) {
  const char *value;
  int value_length;
  AddFlagDescription(name, descr);
  if (!GetFlagValue(env, name, &value, &value_length))
    return;
  if (StartsWith(value, value_length, "0") ||
      StartsWith(value, value_length, "no") ||
      StartsWith(value, value_length, "false"))
    *flag = false;
  if (StartsWith(value, value_length, "1") ||
      StartsWith(value, value_length, "yes") ||
      StartsWith(value, value_length, "true"))
    *flag = true;
}

void ParseFlag(const char *env, int *flag,
               const char *name, const char *descr) {
  const char *value;
  int value_length;
  AddFlagDescription(name, descr);
  if (!GetFlagValue(env, name, &value, &value_length))
    return;
  *flag = static_cast<int>(internal_atoll(value));
}

void ParseFlag(const char *env, uptr *flag,
               const char *name, const char *descr) {
  const char *value;
  int value_length;
  AddFlagDescription(name, descr);
  if (!GetFlagValue(env, name, &value, &value_length))
    return;
  *flag = static_cast<uptr>(internal_atoll(value));
}

void ParseFlag(const char *env, const char **flag,
               const char *name, const char *descr) {
  const char *value;
  int value_length;
  AddFlagDescription(name, descr);
  if (!GetFlagValue(env, name, &value, &value_length))
    return;
  // Copy the flag value. Don't use locks here, as flags are parsed at
  // tool startup.
  char *value_copy = (char*)(allocator_for_flags.Allocate(value_length + 1));
  internal_memcpy(value_copy, value, value_length);
  value_copy[value_length] = '\0';
  *flag = value_copy;
  RegisterIncludeFlags(parser, cf);
}

} // namespace __sanitizer

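One practical consequence of the new include handler: option strings can now
pull in a per-binary file, since "%b" is expanded to the executable's basename
by SubstituteBinaryName(). A hedged sketch (the path is invented):

// Hypothetical setup: a parser with the include handlers registered.
FlagParser parser;
RegisterIncludeFlags(&parser, &common_flags_dont_use);
// Every program then loads its own option file, if one exists.
parser.ParseString("include_if_exists=/etc/sanitizer/%b.opts");
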
@ -16,62 +16,38 @@

namespace __sanitizer {

void ParseFlag(const char *env, bool *flag,
               const char *name, const char *descr);
void ParseFlag(const char *env, int *flag,
               const char *name, const char *descr);
void ParseFlag(const char *env, uptr *flag,
               const char *name, const char *descr);
void ParseFlag(const char *env, const char **flag,
               const char *name, const char *descr);

struct CommonFlags {
  bool symbolize;
  const char *external_symbolizer_path;
  bool allow_addr2line;
  const char *strip_path_prefix;
  bool fast_unwind_on_check;
  bool fast_unwind_on_fatal;
  bool fast_unwind_on_malloc;
  bool handle_ioctl;
  int malloc_context_size;
  const char *log_path;
  int verbosity;
  bool detect_leaks;
  bool leak_check_at_exit;
  bool allocator_may_return_null;
  bool print_summary;
  bool check_printf;
  bool handle_segv;
  bool allow_user_segv_handler;
  bool use_sigaltstack;
  bool detect_deadlocks;
  uptr clear_shadow_mmap_threshold;
  const char *color;
  bool legacy_pthread_cond;
  bool intercept_tls_get_addr;
  bool help;
  uptr mmap_limit_mb;
  bool coverage;
  bool coverage_direct;
  const char *coverage_dir;
  bool full_address_space;
  const char *suppressions;
  bool print_suppressions;
  bool disable_coredump;
  bool symbolize_inline_frames;
  const char *stack_trace_format;
#define COMMON_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "sanitizer_flags.inc"
#undef COMMON_FLAG

  void SetDefaults();
  void CopyFrom(const CommonFlags &other);
};

inline CommonFlags *common_flags() {
extern CommonFlags common_flags_dont_use;
// Functions to get/set global CommonFlags shared by all sanitizer runtimes:
extern CommonFlags common_flags_dont_use;
inline const CommonFlags *common_flags() {
  return &common_flags_dont_use;
}

void SetCommonFlagsDefaults(CommonFlags *f);
void ParseCommonFlagsFromString(CommonFlags *f, const char *str);
void PrintFlagDescriptions();
inline void SetCommonFlagsDefaults() {
  common_flags_dont_use.SetDefaults();
}

// This function can only be used to setup tool-specific overrides for
// CommonFlags defaults. Generally, it should only be used right after
// SetCommonFlagsDefaults(), but before ParseCommonFlagsFromString(), and
// only during the flags initialization (i.e. before they are used for
// the first time).
inline void OverrideCommonFlags(const CommonFlags &cf) {
  common_flags_dont_use.CopyFrom(cf);
}

class FlagParser;
void RegisterCommonFlags(FlagParser *parser,
                         CommonFlags *cf = &common_flags_dont_use);
void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf);
} // namespace __sanitizer

#endif // SANITIZER_FLAGS_H

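The three COMMON_FLAG expansions in this merge are worth seeing side by side:
the same sanitizer_flags.inc entry is stamped out once as a struct field, once
as a default-value assignment, and once as a parser registration.

// In struct CommonFlags (sanitizer_flags.h):
#define COMMON_FLAG(Type, Name, DefaultValue, Description) Type Name;
// In CommonFlags::SetDefaults() (sanitizer_flags.cc):
#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
// In RegisterCommonFlags() (sanitizer_flags.cc):
#define COMMON_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &cf->Name);
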
libsanitizer/sanitizer_common/sanitizer_flags.inc (new file, 192 lines)
@ -0,0 +1,192 @@
//===-- sanitizer_flags.inc -------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes common flags available in all sanitizers.
//
//===----------------------------------------------------------------------===//

#ifndef COMMON_FLAG
#error "Define COMMON_FLAG prior to including this file!"
#endif

// COMMON_FLAG(Type, Name, DefaultValue, Description)
// Supported types: bool, const char *, int, uptr.
// Default value must be a compile-time constant.
// Description must be a string literal.

COMMON_FLAG(
    bool, symbolize, true,
    "If set, use the online symbolizer from common sanitizer runtime to turn "
    "virtual addresses to file/line locations.")
COMMON_FLAG(
    const char *, external_symbolizer_path, nullptr,
    "Path to external symbolizer. If empty, the tool will search $PATH for "
    "the symbolizer.")
COMMON_FLAG(
    bool, allow_addr2line, false,
    "If set, allows online symbolizer to run addr2line binary to symbolize "
    "stack traces (addr2line will only be used if llvm-symbolizer binary is "
    "unavailable).")
COMMON_FLAG(const char *, strip_path_prefix, "",
            "Strips this prefix from file paths in error reports.")
COMMON_FLAG(bool, fast_unwind_on_check, false,
            "If available, use the fast frame-pointer-based unwinder on "
            "internal CHECK failures.")
COMMON_FLAG(bool, fast_unwind_on_fatal, false,
            "If available, use the fast frame-pointer-based unwinder on fatal "
            "errors.")
COMMON_FLAG(bool, fast_unwind_on_malloc, true,
            "If available, use the fast frame-pointer-based unwinder on "
            "malloc/free.")
COMMON_FLAG(bool, handle_ioctl, false, "Intercept and handle ioctl requests.")
COMMON_FLAG(int, malloc_context_size, 1,
            "Max number of stack frames kept for each allocation/deallocation.")
COMMON_FLAG(
    const char *, log_path, "stderr",
    "Write logs to \"log_path.pid\". The special values are \"stdout\" and "
    "\"stderr\". The default is \"stderr\".")
COMMON_FLAG(
    bool, log_exe_name, false,
    "Mention name of executable when reporting error and "
    "append executable name to logs (as in \"log_path.exe_name.pid\").")
COMMON_FLAG(
    bool, log_to_syslog, SANITIZER_ANDROID,
    "Write all sanitizer output to syslog in addition to other means of "
    "logging.")
COMMON_FLAG(
    int, verbosity, 0,
    "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
COMMON_FLAG(bool, detect_leaks, true, "Enable memory leak detection.")
COMMON_FLAG(
    bool, leak_check_at_exit, true,
    "Invoke leak checking in an atexit handler. Has no effect if "
    "detect_leaks=false, or if __lsan_do_leak_check() is called before the "
    "handler has a chance to run.")
COMMON_FLAG(bool, allocator_may_return_null, false,
            "If false, the allocator will crash instead of returning 0 on "
            "out-of-memory.")
COMMON_FLAG(bool, print_summary, true,
            "If false, disable printing error summaries in addition to error "
            "reports.")
COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
COMMON_FLAG(bool, handle_segv, SANITIZER_NEEDS_SEGV,
            "If set, registers the tool's custom SIGSEGV/SIGBUS handler.")
COMMON_FLAG(bool, handle_abort, false,
            "If set, registers the tool's custom SIGABRT handler.")
COMMON_FLAG(bool, handle_sigfpe, true,
            "If set, registers the tool's custom SIGFPE handler.")
COMMON_FLAG(bool, allow_user_segv_handler, false,
            "If set, allows user to register a SEGV handler even if the tool "
            "registers one.")
COMMON_FLAG(bool, use_sigaltstack, true,
            "If set, uses alternate stack for signal handling.")
COMMON_FLAG(bool, detect_deadlocks, false,
            "If set, deadlock detection is enabled.")
COMMON_FLAG(
    uptr, clear_shadow_mmap_threshold, 64 * 1024,
    "Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
    "memset(). This is the threshold size in bytes.")
COMMON_FLAG(const char *, color, "auto",
            "Colorize reports: (always|never|auto).")
COMMON_FLAG(
    bool, legacy_pthread_cond, false,
    "Enables support for dynamic libraries linked with libpthread 2.2.5.")
COMMON_FLAG(bool, intercept_tls_get_addr, false, "Intercept __tls_get_addr.")
COMMON_FLAG(bool, help, false, "Print the flag descriptions.")
COMMON_FLAG(uptr, mmap_limit_mb, 0,
            "Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
            "not a user-facing flag, used mostly for testing the tools")
COMMON_FLAG(uptr, hard_rss_limit_mb, 0,
            "Hard RSS limit in Mb."
            " If non-zero, a background thread is spawned at startup"
            " which periodically reads RSS and aborts the process if the"
            " limit is reached")
COMMON_FLAG(uptr, soft_rss_limit_mb, 0,
            "Soft RSS limit in Mb."
            " If non-zero, a background thread is spawned at startup"
            " which periodically reads RSS. If the limit is reached"
            " all subsequent malloc/new calls will fail or return NULL"
            " (depending on the value of allocator_may_return_null)"
            " until the RSS goes below the soft limit."
            " This limit does not affect memory allocations other than"
            " malloc/new.")
COMMON_FLAG(bool, can_use_proc_maps_statm, true,
            "If false, do not attempt to read /proc/maps/statm."
            " Mostly useful for testing sanitizers.")
COMMON_FLAG(
    bool, coverage, false,
    "If set, coverage information will be dumped at program shutdown (if the "
    "coverage instrumentation was enabled at compile time).")
COMMON_FLAG(bool, coverage_pcs, true,
            "If set (and if 'coverage' is set too), the coverage information "
            "will be dumped as a set of PC offsets for every module.")
COMMON_FLAG(bool, coverage_order_pcs, false,
            "If true, the PCs will be dumped in the order they've"
            " appeared during the execution.")
COMMON_FLAG(bool, coverage_bitset, false,
            "If set (and if 'coverage' is set too), the coverage information "
            "will also be dumped as a bitset to a separate file.")
COMMON_FLAG(bool, coverage_counters, false,
            "If set (and if 'coverage' is set too), the bitmap that corresponds"
            " to coverage counters will be dumped.")
COMMON_FLAG(bool, coverage_direct, SANITIZER_ANDROID,
            "If set, coverage information will be dumped directly to a memory "
            "mapped file. This way data is not lost even if the process is "
            "suddenly killed.")
COMMON_FLAG(const char *, coverage_dir, ".",
            "Target directory for coverage dumps. Defaults to the current "
            "directory.")
COMMON_FLAG(bool, full_address_space, false,
            "Sanitize complete address space; "
            "by default kernel area on 32-bit platforms will not be sanitized")
COMMON_FLAG(bool, print_suppressions, true,
            "Print matched suppressions at exit.")
COMMON_FLAG(
    bool, disable_coredump, (SANITIZER_WORDSIZE == 64),
    "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
    "dumping a 16T+ core file. Ignored on OSes that don't dump core by "
    "default and for sanitizers that don't reserve lots of virtual memory.")
COMMON_FLAG(bool, use_madv_dontdump, true,
            "If set, instructs kernel to not store the (huge) shadow "
            "in core file.")
COMMON_FLAG(bool, symbolize_inline_frames, true,
            "Print inlined frames in stacktraces. Defaults to true.")
COMMON_FLAG(bool, symbolize_vs_style, false,
            "Print file locations in Visual Studio style (e.g: "
            " file(10,42): ...)")
COMMON_FLAG(const char *, stack_trace_format, "DEFAULT",
            "Format string used to render stack frames. "
            "See sanitizer_stacktrace_printer.h for the format description. "
            "Use DEFAULT to get default format.")
COMMON_FLAG(bool, no_huge_pages_for_shadow, true,
            "If true, the shadow is not allowed to use huge pages. ")
COMMON_FLAG(bool, strict_string_checks, false,
            "If set check that string arguments are properly null-terminated")
COMMON_FLAG(bool, intercept_strstr, true,
            "If set, uses custom wrappers for strstr and strcasestr functions "
            "to find more errors.")
COMMON_FLAG(bool, intercept_strspn, true,
            "If set, uses custom wrappers for strspn and strcspn function "
            "to find more errors.")
COMMON_FLAG(bool, intercept_strpbrk, true,
            "If set, uses custom wrappers for strpbrk function "
            "to find more errors.")
COMMON_FLAG(bool, intercept_memcmp, true,
            "If set, uses custom wrappers for memcmp function "
            "to find more errors.")
COMMON_FLAG(bool, strict_memcmp, true,
            "If true, assume that memcmp(p1, p2, n) always reads n bytes before "
            "comparing p1 and p2.")
COMMON_FLAG(bool, decorate_proc_maps, false, "If set, decorate sanitizer "
                                             "mappings in /proc/self/maps with "
                                             "user-readable names")
COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool "
                              "found an error")
COMMON_FLAG(
    bool, abort_on_error, SANITIZER_MAC,
    "If set, the tool calls abort() instead of _exit() after printing the "
    "error report.")
@ -1,23 +0,0 @@
//===-- sanitizer_interception.h --------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Common macro definitions for interceptors.
// Always use this header instead of interception/interception.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_INTERCEPTION_H
#define SANITIZER_INTERCEPTION_H

#include "interception/interception.h"
#include "sanitizer_common.h"

#if SANITIZER_LINUX && !defined(SANITIZER_GO)
#undef REAL
#define REAL(x) IndirectExternCall(__interception::PTR_TO_REAL(x))
#endif

#endif // SANITIZER_INTERCEPTION_H
libsanitizer/sanitizer_common/sanitizer_interface_internal.h (new file, 56 lines)
@ -0,0 +1,56 @@
//===-- sanitizer_interface_internal.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// This header declares the sanitizer runtime interface functions.
// The runtime library has to define these functions so the instrumented program
// could call them.
//
// See also include/sanitizer/common_interface_defs.h
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_INTERFACE_INTERNAL_H
#define SANITIZER_INTERFACE_INTERNAL_H

#include "sanitizer_internal_defs.h"

extern "C" {
  // Tell the tools to write their reports to "path.<pid>" instead of stderr.
  // The special values are "stdout" and "stderr".
  SANITIZER_INTERFACE_ATTRIBUTE
  void __sanitizer_set_report_path(const char *path);

  typedef struct {
    int coverage_sandboxed;
    __sanitizer::sptr coverage_fd;
    unsigned int coverage_max_block_size;
  } __sanitizer_sandbox_arguments;

  // Notify the tools that the sandbox is going to be turned on.
  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
  __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);

  // This function is called by the tool when it has just finished reporting
  // an error. 'error_summary' is a one-line string that summarizes
  // the error message. This function can be overridden by the client.
  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
  void __sanitizer_report_error_summary(const char *error_summary);

  SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
  SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
  SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
  SANITIZER_INTERFACE_ATTRIBUTE
  void __sanitizer_annotate_contiguous_container(const void *beg,
                                                 const void *end,
                                                 const void *old_mid,
                                                 const void *new_mid);
  SANITIZER_INTERFACE_ATTRIBUTE
  int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
                                              const void *end);
} // extern "C"

#endif // SANITIZER_INTERFACE_INTERNAL_H

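The comment above __sanitizer_report_error_summary says the weak default may
be overridden by the client. A minimal sketch of such an override (the prefix
string is invented; only the function name and signature come from the header):

#include <cstdio>

// A strong definition in the instrumented program replaces the weak default
// and receives each one-line error summary the runtime produces.
extern "C" void __sanitizer_report_error_summary(const char *error_summary) {
  std::fprintf(stderr, "[my-app] sanitizer: %s\n", error_summary);
}
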
--- a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
+++ b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
@@ -13,6 +13,10 @@
 
 #include "sanitizer_platform.h"
 
+#ifndef SANITIZER_DEBUG
+# define SANITIZER_DEBUG 0
+#endif
+
 // Only use SANITIZER_*ATTRIBUTE* before the function return type!
 #if SANITIZER_WINDOWS
 # define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
@@ -26,7 +30,7 @@
 # define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
 #endif
 
-#if SANITIZER_LINUX && !defined(SANITIZER_GO)
+#if (SANITIZER_LINUX || SANITIZER_WINDOWS) && !defined(SANITIZER_GO)
 # define SANITIZER_SUPPORTS_WEAK_HOOKS 1
 #else
 # define SANITIZER_SUPPORTS_WEAK_HOOKS 0
@@ -74,13 +78,22 @@ typedef signed char s8;
 typedef signed short s16;  // NOLINT
 typedef signed int s32;
 typedef signed long long s64;  // NOLINT
+#if SANITIZER_WINDOWS
+// On Windows, files are HANDLE, which is a synonym of void*.
+// Use void* to avoid including <windows.h> everywhere.
+typedef void* fd_t;
+typedef unsigned error_t;
+#else
 typedef int fd_t;
+typedef int error_t;
+#endif
 
 // WARNING: OFF_T may be different from OS type off_t, depending on the value of
 // _FILE_OFFSET_BITS. This definition of OFF_T matches the ABI of system calls
 // like pread and mmap, as opposed to pread64 and mmap64.
-// Mac and Linux/x86-64 are special.
-#if SANITIZER_MAC || (SANITIZER_LINUX && defined(__x86_64__))
+// FreeBSD, Mac and Linux/x86-64 are special.
+#if SANITIZER_FREEBSD || SANITIZER_MAC || \
+  (SANITIZER_LINUX && defined(__x86_64__))
 typedef u64 OFF_T;
 #else
 typedef uptr OFF_T;
@@ -94,41 +107,6 @@ typedef u32 operator_new_size_type;
 #endif
 }  // namespace __sanitizer
 
-extern "C" {
-// Tell the tools to write their reports to "path.<pid>" instead of stderr.
-// The special values are "stdout" and "stderr".
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_set_report_path(const char *path);
-
-typedef struct {
-    int coverage_sandboxed;
-    __sanitizer::sptr coverage_fd;
-    unsigned int coverage_max_block_size;
-} __sanitizer_sandbox_arguments;
-
-// Notify the tools that the sandbox is going to be turned on.
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
-__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
-
-// This function is called by the tool when it has just finished reporting
-// an error. 'error_summary' is a one-line string that summarizes
-// the error message. This function can be overridden by the client.
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_report_error_summary(const char *error_summary);
-
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov();
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_annotate_contiguous_container(const void *beg,
-                                               const void *end,
-                                               const void *old_mid,
-                                               const void *new_mid);
-SANITIZER_INTERFACE_ATTRIBUTE
-int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
-                                            const void *end);
-}  // extern "C"
-
 
 using namespace __sanitizer;  // NOLINT
 // ----------- ATTENTION -------------
@@ -149,7 +127,6 @@ using namespace __sanitizer;  // NOLINT
 # define NOINLINE __declspec(noinline)
 # define NORETURN __declspec(noreturn)
 # define THREADLOCAL __declspec(thread)
-# define NOTHROW
 # define LIKELY(x) (x)
 # define UNLIKELY(x) (x)
 # define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
@@ -163,7 +140,6 @@ using namespace __sanitizer;  // NOLINT
 # define NOINLINE __attribute__((noinline))
 # define NORETURN __attribute__((noreturn))
 # define THREADLOCAL __thread
-# define NOTHROW throw()
 # define LIKELY(x) __builtin_expect(!!(x), 1)
 # define UNLIKELY(x) __builtin_expect(!!(x), 0)
 # if defined(__i386__) || defined(__x86_64__)
@@ -182,6 +158,12 @@ using namespace __sanitizer;  // NOLINT
 # define USED
 #endif
 
+#if !defined(_MSC_VER) || defined(__clang__) || MSC_PREREQ(1900)
+# define NOEXCEPT noexcept
+#else
+# define NOEXCEPT throw()
+#endif
+
 // Unaligned versions of basic types.
 typedef ALIGNED(1) u16 uu16;
 typedef ALIGNED(1) u32 uu32;
@@ -238,7 +220,7 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
 #define CHECK_GT(a, b) CHECK_IMPL((a), >, (b))
 #define CHECK_GE(a, b) CHECK_IMPL((a), >=, (b))
 
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
 #define DCHECK(a) CHECK(a)
 #define DCHECK_EQ(a, b) CHECK_EQ(a, b)
 #define DCHECK_NE(a, b) CHECK_NE(a, b)
@@ -318,4 +300,12 @@ extern "C" void* _ReturnAddress(void);
   } while (internal_iserror(res, &rverrno) && rverrno == EINTR); \
   }
 
+// Forces the compiler to generate a frame pointer in the function.
+#define ENABLE_FRAME_POINTER                       \
+  do {                                             \
+    volatile uptr enable_fp;                       \
+    enable_fp = GET_CURRENT_FRAME();               \
+    (void)enable_fp;                               \
+  } while (0)
+
 #endif  // SANITIZER_DEFS_H

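Two small but load-bearing changes above: debug checks are now keyed to the
cross-tool SANITIZER_DEBUG (default 0) instead of the TSan-specific
TSAN_DEBUG, and NOTHROW gives way to a NOEXCEPT macro that prefers the C++11
keyword where available. A sketch of the intended effect (CopyRegion is a
hypothetical function; DCHECK is the real macro from this header):

    // With -DSANITIZER_DEBUG=1 this CHECK-fails on a bad size;
    // in default builds the DCHECK compiles away to nothing.
    void CopyRegion(char *dst, const char *src, uptr dst_size, uptr n) {
      DCHECK(n <= dst_size);  // debug-only bounds check
      for (uptr i = 0; i < n; i++) dst[i] = src[i];
    }
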
--- a/libsanitizer/sanitizer_common/sanitizer_lfstack.h
+++ b/libsanitizer/sanitizer_common/sanitizer_lfstack.h
@@ -47,8 +47,8 @@ struct LFStack {
     u64 cmp = atomic_load(&head_, memory_order_acquire);
     for (;;) {
       T *cur = (T*)(uptr)(cmp & kPtrMask);
-      if (cur == 0)
-        return 0;
+      if (!cur)
+        return nullptr;
       T *nxt = cur->next;
       u64 cnt = (cmp & kCounterMask);
       u64 xch = (u64)(uptr)nxt | cnt;
@@ -66,6 +66,6 @@ struct LFStack {
 
   atomic_uint64_t head_;
 };
-}  // namespace __sanitizer
+} // namespace __sanitizer
 
-#endif  // #ifndef SANITIZER_LFSTACK_H
+#endif // SANITIZER_LFSTACK_H

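For context, the Pop loop above packs a pointer and a generation counter into
the single u64 head_, so the compare-exchange fails if another thread popped
and re-pushed the same node in between (the classic ABA hazard). A sketch of
the matching push side, reconstructed from the constants Pop uses
(kPtrMask/kCounterMask/kCounterInc are LFStack's own masks; treat this as
illustrative rather than the verbatim upstream body):

    void Push(T *p) {
      u64 cmp = atomic_load(&head_, memory_order_relaxed);
      for (;;) {
        u64 cnt = (cmp & kCounterMask) + kCounterInc;  // bump the generation
        u64 xch = (u64)(uptr)p | cnt;
        p->next = (T*)(uptr)(cmp & kPtrMask);
        if (atomic_compare_exchange_weak(&head_, &cmp, xch,
                                         memory_order_release))
          break;
      }
    }
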
--- a/libsanitizer/sanitizer_common/sanitizer_libc.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_libc.cc
@@ -8,37 +8,37 @@
 // This file is shared between AddressSanitizer and ThreadSanitizer
 // run-time libraries. See sanitizer_libc.h for details.
 //===----------------------------------------------------------------------===//
+
 #include "sanitizer_allocator_internal.h"
 #include "sanitizer_common.h"
 #include "sanitizer_libc.h"
 
 namespace __sanitizer {
 
-// Make the compiler think that something is going on there.
-static inline void break_optimization(void *arg) {
-#if _MSC_VER
-  // FIXME: make sure this is actually enough.
-  __asm;
-#else
-  __asm__ __volatile__("" : : "r" (arg) : "memory");
-#endif
-}
-
 s64 internal_atoll(const char *nptr) {
-  return internal_simple_strtoll(nptr, (char**)0, 10);
+  return internal_simple_strtoll(nptr, nullptr, 10);
 }
 
 void *internal_memchr(const void *s, int c, uptr n) {
-  const char* t = (char*)s;
+  const char *t = (const char *)s;
   for (uptr i = 0; i < n; ++i, ++t)
     if (*t == c)
-      return (void*)t;
-  return 0;
+      return reinterpret_cast<void *>(const_cast<char *>(t));
+  return nullptr;
 }
 
+void *internal_memrchr(const void *s, int c, uptr n) {
+  const char *t = (const char *)s;
+  void *res = nullptr;
+  for (uptr i = 0; i < n; ++i, ++t) {
+    if (*t == c) res = reinterpret_cast<void *>(const_cast<char *>(t));
+  }
+  return res;
+}
+
 int internal_memcmp(const void* s1, const void* s2, uptr n) {
-  const char* t1 = (char*)s1;
-  const char* t2 = (char*)s2;
+  const char *t1 = (const char *)s1;
+  const char *t2 = (const char *)s2;
   for (uptr i = 0; i < n; ++i, ++t1, ++t2)
     if (*t1 != *t2)
       return *t1 < *t2 ? -1 : 1;
@@ -47,7 +47,7 @@ int internal_memcmp(const void* s1, const void* s2, uptr n) {
 
 void *internal_memcpy(void *dest, const void *src, uptr n) {
   char *d = (char*)dest;
-  char *s = (char*)src;
+  const char *s = (const char *)src;
   for (uptr i = 0; i < n; ++i)
     d[i] = s[i];
   return dest;
@@ -55,7 +55,7 @@ void *internal_memcpy(void *dest, const void *src, uptr n) {
 
 void *internal_memmove(void *dest, const void *src, uptr n) {
   char *d = (char*)dest;
-  char *s = (char*)src;
+  const char *s = (const char *)src;
   sptr i, signed_n = (sptr)n;
   CHECK_GE(signed_n, 0);
   if (d < s) {
@@ -76,7 +76,8 @@ void internal_bzero_aligned16(void *s, uptr n) {
   CHECK_EQ((reinterpret_cast<uptr>(s) | n) & 15, 0);
   for (S16 *p = reinterpret_cast<S16*>(s), *end = p + n / 16; p < end; p++) {
     p->a = p->b = 0;
-    break_optimization(0);  // Make sure this does not become memset.
+    // Make sure this does not become memset.
+    SanitizerBreakOptimization(nullptr);
   }
 }
 
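The replacement helper SanitizerBreakOptimization() lives elsewhere in
sanitizer_common and is not shown in this diff; judging from the
break_optimization() body removed above, a plausible shape is (a sketch,
not the verbatim upstream definition):

    static inline void SanitizerBreakOptimization(void *arg) {
    #if defined(_MSC_VER) && !defined(__clang__)
      _ReadWriteBarrier();  // MSVC: compiler-only memory barrier
    #else
      // Pretend to read and clobber memory so the zeroing loop above
      // cannot be folded into a memset call.
      __asm__ __volatile__("" : : "r" (arg) : "memory");
    #endif
    }
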
@@ -95,7 +96,7 @@ void *internal_memset(void* s, int c, uptr n) {
 uptr internal_strcspn(const char *s, const char *reject) {
   uptr i;
   for (i = 0; s[i]; i++) {
-    if (internal_strchr(reject, s[i]) != 0)
+    if (internal_strchr(reject, s[i]))
      return i;
   }
   return i;
@@ -109,6 +110,14 @@ char* internal_strdup(const char *s) {
   return s2;
 }
 
+char* internal_strndup(const char *s, uptr n) {
+  uptr len = internal_strnlen(s, n);
+  char *s2 = (char*)InternalAlloc(len + 1);
+  internal_memcpy(s2, s, len);
+  s2[len] = 0;
+  return s2;
+}
+
 int internal_strcmp(const char *s1, const char *s2) {
   while (true) {
     unsigned c1 = *s1;
@@ -136,9 +145,9 @@ int internal_strncmp(const char *s1, const char *s2, uptr n) {
 char* internal_strchr(const char *s, int c) {
   while (true) {
     if (*s == (char)c)
-      return (char*)s;
+      return const_cast<char *>(s);
     if (*s == 0)
-      return 0;
+      return nullptr;
     s++;
   }
 }
@@ -146,16 +155,16 @@ char* internal_strchr(const char *s, int c) {
 char *internal_strchrnul(const char *s, int c) {
   char *res = internal_strchr(s, c);
   if (!res)
-    res = (char*)s + internal_strlen(s);
+    res = const_cast<char *>(s) + internal_strlen(s);
   return res;
 }
 
 char *internal_strrchr(const char *s, int c) {
-  const char *res = 0;
+  const char *res = nullptr;
   for (uptr i = 0; s[i]; i++) {
     if (s[i] == c) res = s + i;
   }
-  return (char*)res;
+  return const_cast<char *>(res);
 }
 
 uptr internal_strlen(const char *s) {
@@ -191,12 +200,12 @@ char *internal_strstr(const char *haystack, const char *needle) {
   // This is O(N^2), but we are not using it in hot places.
   uptr len1 = internal_strlen(haystack);
   uptr len2 = internal_strlen(needle);
-  if (len1 < len2) return 0;
+  if (len1 < len2) return nullptr;
   for (uptr pos = 0; pos <= len1 - len2; pos++) {
     if (internal_memcmp(haystack + pos, needle, len2) == 0)
-      return (char*)haystack + pos;
+      return const_cast<char *>(haystack) + pos;
   }
-  return 0;
+  return nullptr;
 }
 
 s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
@@ -205,7 +214,7 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
   int sgn = 1;
   u64 res = 0;
   bool have_digits = false;
-  char *old_nptr = (char*)nptr;
+  char *old_nptr = const_cast<char *>(nptr);
   if (*nptr == '+') {
     sgn = 1;
     nptr++;
@@ -220,8 +229,8 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
     have_digits = true;
     nptr++;
   }
-  if (endptr != 0) {
-    *endptr = (have_digits) ? (char*)nptr : old_nptr;
+  if (endptr) {
+    *endptr = (have_digits) ? const_cast<char *>(nptr) : old_nptr;
   }
   if (sgn > 0) {
     return (s64)(Min((u64)INT64_MAX, res));
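A quick worked example of the endptr contract above (hypothetical caller):
when at least one digit is consumed, *endptr points at the first unparsed
character; otherwise it is reset to the original input pointer rather than
left mid-string.

    const char *p = "42abc";
    char *end;
    s64 v = internal_simple_strtoll(p, &end, 10);
    // v == 42 and end points at "abc"; for p = "xyz", v == 0 and end == p.
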
@@ -249,4 +258,4 @@ bool mem_is_zero(const char *beg, uptr size) {
   return all == 0;
 }
 
-}  // namespace __sanitizer
+} // namespace __sanitizer

--- a/libsanitizer/sanitizer_common/sanitizer_libc.h
+++ b/libsanitizer/sanitizer_common/sanitizer_libc.h
@@ -9,7 +9,9 @@
 // run-time libraries.
 // These tools cannot use some of the libc functions directly because those
 // functions are intercepted. Instead, we implement a tiny subset of libc here.
+// FIXME: Some of the functions declared here are in fact POSIX, not libc.
 //===----------------------------------------------------------------------===//
+
 #ifndef SANITIZER_LIBC_H
 #define SANITIZER_LIBC_H
 
@@ -24,6 +26,7 @@ namespace __sanitizer {
 // String functions
 s64 internal_atoll(const char *nptr);
 void *internal_memchr(const void *s, int c, uptr n);
+void *internal_memrchr(const void *s, int c, uptr n);
 int internal_memcmp(const void* s1, const void* s2, uptr n);
 void *internal_memcpy(void *dest, const void *src, uptr n);
 void *internal_memmove(void *dest, const void *src, uptr n);
@@ -36,6 +39,7 @@ char *internal_strchrnul(const char *s, int c);
 int internal_strcmp(const char *s1, const char *s2);
 uptr internal_strcspn(const char *s, const char *reject);
 char *internal_strdup(const char *s);
+char *internal_strndup(const char *s, uptr n);
 uptr internal_strlen(const char *s);
 char *internal_strncat(char *dst, const char *src, uptr n);
 int internal_strncmp(const char *s1, const char *s2, uptr n);
@@ -52,55 +56,26 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...);
 // Optimized for the case when the result is true.
 bool mem_is_zero(const char *mem, uptr size);
 
-
-// Memory
-uptr internal_mmap(void *addr, uptr length, int prot, int flags,
-                   int fd, u64 offset);
-uptr internal_munmap(void *addr, uptr length);
-
 // I/O
-const fd_t kInvalidFd = -1;
+const fd_t kInvalidFd = (fd_t)-1;
 const fd_t kStdinFd = 0;
-const fd_t kStdoutFd = 1;
-const fd_t kStderrFd = 2;
-uptr internal_close(fd_t fd);
-int internal_isatty(fd_t fd);
+const fd_t kStdoutFd = (fd_t)1;
+const fd_t kStderrFd = (fd_t)2;
 
-// Use __sanitizer::OpenFile() instead.
-uptr internal_open(const char *filename, int flags);
-uptr internal_open(const char *filename, int flags, u32 mode);
-
-uptr internal_read(fd_t fd, void *buf, uptr count);
-uptr internal_write(fd_t fd, const void *buf, uptr count);
 uptr internal_ftruncate(fd_t fd, uptr size);
 
 // OS
-uptr internal_filesize(fd_t fd);  // -1 on error.
-uptr internal_stat(const char *path, void *buf);
-uptr internal_lstat(const char *path, void *buf);
-uptr internal_fstat(fd_t fd, void *buf);
-uptr internal_dup2(int oldfd, int newfd);
-uptr internal_readlink(const char *path, char *buf, uptr bufsize);
-uptr internal_unlink(const char *path);
-uptr internal_rename(const char *oldpath, const char *newpath);
 void NORETURN internal__exit(int exitcode);
-uptr internal_lseek(fd_t fd, OFF_T offset, int whence);
-
-uptr internal_ptrace(int request, int pid, void *addr, void *data);
-uptr internal_waitpid(int pid, int *status, int options);
 uptr internal_getpid();
 uptr internal_getppid();
 
 int internal_fork();
 
 // Threading
 uptr internal_sched_yield();
 
 // Error handling
-bool internal_iserror(uptr retval, int *rverrno = 0);
+bool internal_iserror(uptr retval, int *rverrno = nullptr);
 
 int internal_sigaction(int signum, const void *act, void *oldact);
 
-}  // namespace __sanitizer
+} // namespace __sanitizer
 
-#endif  // SANITIZER_LIBC_H
+#endif // SANITIZER_LIBC_H

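The internal_* wrappers above return a uptr that encodes the failure in the
Linux-syscall style (a negative errno in disguise), and internal_iserror()
decodes it. A usage sketch under those assumptions (fd and new_size are
hypothetical; Report is the runtime's own printf-like error routine):

    uptr res = internal_ftruncate(fd, new_size);
    int err;
    if (internal_iserror(res, &err)) {
      // err now holds the errno value recovered from the syscall return.
      Report("internal_ftruncate failed with errno %d\n", err);
    }
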
--- a/libsanitizer/sanitizer_common/sanitizer_libignore.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_libignore.cc
@@ -6,10 +6,12 @@
 //===----------------------------------------------------------------------===//
 
 #include "sanitizer_platform.h"
 
 #if SANITIZER_FREEBSD || SANITIZER_LINUX
+
 #include "sanitizer_libignore.h"
 #include "sanitizer_flags.h"
+#include "sanitizer_posix.h"
 #include "sanitizer_procmaps.h"
 
 namespace __sanitizer {
@@ -17,35 +19,29 @@ namespace __sanitizer {
 LibIgnore::LibIgnore(LinkerInitialized) {
 }
 
-void LibIgnore::Init(const SuppressionContext &supp) {
+void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
   BlockingMutexLock lock(&mutex_);
-  CHECK_EQ(count_, 0);
-  const uptr n = supp.SuppressionCount();
-  for (uptr i = 0; i < n; i++) {
-    const Suppression *s = supp.SuppressionAt(i);
-    if (s->type != SuppressionLib)
-      continue;
-    if (count_ >= kMaxLibs) {
-      Report("%s: too many called_from_lib suppressions (max: %d)\n",
-             SanitizerToolName, kMaxLibs);
-      Die();
-    }
-    Lib *lib = &libs_[count_++];
-    lib->templ = internal_strdup(s->templ);
-    lib->name = 0;
-    lib->loaded = false;
+  if (count_ >= kMaxLibs) {
+    Report("%s: too many ignored libraries (max: %d)\n", SanitizerToolName,
+           kMaxLibs);
+    Die();
   }
+  Lib *lib = &libs_[count_++];
+  lib->templ = internal_strdup(name_templ);
+  lib->name = nullptr;
+  lib->real_name = nullptr;
+  lib->loaded = false;
 }
 
 void LibIgnore::OnLibraryLoaded(const char *name) {
   BlockingMutexLock lock(&mutex_);
   // Try to match suppressions with symlink target.
-  InternalScopedBuffer<char> buf(4096);
-  if (name != 0 && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
-      buf.data()[0]) {
+  InternalScopedString buf(kMaxPathLength);
+  if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
+      buf[0]) {
     for (uptr i = 0; i < count_; i++) {
       Lib *lib = &libs_[i];
-      if (!lib->loaded && lib->real_name == 0 &&
+      if (!lib->loaded && (!lib->real_name) &&
           TemplateMatch(lib->templ, name))
         lib->real_name = internal_strdup(buf.data());
     }
@@ -53,7 +49,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
 
   // Scan suppressions list and find newly loaded and unloaded libraries.
   MemoryMappingLayout proc_maps(/*cache_enabled*/false);
-  InternalScopedBuffer<char> module(4096);
+  InternalScopedString module(kMaxPathLength);
   for (uptr i = 0; i < count_; i++) {
     Lib *lib = &libs_[i];
     bool loaded = false;
@@ -63,7 +59,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
       if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
         continue;
       if (TemplateMatch(lib->templ, module.data()) ||
-          (lib->real_name != 0 &&
+          (lib->real_name &&
           internal_strcmp(lib->real_name, module.data()) == 0)) {
         if (loaded) {
           Report("%s: called_from_lib suppression '%s' is matched against"
@@ -96,9 +92,9 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
 }
 
 void LibIgnore::OnLibraryUnloaded() {
-  OnLibraryLoaded(0);
+  OnLibraryLoaded(nullptr);
 }
 
-}  // namespace __sanitizer
+} // namespace __sanitizer
 
-#endif  // #if SANITIZER_FREEBSD || SANITIZER_LINUX
+#endif // #if SANITIZER_FREEBSD || SANITIZER_LINUX

|
@ -6,8 +6,8 @@
|
|||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// LibIgnore allows to ignore all interceptors called from a particular set
|
||||
// of dynamic libraries. LibIgnore remembers all "called_from_lib" suppressions
|
||||
// from the provided SuppressionContext; finds code ranges for the libraries;
|
||||
// of dynamic libraries. LibIgnore can be initialized with several templates
|
||||
// of names of libraries to be ignored. It finds code ranges for the libraries;
|
||||
// and checks whether the provided PC value belongs to the code ranges.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
@ -17,7 +17,6 @@
|
|||
|
||||
#include "sanitizer_internal_defs.h"
|
||||
#include "sanitizer_common.h"
|
||||
#include "sanitizer_suppressions.h"
|
||||
#include "sanitizer_atomic.h"
|
||||
#include "sanitizer_mutex.h"
|
||||
|
||||
|
@ -27,8 +26,8 @@ class LibIgnore {
|
|||
public:
|
||||
explicit LibIgnore(LinkerInitialized);
|
||||
|
||||
// Fetches all "called_from_lib" suppressions from the SuppressionContext.
|
||||
void Init(const SuppressionContext &supp);
|
||||
// Must be called during initialization.
|
||||
void AddIgnoredLibrary(const char *name_templ);
|
||||
|
||||
// Must be called after a new dynamic library is loaded.
|
||||
void OnLibraryLoaded(const char *name);
|
||||
|
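With Init(SuppressionContext) replaced by AddIgnoredLibrary(), suppression
parsing now lives in each tool and LibIgnore only stores name templates. A
sketch of how a tool might drive the new API (the template strings and the
InitializeLibIgnore helper are illustrative; LINKER_INITIALIZED and the
nullptr rescan follow the code above):

    static LibIgnore lib_ignore(LINKER_INITIALIZED);

    void InitializeLibIgnore(const char *const *templs, uptr n) {
      for (uptr i = 0; i < n; i++)
        lib_ignore.AddIgnoredLibrary(templs[i]);  // e.g. "libfoo*.so*"
      // Rescan the loaded modules; nullptr means "no newly dlopened name".
      lib_ignore.OnLibraryLoaded(nullptr);
    }
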
Some files were not shown because too many files have changed in this diff.