libsanitizer: merge from master

Martin Liska 2020-10-16 10:03:04 +02:00
parent 4a70aa7a62
commit 0b997f6e07
130 changed files with 2840 additions and 1148 deletions

View file

@@ -1,4 +1,4 @@
b638b63b99d66786cb37336292604a2ae3490cfd
51ff04567b2f8d06b2062bd3ed72eab2e93e4466
The first line of this file holds the git revision number of the
last merge done from the master library sources.

View file

@@ -15,20 +15,21 @@
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
@@ -50,6 +51,22 @@ static u32 RZSize2Log(u32 rz_size) {
static AsanAllocator &get_allocator();
static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
u32 tid, u32 stack) {
u64 context = tid;
context <<= 32;
context += stack;
atomic_store(atomic_context, context, memory_order_relaxed);
}
static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
u32 &tid, u32 &stack) {
u64 context = atomic_load(atomic_context, memory_order_relaxed);
stack = context;
context >>= 32;
tid = context;
}
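These two helpers pack the thread id into the high half and the stack id into the low half of one 64-bit word, so both can be published or read with a single relaxed atomic operation. A minimal standalone sketch of the same round trip, with std::atomic standing in for the sanitizer's atomic_uint64_t (an illustration, not the library code):

#include <atomic>
#include <cassert>
#include <cstdint>

// Pack tid into the high 32 bits and stack id into the low 32 bits,
// mirroring AtomicContextStore/AtomicContextLoad above.
static void StoreCtx(std::atomic<uint64_t> &ctx, uint32_t tid, uint32_t stack) {
  ctx.store((uint64_t{tid} << 32) | stack, std::memory_order_relaxed);
}

static void LoadCtx(const std::atomic<uint64_t> &ctx, uint32_t &tid,
                    uint32_t &stack) {
  uint64_t v = ctx.load(std::memory_order_relaxed);
  stack = static_cast<uint32_t>(v);      // low half
  tid = static_cast<uint32_t>(v >> 32);  // high half
}

int main() {
  std::atomic<uint64_t> ctx{0};
  uint32_t tid = 0, stack = 0;
  StoreCtx(ctx, 42, 0xdeadbeef);
  LoadCtx(ctx, tid, stack);
  assert(tid == 42 && stack == 0xdeadbeef);
}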
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
// L -- left redzone words (0 or more bytes)
@@ -67,32 +84,59 @@ static AsanAllocator &get_allocator();
// ---------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;
struct ChunkHeader {
// 1-st 8 bytes.
u32 chunk_state : 8; // Must be first.
u32 alloc_tid : 24;
class ChunkHeader {
public:
atomic_uint8_t chunk_state;
u8 alloc_type : 2;
u8 lsan_tag : 2;
u32 free_tid : 24;
u32 from_memalign : 1;
u32 alloc_type : 2;
u32 rz_log : 3;
u32 lsan_tag : 2;
// 2-nd 8 bytes
// This field is used for small sizes. For large sizes it is equal to
// SizeClassMap::kMaxSize and the actual size is stored in the
// SecondaryAllocator's metadata.
u32 user_requested_size : 29;
// align < 8 -> 0
// else -> log2(min(align, 512)) - 2
u32 user_requested_alignment_log : 3;
u32 alloc_context_id;
u8 user_requested_alignment_log : 3;
private:
u16 user_requested_size_hi;
u32 user_requested_size_lo;
atomic_uint64_t alloc_context_id;
public:
uptr UsedSize() const {
uptr R = user_requested_size_lo;
if (sizeof(uptr) > sizeof(user_requested_size_lo))
R += (uptr)user_requested_size_hi << (8 * sizeof(user_requested_size_lo));
return R;
}
void SetUsedSize(uptr size) {
user_requested_size_lo = size;
if (sizeof(uptr) > sizeof(user_requested_size_lo)) {
size >>= (8 * sizeof(user_requested_size_lo));
user_requested_size_hi = size;
CHECK_EQ(user_requested_size_hi, size);
}
}
void SetAllocContext(u32 tid, u32 stack) {
AtomicContextStore(&alloc_context_id, tid, stack);
}
void GetAllocContext(u32 &tid, u32 &stack) const {
AtomicContextLoad(&alloc_context_id, tid, stack);
}
};
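SetUsedSize()/UsedSize() split the requested size into a 16-bit high part and a 32-bit low part, so a 48-bit size fits into the tightly packed header on 64-bit targets. A self-contained sketch of that split; SizeField is a hypothetical name used only for this illustration:

#include <cassert>
#include <cstdint>

// Mirrors SetUsedSize/UsedSize: a 48-bit size stored as u16 hi + u32 lo.
struct SizeField {
  uint16_t hi = 0;
  uint32_t lo = 0;
  void Set(uint64_t size) {
    lo = static_cast<uint32_t>(size);
    hi = static_cast<uint16_t>(size >> 32);
    assert(hi == (size >> 32));  // size must fit in 48 bits
  }
  uint64_t Get() const { return (uint64_t{hi} << 32) | lo; }
};

int main() {
  SizeField f;
  f.Set(0x123456789AULL);  // needs more than 32 bits
  assert(f.Get() == 0x123456789AULL);
}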
struct ChunkBase : ChunkHeader {
// Header2, intersects with user memory.
u32 free_context_id;
class ChunkBase : public ChunkHeader {
atomic_uint64_t free_context_id;
public:
void SetFreeContext(u32 tid, u32 stack) {
AtomicContextStore(&free_context_id, tid, stack);
}
void GetFreeContext(u32 &tid, u32 &stack) const {
AtomicContextLoad(&free_context_id, tid, stack);
}
};
static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
@@ -100,35 +144,50 @@ static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
CHUNK_ALLOCATED = 2,
CHUNK_QUARANTINE = 3
// Either just allocated by underlying allocator, but AsanChunk is not yet
// ready, or almost returned to the underlying allocator and AsanChunk is already
// meaningless.
CHUNK_INVALID = 0,
// The chunk is allocated and not yet freed.
CHUNK_ALLOCATED = 2,
// The chunk was freed and put into quarantine zone.
CHUNK_QUARANTINE = 3,
};
struct AsanChunk: ChunkBase {
class AsanChunk : public ChunkBase {
public:
uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
uptr UsedSize(bool locked_version = false) {
if (user_requested_size != SizeClassMap::kMaxSize)
return user_requested_size;
return *reinterpret_cast<uptr *>(
get_allocator().GetMetaData(AllocBeg(locked_version)));
bool AddrIsInside(uptr addr) {
return (addr >= Beg()) && (addr < Beg() + UsedSize());
}
void *AllocBeg(bool locked_version = false) {
if (from_memalign) {
if (locked_version)
return get_allocator().GetBlockBeginFastLocked(
reinterpret_cast<void *>(this));
return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
};
class LargeChunkHeader {
static constexpr uptr kAllocBegMagic =
FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
atomic_uintptr_t magic;
AsanChunk *chunk_header;
public:
AsanChunk *Get() const {
return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
? chunk_header
: nullptr;
}
void Set(AsanChunk *p) {
if (p) {
chunk_header = p;
atomic_store(&magic, kAllocBegMagic, memory_order_release);
return;
}
uptr old = kAllocBegMagic;
if (!atomic_compare_exchange_strong(&magic, &old, 0,
memory_order_release)) {
CHECK_EQ(old, kAllocBegMagic);
}
return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
}
bool AddrIsInside(uptr addr, bool locked_version = false) {
return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
}
};
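The ordering inside LargeChunkHeader is what makes the lookup safe: Set() writes the chunk pointer first and then publishes the magic with a release store, while Get() loads the magic with acquire, so any reader that observes kAllocBegMagic also sees the pointer written before it. A reduced sketch of this publish/read pattern, assuming std::atomic in place of the sanitizer atomics:

#include <atomic>
#include <cassert>
#include <cstdint>

struct Header {
  std::atomic<uintptr_t> magic{0};
  void *payload = nullptr;

  static constexpr uintptr_t kMagic = 0xCC6E96B9;

  void Publish(void *p) {
    payload = p;                                     // 1: plain store
    magic.store(kMagic, std::memory_order_release);  // 2: publish
  }
  void *TryGet() {
    // Acquire pairs with the release above: if we observe kMagic,
    // the payload written before it is visible too.
    return magic.load(std::memory_order_acquire) == kMagic ? payload : nullptr;
  }
};

int main() {
  Header h;
  int x = 0;
  assert(h.TryGet() == nullptr);  // magic not published yet
  h.Publish(&x);
  assert(h.TryGet() == &x);
}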
@@ -139,22 +198,22 @@ struct QuarantineCallback {
}
void Recycle(AsanChunk *m) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
CHECK_NE(m->alloc_tid, kInvalidTid);
CHECK_NE(m->free_tid, kInvalidTid);
void *p = get_allocator().GetBlockBegin(m);
if (p != m) {
// Clear the magic value, as allocator internals may overwrite the
// contents of deallocated chunk, confusing GetAsanChunk lookup.
reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
}
u8 old_chunk_state = CHUNK_QUARANTINE;
if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
CHUNK_INVALID, memory_order_acquire)) {
CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
}
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
kAsanHeapLeftRedzoneMagic);
void *p = reinterpret_cast<void *>(m->AllocBeg());
if (p != m) {
uptr *alloc_magic = reinterpret_cast<uptr *>(p);
CHECK_EQ(alloc_magic[0], kAllocBegMagic);
// Clear the magic value, as allocator internals may overwrite the
// contents of deallocated chunk, confusing GetAsanChunk lookup.
alloc_magic[0] = 0;
CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
}
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
@@ -299,23 +358,26 @@ struct Allocator {
// This could be a user-facing chunk (with redzones), or some internal
// housekeeping chunk, like TransferBatch. Start by assuming the former.
AsanChunk *ac = GetAsanChunk((void *)chunk);
uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
uptr beg = ac->Beg();
uptr end = ac->Beg() + ac->UsedSize(true);
uptr chunk_end = chunk + allocated_size;
if (chunk < beg && beg < end && end <= chunk_end &&
ac->chunk_state == CHUNK_ALLOCATED) {
// Looks like a valid AsanChunk in use, poison redzones only.
PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
FastPoisonShadowPartialRightRedzone(
end_aligned_down, end - end_aligned_down,
chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
} else {
// This is either not an AsanChunk or freed or quarantined AsanChunk.
// In either case, poison everything.
PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
CHUNK_ALLOCATED) {
uptr beg = ac->Beg();
uptr end = ac->Beg() + ac->UsedSize();
uptr chunk_end = chunk + allocated_size;
if (chunk < beg && beg < end && end <= chunk_end) {
// Looks like a valid AsanChunk in use, poison redzones only.
PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
FastPoisonShadowPartialRightRedzone(
end_aligned_down, end - end_aligned_down,
chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
return;
}
}
// This is either not an AsanChunk or freed or quarantined AsanChunk.
// In either case, poison everything.
PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
}
void ReInitialize(const AllocatorOptions &options) {
@@ -348,17 +410,18 @@ struct Allocator {
// -------------------- Helper methods. -------------------------
uptr ComputeRZLog(uptr user_requested_size) {
u32 rz_log =
user_requested_size <= 64 - 16 ? 0 :
user_requested_size <= 128 - 32 ? 1 :
user_requested_size <= 512 - 64 ? 2 :
user_requested_size <= 4096 - 128 ? 3 :
user_requested_size <= (1 << 14) - 256 ? 4 :
user_requested_size <= (1 << 15) - 512 ? 5 :
user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
u32 rz_log = user_requested_size <= 64 - 16 ? 0
: user_requested_size <= 128 - 32 ? 1
: user_requested_size <= 512 - 64 ? 2
: user_requested_size <= 4096 - 128 ? 3
: user_requested_size <= (1 << 14) - 256 ? 4
: user_requested_size <= (1 << 15) - 512 ? 5
: user_requested_size <= (1 << 16) - 1024 ? 6
: 7;
u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
}
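As a worked example of the bucket table: an 80-byte request satisfies user_requested_size <= 128 - 32, so rz_log = 1 and the redzone is 16 << 1 = 32 bytes, while a 100-byte request already spills into the next bucket. The sketch below re-derives those values; ComputeRZLogSketch and RZLog2SizeSketch are hedged reconstructions for illustration, not the library functions:

#include <cassert>
#include <cstdint>

// Redzone sizes are 16 << rz_log (16, 32, 64, ..., 2048 bytes).
static uint32_t RZLog2SizeSketch(uint32_t rz_log) { return 16u << rz_log; }

static uint32_t ComputeRZLogSketch(uint64_t n) {
  return n <= 64 - 16              ? 0
         : n <= 128 - 32           ? 1
         : n <= 512 - 64           ? 2
         : n <= 4096 - 128         ? 3
         : n <= (1 << 14) - 256    ? 4
         : n <= (1 << 15) - 512    ? 5
         : n <= (1 << 16) - 1024   ? 6
                                   : 7;
}

int main() {
  assert(ComputeRZLogSketch(80) == 1);   // 80 <= 96
  assert(RZLog2SizeSketch(1) == 32);     // 32-byte redzone
  assert(ComputeRZLogSketch(100) == 2);  // 100 > 96, <= 448
  assert(RZLog2SizeSketch(7) == 2048);   // largest redzone
}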
static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
@@ -378,16 +441,23 @@ struct Allocator {
// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
AsanChunk *right_chunk) {
if (!left_chunk)
return right_chunk;
if (!right_chunk)
return left_chunk;
// Prefer an allocated chunk over freed chunk and freed chunk
// over available chunk.
if (left_chunk->chunk_state != right_chunk->chunk_state) {
if (left_chunk->chunk_state == CHUNK_ALLOCATED)
u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
u8 right_state =
atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
if (left_state != right_state) {
if (left_state == CHUNK_ALLOCATED)
return left_chunk;
if (right_chunk->chunk_state == CHUNK_ALLOCATED)
if (right_state == CHUNK_ALLOCATED)
return right_chunk;
if (left_chunk->chunk_state == CHUNK_QUARANTINE)
if (left_state == CHUNK_QUARANTINE)
return left_chunk;
if (right_chunk->chunk_state == CHUNK_QUARANTINE)
if (right_state == CHUNK_QUARANTINE)
return right_chunk;
}
// Same chunk_state: choose based on offset.
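The preference order is: an allocated chunk beats a quarantined one, and a quarantined one beats anything else; only when both states are equal does the offset decide. A compact sketch of that ranking (Rank is a hypothetical helper for illustration):

#include <cassert>
#include <cstdint>

enum : uint8_t { CHUNK_INVALID = 0, CHUNK_ALLOCATED = 2, CHUNK_QUARANTINE = 3 };

// Higher rank wins when two chunks straddle the faulting address.
static int Rank(uint8_t state) {
  return state == CHUNK_ALLOCATED ? 2 : state == CHUNK_QUARANTINE ? 1 : 0;
}

int main() {
  assert(Rank(CHUNK_ALLOCATED) > Rank(CHUNK_QUARANTINE));
  assert(Rank(CHUNK_QUARANTINE) > Rank(CHUNK_INVALID));
}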
@@ -402,10 +472,11 @@ struct Allocator {
bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
AsanChunk *m = GetAsanChunkByAddr(addr);
if (!m) return false;
if (m->chunk_state != CHUNK_ALLOCATED) return false;
if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
return false;
if (m->Beg() != addr) return false;
atomic_store((atomic_uint32_t *)&m->alloc_context_id, StackDepotPut(*stack),
memory_order_relaxed);
AsanThread *t = GetCurrentThread();
m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
return true;
}
@@ -442,13 +513,10 @@ struct Allocator {
uptr needed_size = rounded_size + rz_size;
if (alignment > min_alignment)
needed_size += alignment;
bool using_primary_allocator = true;
// If we are allocating from the secondary allocator, there will be no
// automatic right redzone, so add the right redzone manually.
if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
if (!PrimaryAllocator::CanAllocate(needed_size, alignment))
needed_size += rz_size;
using_primary_allocator = false;
}
CHECK(IsAligned(needed_size, min_alignment));
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
size > max_user_defined_malloc_size) {
@@ -490,8 +558,7 @@ struct Allocator {
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
uptr alloc_end = alloc_beg + needed_size;
uptr beg_plus_redzone = alloc_beg + rz_size;
uptr user_beg = beg_plus_redzone;
uptr user_beg = alloc_beg + rz_size;
if (!IsAligned(user_beg, alignment))
user_beg = RoundUpTo(user_beg, alignment);
uptr user_end = user_beg + size;
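Tracing a concrete request through this arithmetic: malloc(100) with the default 8-byte alignment gets a 64-byte redzone (rz_log = 2), so rounded_size = 104, needed_size = 168, and the user pointer sits exactly rz_size bytes past alloc_beg. A standalone check of that layout math, with a hypothetical alloc_beg and RoundUpTo mirroring the sanitizer helper:

#include <cassert>
#include <cstdint>

static uint64_t RoundUpTo(uint64_t x, uint64_t a) { return (x + a - 1) & ~(a - 1); }

int main() {
  const uint64_t size = 100, alignment = 8;
  const uint64_t rz_size = 64;                    // 16 << rz_log, rz_log = 2
  const uint64_t rounded_size = RoundUpTo(size, alignment);
  const uint64_t needed_size = rounded_size + rz_size;  // 104 + 64
  assert(needed_size == 168);

  const uint64_t alloc_beg = 0x10000;             // hypothetical block start
  uint64_t user_beg = alloc_beg + rz_size;        // first user byte
  user_beg = RoundUpTo(user_beg, alignment);      // already aligned here
  const uint64_t chunk_beg = user_beg - 16;       // kChunkHeaderSize == 16
  assert(user_beg == 0x10040 && chunk_beg == 0x10030);
}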
@@ -499,31 +566,11 @@ struct Allocator {
uptr chunk_beg = user_beg - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
m->alloc_type = alloc_type;
m->rz_log = rz_log;
u32 alloc_tid = t ? t->tid() : 0;
m->alloc_tid = alloc_tid;
CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
m->free_tid = kInvalidTid;
m->from_memalign = user_beg != beg_plus_redzone;
if (alloc_beg != chunk_beg) {
CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
}
if (using_primary_allocator) {
CHECK(size);
m->user_requested_size = size;
CHECK(allocator.FromPrimary(allocated));
} else {
CHECK(!allocator.FromPrimary(allocated));
m->user_requested_size = SizeClassMap::kMaxSize;
uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
meta[0] = size;
meta[1] = chunk_beg;
}
CHECK(size);
m->SetUsedSize(size);
m->user_requested_alignment_log = user_requested_alignment_log;
m->alloc_context_id = StackDepotPut(*stack);
m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
uptr size_rounded_down_to_granularity =
RoundDownTo(size, SHADOW_GRANULARITY);
@@ -556,7 +603,11 @@ struct Allocator {
: __lsan::kDirectlyLeaked;
#endif
// Must be the last mutation of metadata in this function.
atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
if (alloc_beg != chunk_beg) {
CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
}
ASAN_MALLOC_HOOK(res, size);
return res;
}
@@ -564,10 +615,10 @@ struct Allocator {
// Set quarantine flag if chunk is allocated, issue ASan error report on
// available and quarantined chunks. Return true on success, false otherwise.
bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
BufferedStackTrace *stack) {
BufferedStackTrace *stack) {
u8 old_chunk_state = CHUNK_ALLOCATED;
// Flip the chunk_state atomically to avoid race on double-free.
if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
CHUNK_QUARANTINE,
memory_order_acquire)) {
ReportInvalidFree(ptr, old_chunk_state, stack);
@@ -575,19 +626,18 @@ struct Allocator {
return false;
}
CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
// It was user data.
m->SetFreeContext(kInvalidTid, 0);
return true;
}
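Because the ALLOCATED to QUARANTINE transition above is a single compare-and-swap, only one of two racing frees can succeed; the loser reads back CHUNK_QUARANTINE and reports the invalid free. A reduced sketch of that double-free gate, using std::atomic as a stand-in for the sanitizer atomics:

#include <atomic>
#include <cassert>
#include <cstdint>

enum : uint8_t { CHUNK_INVALID = 0, CHUNK_ALLOCATED = 2, CHUNK_QUARANTINE = 3 };

// Returns true for the first (valid) free, false for a double free.
static bool TryQuarantine(std::atomic<uint8_t> &state) {
  uint8_t expected = CHUNK_ALLOCATED;
  return state.compare_exchange_strong(expected, CHUNK_QUARANTINE,
                                       std::memory_order_acquire);
}

int main() {
  std::atomic<uint8_t> state{CHUNK_ALLOCATED};
  assert(TryQuarantine(state));   // first free flips the state
  assert(!TryQuarantine(state));  // second free fails: report invalid free
}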
// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlagIfAllocated.
void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
CHECK_GE(m->alloc_tid, 0);
if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
CHECK_EQ(m->free_tid, kInvalidTid);
CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
CHUNK_QUARANTINE);
AsanThread *t = GetCurrentThread();
m->free_tid = t ? t->tid() : 0;
m->free_context_id = StackDepotPut(*stack);
m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));
Flags &fl = *flags();
if (fl.max_free_fill_size > 0) {
@@ -676,7 +726,7 @@ struct Allocator {
void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
if (new_ptr) {
u8 chunk_state = m->chunk_state;
u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
if (chunk_state != CHUNK_ALLOCATED)
ReportInvalidFree(old_ptr, chunk_state, stack);
CHECK_NE(REAL(memcpy), nullptr);
@@ -719,17 +769,24 @@ struct Allocator {
// -------------------------- Chunk lookup ----------------------
// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
// Returns nullptr if AsanChunk is not yet initialized just after
// get_allocator().Allocate(), or is being destroyed just before
// get_allocator().Deallocate().
AsanChunk *GetAsanChunk(void *alloc_beg) {
if (!alloc_beg) return nullptr;
if (!allocator.FromPrimary(alloc_beg)) {
uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
return m;
if (!alloc_beg)
return nullptr;
AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
if (!p) {
if (!allocator.FromPrimary(alloc_beg))
return nullptr;
p = reinterpret_cast<AsanChunk *>(alloc_beg);
}
uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
if (alloc_magic[0] == kAllocBegMagic)
return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
return reinterpret_cast<AsanChunk *>(alloc_beg);
u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
// It does not guarantee that the chunk is initialized, but the chunk is
// definitely not valid for any other state value.
if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
return p;
return nullptr;
}
AsanChunk *GetAsanChunkByAddr(uptr p) {
@@ -747,16 +804,16 @@ struct Allocator {
uptr AllocationSize(uptr p) {
AsanChunk *m = GetAsanChunkByAddr(p);
if (!m) return 0;
if (m->chunk_state != CHUNK_ALLOCATED) return 0;
if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
return 0;
if (m->Beg() != p) return 0;
return m->UsedSize();
}
AsanChunkView FindHeapChunkByAddress(uptr addr) {
AsanChunk *m1 = GetAsanChunkByAddr(addr);
if (!m1) return AsanChunkView(m1);
sptr offset = 0;
if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
// The address is in the chunk's left redzone, so maybe it is actually
// a right buffer overflow from the other chunk to the left.
// Search a bit to the left to see if there is another chunk.
@@ -813,13 +870,16 @@ static AsanAllocator &get_allocator() {
}
bool AsanChunkView::IsValid() const {
return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
CHUNK_INVALID;
}
bool AsanChunkView::IsAllocated() const {
return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
@@ -827,8 +887,23 @@ uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}
uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
uptr AsanChunkView::AllocTid() const {
u32 tid = 0;
u32 stack = 0;
chunk_->GetAllocContext(tid, stack);
return tid;
}
uptr AsanChunkView::FreeTid() const {
if (!IsQuarantined())
return kInvalidTid;
u32 tid = 0;
u32 stack = 0;
chunk_->GetFreeContext(tid, stack);
return tid;
}
AllocType AsanChunkView::GetAllocType() const {
return (AllocType)chunk_->alloc_type;
}
@@ -840,8 +915,21 @@ static StackTrace GetStackTraceFromId(u32 id) {
return res;
}
u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
u32 AsanChunkView::GetAllocStackId() const {
u32 tid = 0;
u32 stack = 0;
chunk_->GetAllocContext(tid, stack);
return stack;
}
u32 AsanChunkView::GetFreeStackId() const {
if (!IsQuarantined())
return 0;
u32 tid = 0;
u32 stack = 0;
chunk_->GetFreeContext(tid, stack);
return stack;
}
StackTrace AsanChunkView::GetAllocStack() const {
return GetStackTraceFromId(GetAllocStackId());
@@ -1005,7 +1093,7 @@ void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
instance.SetRssLimitExceeded(limit_exceeded);
}
} // namespace __asan
} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
@@ -1022,34 +1110,36 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
*end = *begin + sizeof(__asan::get_allocator());
}
uptr PointsIntoChunk(void* p) {
uptr PointsIntoChunk(void *p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
if (!m) return 0;
uptr chunk = m->Beg();
if (m->chunk_state != __asan::CHUNK_ALLOCATED)
if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
__asan::CHUNK_ALLOCATED)
return 0;
if (m->AddrIsInside(addr, /*locked_version=*/true))
uptr chunk = m->Beg();
if (m->AddrIsInside(addr))
return chunk;
if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
addr))
if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
return chunk;
return 0;
}
uptr GetUserBegin(uptr chunk) {
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
CHECK(m);
return m->Beg();
return m ? m->Beg() : 0;
}
LsanMetadata::LsanMetadata(uptr chunk) {
metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
: nullptr;
}
bool LsanMetadata::allocated() const {
if (!metadata_)
return false;
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->chunk_state == __asan::CHUNK_ALLOCATED;
return atomic_load(&m->chunk_state, memory_order_relaxed) ==
__asan::CHUNK_ALLOCATED;
}
ChunkTag LsanMetadata::tag() const {
@@ -1064,12 +1154,15 @@ void LsanMetadata::set_tag(ChunkTag value) {
uptr LsanMetadata::requested_size() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->UsedSize(/*locked_version=*/true);
return m->UsedSize();
}
u32 LsanMetadata::stack_trace_id() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->alloc_context_id;
u32 tid = 0;
u32 stack = 0;
m->GetAllocContext(tid, stack);
return stack;
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
@@ -1079,15 +1172,16 @@ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
if (!m) return kIgnoreObjectInvalid;
if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
if (m->lsan_tag == kIgnored)
return kIgnoreObjectAlreadyIgnored;
m->lsan_tag = __lsan::kIgnored;
return kIgnoreObjectSuccess;
} else {
if (!m ||
(atomic_load(&m->chunk_state, memory_order_acquire) !=
__asan::CHUNK_ALLOCATED) ||
!m->AddrIsInside(addr)) {
return kIgnoreObjectInvalid;
}
if (m->lsan_tag == kIgnored)
return kIgnoreObjectAlreadyIgnored;
m->lsan_tag = __lsan::kIgnored;
return kIgnoreObjectSuccess;
}
} // namespace __lsan

View file

@@ -15,10 +15,11 @@
#define ASAN_ALLOCATOR_H
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_platform.h"
namespace __asan {
@@ -28,7 +29,7 @@ enum AllocType {
FROM_NEW_BR = 3 // Memory block came from operator new [ ]
};
struct AsanChunk;
class AsanChunk;
struct AllocatorOptions {
u32 quarantine_size_mb;
@@ -132,6 +133,10 @@ typedef DefaultSizeClassMap SizeClassMap;
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
#elif SANITIZER_RISCV64
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryDenseSizeClassMap SizeClassMap;
# elif defined(__aarch64__)
// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA
// so there is no need for different values for different VMAs.
@@ -171,7 +176,7 @@ template <typename AddressSpaceViewTy>
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = 16;
static const uptr kMetadataSize = 0;
typedef __asan::SizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
using AddressSpaceView = AddressSpaceViewTy;

View file

@@ -26,10 +26,6 @@ namespace __asan {
Flags asan_flags_dont_use_directly; // use via flags().
static const char *MaybeCallAsanDefaultOptions() {
return (&__asan_default_options) ? __asan_default_options() : "";
}
static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
#ifdef ASAN_DEFAULT_OPTIONS
return SANITIZER_STRINGIFY(ASAN_DEFAULT_OPTIONS);
@@ -108,14 +104,14 @@ void InitializeFlags() {
asan_parser.ParseString(asan_compile_def);
// Override from user-specified string.
const char *asan_default_options = MaybeCallAsanDefaultOptions();
const char *asan_default_options = __asan_default_options();
asan_parser.ParseString(asan_default_options);
#if CAN_SANITIZE_UB
const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
const char *ubsan_default_options = __ubsan_default_options();
ubsan_parser.ParseString(ubsan_default_options);
#endif
#if CAN_SANITIZE_LEAKS
const char *lsan_default_options = __lsan::MaybeCallLsanDefaultOptions();
const char *lsan_default_options = __lsan_default_options();
lsan_parser.ParseString(lsan_default_options);
#endif

View file

@@ -62,6 +62,8 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
UNIMPLEMENTED();
}
bool PlatformUnpoisonStacks() { return false; }
// We can use a plain thread_local variable for TSD.
static thread_local void *per_thread;
@@ -196,6 +198,10 @@ bool HandleDlopenInit() {
return false;
}
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
__sanitizer_fill_shadow(p, size, 0, 0);
}
} // namespace __asan
// These are declared (in extern "C") by <zircon/sanitizer.h>.

View file

@@ -154,6 +154,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
}
}
// Check ODR violation for a given global G by checking if it's already poisoned.
// We use this method in case the compiler doesn't use private aliases for global
// variables.
static void CheckODRViolationViaPoisoning(const Global *g) {
if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
// This check may not be enough: if the first global is much larger, the
// entire redzone of the second global may be within the first global.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
if (g->beg == l->g->beg &&
(flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
!IsODRViolationSuppressed(g->name))
ReportODRViolation(g, FindRegistrationSite(g),
l->g, FindRegistrationSite(l->g));
}
}
}
// Clang provides two different ways to protect global variables: it can
// poison the global itself or its private alias. In the former case we may
// poison the same symbol multiple times, which can help us to
@@ -199,6 +216,8 @@ static void RegisterGlobal(const Global *g) {
// where two globals with the same name are defined in different modules.
if (UseODRIndicator(g))
CheckODRViolationViaIndicator(g);
else
CheckODRViolationViaPoisoning(g);
}
if (CanPoisonMemory())
PoisonRedZones(*g);

View file

@@ -13,9 +13,10 @@
#ifndef ASAN_INTERCEPTORS_H
#define ASAN_INTERCEPTORS_H
#include "asan_internal.h"
#include "asan_interceptors_memintrinsics.h"
#include "asan_internal.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
namespace __asan {
@@ -80,12 +81,7 @@ void InitializePlatformInterceptors();
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
!SANITIZER_NETBSD
# define ASAN_INTERCEPT___CXA_THROW 1
# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
|| ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
# else
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
# endif
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
# else
@@ -116,8 +112,9 @@ void InitializePlatformInterceptors();
# define ASAN_INTERCEPT___STRDUP 0
#endif
#if SANITIZER_LINUX && (defined(__arm__) || defined(__aarch64__) || \
defined(__i386__) || defined(__x86_64__))
#if SANITIZER_LINUX && \
(defined(__arm__) || defined(__aarch64__) || defined(__i386__) || \
defined(__x86_64__) || SANITIZER_RISCV64)
# define ASAN_INTERCEPT_VFORK 1
#else
# define ASAN_INTERCEPT_VFORK 0

View file

@@ -5,8 +5,9 @@
#define COMMON_INTERCEPTOR_HANDLE_VFORK __asan_handle_vfork
#include "sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S"
#endif
NO_EXEC_STACK_DIRECTIVE

View file

@@ -173,8 +173,8 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char* __asan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE
const char *__asan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE
extern uptr __asan_shadow_memory_dynamic_address;

View file

@@ -83,6 +83,16 @@ void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();
// Unpoisons platform-specific stacks.
// Returns true if all stacks have been unpoisoned.
bool PlatformUnpoisonStacks();
// asan_rtl.cpp
// Unpoison a region containing a stack.
// Performs a sanity check and warns if the bounds don't look right.
// The warning contains the type string to identify the stack type.
void UnpoisonStack(uptr bottom, uptr top, const char *type);
// asan_thread.cpp
AsanThread *CreateMainThread();
@@ -108,8 +118,6 @@ void AppendToErrorMessageBuffer(const char *buffer);
void *AsanDlSymNext(const char *sym);
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
// Returns `true` iff most of ASan init process should be skipped due to the
// ASan library being loaded via `dlopen()`. Platforms may perform any
// `dlopen()` specific initialization inside this function.

View file

@@ -87,25 +87,12 @@ void *AsanDoesNotSupportStaticLinkage() {
return &_DYNAMIC; // defined in link.h
}
static void UnmapFromTo(uptr from, uptr to) {
CHECK(to >= from);
if (to == from) return;
uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
if (UNLIKELY(internal_iserror(res))) {
Report(
"ERROR: AddresSanitizer failed to unmap 0x%zx (%zd) bytes at address "
"%p\n",
to - from, to - from, from);
CHECK("unable to unmap" && 0);
}
}
#if ASAN_PREMAP_SHADOW
uptr FindPremappedShadowStart() {
uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
uptr granularity = GetMmapGranularity();
uptr shadow_start = reinterpret_cast<uptr>(&__asan_shadow);
uptr premap_shadow_size = PremapShadowSize();
uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
// We may have mapped too much. Release extra memory.
UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
return shadow_start;
@@ -113,31 +100,26 @@ uptr FindPremappedShadowStart() {
#endif
uptr FindDynamicShadowStart() {
uptr shadow_size_bytes = MemToShadowSize(kHighMemEnd);
#if ASAN_PREMAP_SHADOW
if (!PremapShadowFailed())
return FindPremappedShadowStart();
return FindPremappedShadowStart(shadow_size_bytes);
#endif
uptr granularity = GetMmapGranularity();
uptr alignment = granularity * 8;
uptr left_padding = granularity;
uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
uptr map_size = shadow_size + left_padding + alignment;
uptr map_start = (uptr)MmapNoAccess(map_size);
CHECK_NE(map_start, ~(uptr)0);
uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
UnmapFromTo(map_start, shadow_start - left_padding);
UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
return shadow_start;
return MapDynamicShadow(shadow_size_bytes, SHADOW_SCALE,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
// Since asan's mapping is compacting, the shadow chunk may not be
// page-aligned, so we only flush the page-aligned portion.
ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
#if SANITIZER_ANDROID
// FIXME: should we do anything for Android?
void AsanCheckDynamicRTPrereqs() {}

View file

@@ -55,46 +55,8 @@ void *AsanDoesNotSupportStaticLinkage() {
}
uptr FindDynamicShadowStart() {
uptr granularity = GetMmapGranularity();
uptr alignment = 8 * granularity;
uptr left_padding = granularity;
uptr space_size = kHighShadowEnd + left_padding;
uptr largest_gap_found = 0;
uptr max_occupied_addr = 0;
VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
uptr shadow_start =
FindAvailableMemoryRange(space_size, alignment, granularity,
&largest_gap_found, &max_occupied_addr);
// If the shadow doesn't fit, restrict the address space to make it fit.
if (shadow_start == 0) {
VReport(
2,
"Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
largest_gap_found, max_occupied_addr);
uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment);
if (new_max_vm < max_occupied_addr) {
Report("Unable to find a memory range for dynamic shadow.\n");
Report(
"space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
"new_max_vm = %p\n",
space_size, largest_gap_found, max_occupied_addr, new_max_vm);
CHECK(0 && "cannot place shadow");
}
RestrictMemoryToMaxAddress(new_max_vm);
kHighMemEnd = new_max_vm - 1;
space_size = kHighShadowEnd + left_padding;
VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
nullptr, nullptr);
if (shadow_start == 0) {
Report("Unable to find a memory range after restricting VM.\n");
CHECK(0 && "cannot place shadow after restricting vm");
}
}
CHECK_NE((uptr)0, shadow_start);
CHECK(IsAligned(shadow_start, alignment));
return shadow_start;
return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
// No-op. Mac does not support static linkage anyway.
@@ -127,6 +89,12 @@ void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
op(globals, size / sizeof(__asan_global));
}
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
// Since asan's mapping is compacting, the shadow chunk may not be
// page-aligned, so we only flush the page-aligned portion.
ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}

View file

@@ -34,7 +34,7 @@ static uptr last_dlsym_alloc_size_in_words;
static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 4096 : 1024;
static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
static INLINE bool IsInDlsymAllocPool(const void *ptr) {
static inline bool IsInDlsymAllocPool(const void *ptr) {
uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]);
}
@@ -95,12 +95,12 @@ bool IsFromLocalPool(const void *ptr) {
}
#endif
static INLINE bool MaybeInDlsym() {
static inline bool MaybeInDlsym() {
// Fuchsia doesn't use dlsym-based interceptors.
return !SANITIZER_FUCHSIA && asan_init_is_running;
}
static INLINE bool UseLocalPool() {
static inline bool UseLocalPool() {
return EarlyMalloc() || MaybeInDlsym();
}
@@ -120,19 +120,19 @@ static void *ReallocFromLocalPool(void *ptr, uptr size) {
}
INTERCEPTOR(void, free, void *ptr) {
GET_STACK_TRACE_FREE;
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
DeallocateFromLocalPool(ptr);
return;
}
GET_STACK_TRACE_FREE;
asan_free(ptr, &stack, FROM_MALLOC);
}
#if SANITIZER_INTERCEPT_CFREE
INTERCEPTOR(void, cfree, void *ptr) {
GET_STACK_TRACE_FREE;
if (UNLIKELY(IsInDlsymAllocPool(ptr)))
return;
GET_STACK_TRACE_FREE;
asan_free(ptr, &stack, FROM_MALLOC);
}
#endif // SANITIZER_INTERCEPT_CFREE

View file

@@ -17,7 +17,7 @@
#include "sanitizer_common/sanitizer_platform.h"
#include "asan_internal.h"
static INLINE bool EarlyMalloc() {
static inline bool EarlyMalloc() {
return SANITIZER_RTEMS &&
(!__asan::asan_inited || __asan::asan_init_is_running);
}

View file

@@ -79,6 +79,20 @@
// || `[0x1000000000, 0x11ffffffff]` || lowshadow ||
// || `[0x0000000000, 0x0fffffffff]` || lowmem ||
//
// RISC-V has only 38 bits for the task size.
// The low mem size is set with kRiscv64_ShadowOffset64 in
// compiler-rt/lib/asan/asan_allocator.h and in
// llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp with
// kRiscv64_ShadowOffset64. The high mem top border is set with
// GetMaxVirtualAddress() in
// compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
// Default Linux/RISCV64 Sv39/Sv48 mapping:
// || `[0x000820000000, 0x003fffffffff]` || HighMem ||
// || `[0x000124000000, 0x00081fffffff]` || HighShadow ||
// || `[0x000024000000, 0x000123ffffff]` || ShadowGap ||
// || `[0x000020000000, 0x000023ffffff]` || LowShadow ||
// || `[0x000000000000, 0x00001fffffff]` || LowMem ||
//
// Default Linux/AArch64 (42-bit VMA) mapping:
// || `[0x10000000000, 0x3ffffffffff]` || highmem ||
// || `[0x0a000000000, 0x0ffffffffff]` || highshadow ||
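All of the layouts above follow the same formula, shadow = (addr >> SHADOW_SCALE) + SHADOW_OFFSET. For the RISC-V table, with SHADOW_SCALE = 3 and SHADOW_OFFSET = kRiscv64_ShadowOffset64 = 0x20000000, the first HighMem byte 0x000820000000 maps to 0x124000000, exactly the start of HighShadow. A quick arithmetic check of both endpoints:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kShadowScale = 3;               // 8 app bytes per shadow byte
  const uint64_t kShadowOffset = 0x20000000ULL;  // kRiscv64_ShadowOffset64
  auto MemToShadow = [&](uint64_t a) { return (a >> kShadowScale) + kShadowOffset; };

  assert(MemToShadow(0x000820000000ULL) == 0x124000000ULL);  // HighMem -> HighShadow start
  assert(MemToShadow(0x0ULL) == 0x20000000ULL);              // LowMem  -> LowShadow start
}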
@@ -161,9 +175,10 @@ static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset =
0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G.
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kRiscv64_ShadowOffset64 = 0x20000000;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43; // 0x80000000000
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
@@ -206,6 +221,10 @@ static const u64 kMyriadCacheBitMask32 = 0x40000000ULL;
#else
# if SANITIZER_IOS
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# elif SANITIZER_MAC && defined(__aarch64__)
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
#elif SANITIZER_RISCV64
#define SHADOW_OFFSET kRiscv64_ShadowOffset64
# elif defined(__aarch64__)
# define SHADOW_OFFSET kAArch64_ShadowOffset64
# elif defined(__powerpc64__)
@@ -355,6 +374,8 @@ static inline bool AddrIsInShadowGap(uptr a) {
namespace __asan {
static inline uptr MemToShadowSize(uptr size) { return size >> SHADOW_SCALE; }
static inline bool AddrIsInMem(uptr a) {
PROFILE_ASAN_MAPPING();
return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||

View file

@@ -62,12 +62,6 @@ struct ShadowSegmentEndpoint {
}
};
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
// Since asan's mapping is compacting, the shadow chunk may not be
// page-aligned, so we only flush the page-aligned portion.
ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
uptr end = ptr + size;
if (Verbosity()) {

View file

@@ -17,6 +17,7 @@
#include "asan_internal.h"
#include "asan_interceptors.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_libc.h"
@@ -24,6 +25,7 @@
#include "sanitizer_common/sanitizer_procmaps.h"
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
@@ -37,6 +39,32 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
ReportDeadlySignal(sig);
}
bool PlatformUnpoisonStacks() {
stack_t signal_stack;
CHECK_EQ(0, sigaltstack(nullptr, &signal_stack));
uptr sigalt_bottom = (uptr)signal_stack.ss_sp;
uptr sigalt_top = (uptr)((char *)signal_stack.ss_sp + signal_stack.ss_size);
// If we're executing on the signal alternate stack AND the Linux flag
// SS_AUTODISARM was used, then we cannot get the signal alternate stack
// bounds from sigaltstack -- sigaltstack's output looks just as if no
// alternate stack has ever been set up.
// We're always unpoisoning the signal alternate stack to support jumping
// between the default stack and signal alternate stack.
if (signal_stack.ss_flags != SS_DISABLE)
UnpoisonStack(sigalt_bottom, sigalt_top, "sigalt");
if (signal_stack.ss_flags != SS_ONSTACK)
return false;
// Since we're on the signal alternate stack, we cannot find the DEFAULT
// stack bottom using a local variable.
uptr default_bottom, tls_addr, tls_size, stack_size;
GetThreadStackAndTls(/*main=*/false, &default_bottom, &stack_size, &tls_addr,
&tls_size);
UnpoisonStack(default_bottom, default_bottom + stack_size, "default");
return true;
}
// ---------------------- TSD ---------------- {{{1
#if SANITIZER_NETBSD && !ASAN_DYNAMIC

View file

@@ -32,22 +32,8 @@ uptr PremapShadowSize() {
// Returns an address aligned to 8 pages, such that one page on the left and
// PremapShadowSize() bytes on the right of it are mapped r/o.
uptr PremapShadow() {
uptr granularity = GetMmapGranularity();
uptr alignment = granularity * 8;
uptr left_padding = granularity;
uptr shadow_size = PremapShadowSize();
uptr map_size = shadow_size + left_padding + alignment;
uptr map_start = (uptr)MmapNoAccess(map_size);
CHECK_NE(map_start, ~(uptr)0);
uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
uptr shadow_end = shadow_start + shadow_size;
internal_munmap(reinterpret_cast<void *>(map_start),
shadow_start - left_padding - map_start);
internal_munmap(reinterpret_cast<void *>(shadow_end),
map_start + map_size - shadow_end);
return shadow_start;
return MapDynamicShadow(PremapShadowSize(), /*mmap_alignment_scale*/ 3,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
bool PremapShadowFailed() {

View file

@@ -411,7 +411,7 @@ static bool IsInvalidPointerPair(uptr a1, uptr a2) {
return false;
}
static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
static inline void CheckForInvalidPointerPair(void *p1, void *p2) {
switch (flags()->detect_invalid_pointer_pairs) {
case 0:
return;

View file

@@ -50,6 +50,12 @@ void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
// Since asan's mapping is compacting, the shadow chunk may not be
// page-aligned, so we only flush the page-aligned portion.
ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {}
void InitializeAsanInterceptors() {}
@@ -64,6 +70,8 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
UNIMPLEMENTED();
}
bool PlatformUnpoisonStacks() { return false; }
void EarlyInit() {
// Provide early initialization of shadow memory so that
// instrumented code running before full initialization will not

View file

@@ -319,7 +319,7 @@ static void InitializeHighMemEnd() {
kHighMemEnd = GetMaxUserVirtualAddress();
// Increase kHighMemEnd to make sure it's properly
// aligned together with kHighMemBeg:
kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
#endif // !ASAN_FIXED_MAPPING
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
#endif // !SANITIZER_MYRIAD2
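ORing in (GetMmapGranularity() << SHADOW_SCALE) - 1 rounds kHighMemEnd up to the next multiple of granularity * 2^SHADOW_SCALE, minus one, which makes the shadow of the byte past high memory land on an mmap-granularity boundary. A small check of that intent (my reading of the code, with a hypothetical raw value and 4 KiB granularity):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t granularity = 0x1000;  // 4 KiB mmap granularity
  const uint64_t shadow_scale = 3;
  uint64_t high_mem_end = 0x7ffffffff123ULL;          // hypothetical raw value
  high_mem_end |= (granularity << shadow_scale) - 1;  // mask is 0x7fff
  assert((high_mem_end & 0x7fff) == 0x7fff);
  // The shadow of (high_mem_end + 1) is now granularity-aligned:
  assert((((high_mem_end + 1) >> shadow_scale) % granularity) == 0);
}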
@@ -551,22 +551,33 @@ class AsanInitializer {
static AsanInitializer asan_initializer;
#endif // ASAN_DYNAMIC
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
void NOINLINE __asan_handle_no_return() {
if (asan_init_is_running)
void UnpoisonStack(uptr bottom, uptr top, const char *type) {
static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
if (top - bottom > kMaxExpectedCleanupSize) {
static bool reported_warning = false;
if (reported_warning)
return;
reported_warning = true;
Report(
"WARNING: ASan is ignoring requested __asan_handle_no_return: "
"stack type: %s top: %p; bottom %p; size: %p (%zd)\n"
"False positive error reports may follow\n"
"For details see "
"https://github.com/google/sanitizers/issues/189\n",
type, top, bottom, top - bottom, top - bottom);
return;
}
PoisonShadow(bottom, top - bottom, 0);
}
int local_stack;
AsanThread *curr_thread = GetCurrentThread();
uptr PageSize = GetPageSizeCached();
uptr top, bottom;
if (curr_thread) {
static void UnpoisonDefaultStack() {
uptr bottom, top;
if (AsanThread *curr_thread = GetCurrentThread()) {
int local_stack;
const uptr page_size = GetPageSizeCached();
top = curr_thread->stack_top();
bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1);
bottom = ((uptr)&local_stack - page_size) & ~(page_size - 1);
} else if (SANITIZER_RTEMS) {
// Give up on RTEMS.
return;
@@ -578,25 +589,31 @@ void NOINLINE __asan_handle_no_return() {
&tls_size);
top = bottom + stack_size;
}
static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
if (top - bottom > kMaxExpectedCleanupSize) {
static bool reported_warning = false;
if (reported_warning)
return;
reported_warning = true;
Report("WARNING: ASan is ignoring requested __asan_handle_no_return: "
"stack top: %p; bottom %p; size: %p (%zd)\n"
"False positive error reports may follow\n"
"For details see "
"https://github.com/google/sanitizers/issues/189\n",
top, bottom, top - bottom, top - bottom);
return;
}
PoisonShadow(bottom, top - bottom, 0);
UnpoisonStack(bottom, top, "default");
}
static void UnpoisonFakeStack() {
AsanThread *curr_thread = GetCurrentThread();
if (curr_thread && curr_thread->has_fake_stack())
curr_thread->fake_stack()->HandleNoReturn();
}
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
void NOINLINE __asan_handle_no_return() {
if (asan_init_is_running)
return;
if (!PlatformUnpoisonStacks())
UnpoisonDefaultStack();
UnpoisonFakeStack();
}
extern "C" void *__asan_extra_spill_area() {
AsanThread *t = GetCurrentThread();
CHECK(t);

View file

@@ -22,24 +22,6 @@
namespace __asan {
// ---------------------- mmap -------------------- {{{1
// Reserve memory range [beg, end].
// We need to use inclusive range because end+1 may not be representable.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
CHECK_EQ((beg % GetMmapGranularity()), 0);
CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
uptr size = end - beg + 1;
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
if (!MmapFixedSuperNoReserve(beg, size, name)) {
Report(
"ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
"Perhaps you're using ulimit -v\n",
size);
Abort();
}
if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size);
}
static void ProtectGap(uptr addr, uptr size) {
if (!flags()->protect_shadow_gap) {
// The shadow gap is unprotected, so there is a chance that someone
@@ -57,30 +39,13 @@ static void ProtectGap(uptr addr, uptr size) {
"unprotected gap shadow");
return;
}
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res) return;
// A few pages at the start of the address space cannot be protected.
// But we really want to protect as much as possible, to prevent this memory
// being returned as a result of a non-FIXED mmap().
if (addr == kZeroBaseShadowStart) {
uptr step = GetMmapGranularity();
while (size > step && addr < kZeroBaseMaxShadowStart) {
addr += step;
size -= step;
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res) return;
}
}
Report(
"ERROR: Failed to protect the shadow gap. "
"ASan cannot proceed correctly. ABORTING.\n");
DumpProcessMap();
Die();
__sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
kZeroBaseMaxShadowStart);
}
static void MaybeReportLinuxPIEBug() {
#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__aarch64__))
#if SANITIZER_LINUX && \
(defined(__x86_64__) || defined(__aarch64__) || SANITIZER_RISCV64)
Report("This might be related to ELF_ET_DYN_BASE change in Linux 4.12.\n");
Report(
"See https://github.com/google/sanitizers/issues/856 for possible "
@@ -99,8 +64,6 @@ void InitializeShadowMemory() {
// |kDefaultShadowSentinel|.
bool full_shadow_is_available = false;
if (shadow_start == kDefaultShadowSentinel) {
__asan_shadow_memory_dynamic_address = 0;
CHECK_EQ(0, kLowShadowBeg);
shadow_start = FindDynamicShadowStart();
if (SANITIZER_LINUX) full_shadow_is_available = true;
}

View file

@@ -51,11 +51,6 @@ u32 GetMallocContextSize();
stack.Unwind(pc, bp, nullptr, \
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_SIGNAL(sig) \
BufferedStackTrace stack; \
stack.Unwind((sig).pc, (sig).bp, (sig).context, \
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)

View file

@@ -191,6 +191,12 @@ void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
// Since asan's mapping is compacting, the shadow chunk may not be
// page-aligned, so we only flush the page-aligned portion.
ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
// ---------------------- TSD ---------------- {{{
static bool tsd_key_inited = false;
@@ -247,15 +253,8 @@ void *AsanDoesNotSupportStaticLinkage() {
}
uptr FindDynamicShadowStart() {
uptr granularity = GetMmapGranularity();
uptr alignment = 8 * granularity;
uptr left_padding = granularity;
uptr space_size = kHighShadowEnd + left_padding;
uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
granularity, nullptr, nullptr);
CHECK_NE((uptr)0, shadow_start);
CHECK(IsAligned(shadow_start, alignment));
return shadow_start;
return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
void AsanCheckDynamicRTPrereqs() {}
@@ -268,6 +267,8 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
void AsanOnDeadlySignal(int, void *siginfo, void *context) { UNIMPLEMENTED(); }
bool PlatformUnpoisonStacks() { return false; }
#if SANITIZER_WINDOWS64
// Exception handler for dealing with shadow memory.
static LONG CALLBACK

View file

@@ -188,8 +188,8 @@ const char *__asan_get_report_description(void);
/// \param addr Address to locate.
/// \param name Buffer to store the variable's name.
/// \param name_size Size in bytes of the variable's name buffer.
/// \param region_address [out] Address of the region.
/// \param region_size [out] Size of the region in bytes.
/// \param[out] region_address Address of the region.
/// \param[out] region_size Size of the region in bytes.
///
/// \returns Returns the category of the given pointer as a constant string.
const char *__asan_locate_address(void *addr, char *name, size_t name_size,
@@ -204,7 +204,7 @@ const char *__asan_locate_address(void *addr, char *name, size_t name_size,
/// \param addr A heap address.
/// \param trace A buffer to store the stack trace.
/// \param size Size in bytes of the trace buffer.
/// \param thread_id [out] The thread ID of the address.
/// \param[out] thread_id The thread ID of the address.
///
/// \returns Returns the number of stored frames or 0 on error.
size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
@@ -219,7 +219,7 @@ size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
/// \param addr A heap address.
/// \param trace A buffer to store the stack trace.
/// \param size Size in bytes of the trace buffer.
/// \param thread_id [out] The thread ID of the address.
/// \param[out] thread_id The thread ID of the address.
///
/// \returns Returns the number of stored frames or 0 on error.
size_t __asan_get_free_stack(void *addr, void **trace, size_t size,
@@ -228,8 +228,8 @@ size_t __asan_get_free_stack(void *addr, void **trace, size_t size,
/// Gets the current shadow memory mapping (useful for calling from the
/// debugger).
///
/// \param shadow_scale [out] Shadow scale value.
/// \param shadow_offset [out] Offset value.
/// \param[out] shadow_scale Shadow scale value.
/// \param[out] shadow_offset Offset value.
void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
/// This is an internal function that is called to report an error. However,
@@ -302,8 +302,8 @@ void *__asan_get_current_fake_stack(void);
///
/// \param fake_stack An opaque handler to a fake stack.
/// \param addr Address to test.
/// \param beg [out] Beginning of fake frame.
/// \param end [out] End of fake frame.
/// \param[out] beg Beginning of fake frame.
/// \param[out] end End of fake frame.
/// \returns Stack address or NULL.
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
void **end);

View file

@@ -320,7 +320,7 @@ void __sanitizer_print_memory_profile(size_t top_percent,
/// signal callback runs during the switch, it will not benefit from stack
/// use-after-return detection.
///
/// \param fake_stack_save [out] Fake stack save location.
/// \param[out] fake_stack_save Fake stack save location.
/// \param bottom Bottom address of stack.
/// \param size Size of stack in bytes.
void __sanitizer_start_switch_fiber(void **fake_stack_save,
@@ -335,8 +335,8 @@ void __sanitizer_start_switch_fiber(void **fake_stack_save,
/// <c>__sanitizer_start_switch_fiber()</c>.
///
/// \param fake_stack_save Fake stack save location.
/// \param bottom_old [out] Bottom address of old stack.
/// \param size_old [out] Size of old stack in bytes.
/// \param[out] bottom_old Bottom address of old stack.
/// \param[out] size_old Size of old stack in bytes.
void __sanitizer_finish_switch_fiber(void *fake_stack_save,
const void **bottom_old,
size_t *size_old);

View file

@@ -80,9 +80,11 @@ dfsan_label dfsan_has_label_with_desc(dfsan_label label, const char *desc);
size_t dfsan_get_label_count(void);
/// Flushes the DFSan shadow, i.e. forgets about all labels currently associated
/// with the application memory. Will work only if there are no other
/// threads executing DFSan-instrumented code concurrently.
/// Use this call to start over the taint tracking within the same procces.
/// with the application memory. Use this call to start over the taint tracking
/// within the same process.
///
/// Note: If another thread is working with tainted data during the flush, that
/// taint could still be written to shadow after the flush.
void dfsan_flush(void);
/// Sets a callback to be invoked on calls to write(). The callback is invoked

View file

@@ -114,6 +114,9 @@ extern "C" {
call to __msan_scoped_disable_interceptor_checks. */
void __msan_scoped_enable_interceptor_checks(void);
void __msan_start_switch_fiber(const void *bottom, size_t size);
void __msan_finish_switch_fiber(const void **bottom_old, size_t *size_old);
#ifdef __cplusplus
} // extern "C"
#endif

View file

@@ -20,8 +20,8 @@
// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
//
// Generated with: generate_netbsd_syscalls.awk
// Generated date: 2019-12-24
// Generated from: syscalls.master,v 1.296 2019/09/22 22:59:39 christos Exp
// Generated date: 2020-09-10
// Generated from: syscalls.master,v 1.306 2020/08/14 00:53:16 riastradh Exp
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_NETBSD_SYSCALL_HOOKS_H
@@ -474,7 +474,12 @@
__sanitizer_syscall_pre_impl_dup2((long long)(from), (long long)(to))
#define __sanitizer_syscall_post_dup2(res, from, to) \
__sanitizer_syscall_post_impl_dup2(res, (long long)(from), (long long)(to))
/* syscall 91 has been skipped */
#define __sanitizer_syscall_pre_getrandom(buf, buflen, flags) \
__sanitizer_syscall_pre_impl_getrandom( \
(long long)(buf), (long long)(buflen), (long long)(flags))
#define __sanitizer_syscall_post_getrandom(res, buf, buflen, flags) \
__sanitizer_syscall_post_impl_getrandom( \
res, (long long)(buf), (long long)(buflen), (long long)(flags))
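These hooks are meant to be called by user code bracketing a raw syscall,
e.g. (hedged sketch: assumes SYS_getrandom is defined in <sys/syscall.h> on
the target NetBSD release):

#include <sanitizer/netbsd_syscall_hooks.h>
#include <sys/syscall.h>
#include <unistd.h>

// Hand-rolled wrapper that keeps the sanitizer's shadow state in sync.
static long my_getrandom(void *buf, size_t buflen, unsigned flags) {
  __sanitizer_syscall_pre_getrandom(buf, buflen, flags);
  long res = syscall(SYS_getrandom, buf, buflen, flags);
  __sanitizer_syscall_post_getrandom(res, buf, buflen, flags);
  return res;
}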
#define __sanitizer_syscall_pre_fcntl(fd, cmd, arg) \
__sanitizer_syscall_pre_impl_fcntl((long long)(fd), (long long)(cmd), \
(long long)(arg))
@ -849,9 +854,31 @@
#define __sanitizer_syscall_post_sysarch(res, op, parms) \
__sanitizer_syscall_post_impl_sysarch(res, (long long)(op), \
(long long)(parms))
/* syscall 166 has been skipped */
/* syscall 167 has been skipped */
/* syscall 168 has been skipped */
#define __sanitizer_syscall_pre___futex(uaddr, op, val, timeout, uaddr2, val2, \
val3) \
__sanitizer_syscall_pre_impl___futex((long long)(uaddr), (long long)(op), \
(long long)(val), (long long)(timeout), \
(long long)(uaddr2), (long long)(val2), \
(long long)(val3))
#define __sanitizer_syscall_post___futex(res, uaddr, op, val, timeout, uaddr2, \
val2, val3) \
__sanitizer_syscall_post_impl___futex( \
res, (long long)(uaddr), (long long)(op), (long long)(val), \
(long long)(timeout), (long long)(uaddr2), (long long)(val2), \
(long long)(val3))
#define __sanitizer_syscall_pre___futex_set_robust_list(head, len) \
__sanitizer_syscall_pre_impl___futex_set_robust_list((long long)(head), \
(long long)(len))
#define __sanitizer_syscall_post___futex_set_robust_list(res, head, len) \
__sanitizer_syscall_post_impl___futex_set_robust_list( \
res, (long long)(head), (long long)(len))
#define __sanitizer_syscall_pre___futex_get_robust_list(lwpid, headp, lenp) \
__sanitizer_syscall_pre_impl___futex_get_robust_list( \
(long long)(lwpid), (long long)(headp), (long long)(lenp))
#define __sanitizer_syscall_post___futex_get_robust_list(res, lwpid, headp, \
lenp) \
__sanitizer_syscall_post_impl___futex_get_robust_list( \
res, (long long)(lwpid), (long long)(headp), (long long)(lenp))
#if !defined(_LP64)
#define __sanitizer_syscall_pre_compat_10_osemsys(which, a2, a3, a4, a5) \
__sanitizer_syscall_pre_impl_compat_10_osemsys( \
@ -2731,6 +2758,83 @@
__sanitizer_syscall_post_impl___fhstatvfs190( \
res, (long long)(fhp), (long long)(fh_size), (long long)(buf), \
(long long)(flags))
#define __sanitizer_syscall_pre___acl_get_link(path, type, aclp) \
__sanitizer_syscall_pre_impl___acl_get_link( \
(long long)(path), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_post___acl_get_link(res, path, type, aclp) \
__sanitizer_syscall_post_impl___acl_get_link( \
res, (long long)(path), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_pre___acl_set_link(path, type, aclp) \
__sanitizer_syscall_pre_impl___acl_set_link( \
(long long)(path), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_post___acl_set_link(res, path, type, aclp) \
__sanitizer_syscall_post_impl___acl_set_link( \
res, (long long)(path), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_pre___acl_delete_link(path, type) \
__sanitizer_syscall_pre_impl___acl_delete_link((long long)(path), \
(long long)(type))
#define __sanitizer_syscall_post___acl_delete_link(res, path, type) \
__sanitizer_syscall_post_impl___acl_delete_link(res, (long long)(path), \
(long long)(type))
#define __sanitizer_syscall_pre___acl_aclcheck_link(path, type, aclp) \
__sanitizer_syscall_pre_impl___acl_aclcheck_link( \
(long long)(path), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_post___acl_aclcheck_link(res, path, type, aclp) \
__sanitizer_syscall_post_impl___acl_aclcheck_link( \
res, (long long)(path), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_pre___acl_get_file(path, type, aclp) \
__sanitizer_syscall_pre_impl___acl_get_file( \
(long long)(path), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_post___acl_get_file(res, path, type, aclp) \
__sanitizer_syscall_post_impl___acl_get_file( \
res, (long long)(path), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_pre___acl_set_file(path, type, aclp) \
__sanitizer_syscall_pre_impl___acl_set_file( \
(long long)(path), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_post___acl_set_file(res, path, type, aclp) \
__sanitizer_syscall_post_impl___acl_set_file( \
res, (long long)(path), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_pre___acl_get_fd(filedes, type, aclp) \
__sanitizer_syscall_pre_impl___acl_get_fd( \
(long long)(filedes), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_post___acl_get_fd(res, filedes, type, aclp) \
__sanitizer_syscall_post_impl___acl_get_fd( \
res, (long long)(filedes), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_pre___acl_set_fd(filedes, type, aclp) \
__sanitizer_syscall_pre_impl___acl_set_fd( \
(long long)(filedes), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_post___acl_set_fd(res, filedes, type, aclp) \
__sanitizer_syscall_post_impl___acl_set_fd( \
res, (long long)(filedes), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_pre___acl_delete_file(path, type) \
__sanitizer_syscall_pre_impl___acl_delete_file((long long)(path), \
(long long)(type))
#define __sanitizer_syscall_post___acl_delete_file(res, path, type) \
__sanitizer_syscall_post_impl___acl_delete_file(res, (long long)(path), \
(long long)(type))
#define __sanitizer_syscall_pre___acl_delete_fd(filedes, type) \
__sanitizer_syscall_pre_impl___acl_delete_fd((long long)(filedes), \
(long long)(type))
#define __sanitizer_syscall_post___acl_delete_fd(res, filedes, type) \
__sanitizer_syscall_post_impl___acl_delete_fd(res, (long long)(filedes), \
(long long)(type))
#define __sanitizer_syscall_pre___acl_aclcheck_file(path, type, aclp) \
__sanitizer_syscall_pre_impl___acl_aclcheck_file( \
(long long)(path), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_post___acl_aclcheck_file(res, path, type, aclp) \
__sanitizer_syscall_post_impl___acl_aclcheck_file( \
res, (long long)(path), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_pre___acl_aclcheck_fd(filedes, type, aclp) \
__sanitizer_syscall_pre_impl___acl_aclcheck_fd( \
(long long)(filedes), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_post___acl_aclcheck_fd(res, filedes, type, aclp) \
__sanitizer_syscall_post_impl___acl_aclcheck_fd( \
res, (long long)(filedes), (long long)(type), (long long)(aclp))
#define __sanitizer_syscall_pre_lpathconf(path, name) \
__sanitizer_syscall_pre_impl_lpathconf((long long)(path), (long long)(name))
#define __sanitizer_syscall_post_lpathconf(res, path, name) \
__sanitizer_syscall_post_impl_lpathconf(res, (long long)(path), \
(long long)(name))
/* Compat with older releases */
#define __sanitizer_syscall_pre_getvfsstat \
@ -3088,7 +3192,10 @@ void __sanitizer_syscall_post_impl_compat_43_ogetdtablesize(long long res);
void __sanitizer_syscall_pre_impl_dup2(long long from, long long to);
void __sanitizer_syscall_post_impl_dup2(long long res, long long from,
long long to);
/* syscall 91 has been skipped */
void __sanitizer_syscall_pre_impl_getrandom(long long buf, long long buflen,
long long flags);
void __sanitizer_syscall_post_impl_getrandom(long long res, long long buf,
long long buflen, long long flags);
void __sanitizer_syscall_pre_impl_fcntl(long long fd, long long cmd,
long long arg);
void __sanitizer_syscall_post_impl_fcntl(long long res, long long fd,
@ -3380,9 +3487,26 @@ void __sanitizer_syscall_post_impl_compat_09_ouname(long long res,
void __sanitizer_syscall_pre_impl_sysarch(long long op, long long parms);
void __sanitizer_syscall_post_impl_sysarch(long long res, long long op,
long long parms);
/* syscall 166 has been skipped */
/* syscall 167 has been skipped */
/* syscall 168 has been skipped */
void __sanitizer_syscall_pre_impl___futex(long long uaddr, long long op,
long long val, long long timeout,
long long uaddr2, long long val2,
long long val3);
void __sanitizer_syscall_post_impl___futex(long long res, long long uaddr,
long long op, long long val,
long long timeout, long long uaddr2,
long long val2, long long val3);
void __sanitizer_syscall_pre_impl___futex_set_robust_list(long long head,
long long len);
void __sanitizer_syscall_post_impl___futex_set_robust_list(long long res,
long long head,
long long len);
void __sanitizer_syscall_pre_impl___futex_get_robust_list(long long lwpid,
long long headp,
long long lenp);
void __sanitizer_syscall_post_impl___futex_get_robust_list(long long res,
long long lwpid,
long long headp,
long long lenp);
#if !defined(_LP64)
void __sanitizer_syscall_pre_impl_compat_10_osemsys(long long which,
long long a2, long long a3,
@ -4802,6 +4926,75 @@ void __sanitizer_syscall_post_impl___fhstatvfs190(long long res, long long fhp,
long long fh_size,
long long buf,
long long flags);
void __sanitizer_syscall_pre_impl___acl_get_link(long long path, long long type,
long long aclp);
void __sanitizer_syscall_post_impl___acl_get_link(long long res, long long path,
long long type,
long long aclp);
void __sanitizer_syscall_pre_impl___acl_set_link(long long path, long long type,
long long aclp);
void __sanitizer_syscall_post_impl___acl_set_link(long long res, long long path,
long long type,
long long aclp);
void __sanitizer_syscall_pre_impl___acl_delete_link(long long path,
long long type);
void __sanitizer_syscall_post_impl___acl_delete_link(long long res,
long long path,
long long type);
void __sanitizer_syscall_pre_impl___acl_aclcheck_link(long long path,
long long type,
long long aclp);
void __sanitizer_syscall_post_impl___acl_aclcheck_link(long long res,
long long path,
long long type,
long long aclp);
void __sanitizer_syscall_pre_impl___acl_get_file(long long path, long long type,
long long aclp);
void __sanitizer_syscall_post_impl___acl_get_file(long long res, long long path,
long long type,
long long aclp);
void __sanitizer_syscall_pre_impl___acl_set_file(long long path, long long type,
long long aclp);
void __sanitizer_syscall_post_impl___acl_set_file(long long res, long long path,
long long type,
long long aclp);
void __sanitizer_syscall_pre_impl___acl_get_fd(long long filedes,
long long type, long long aclp);
void __sanitizer_syscall_post_impl___acl_get_fd(long long res,
long long filedes,
long long type, long long aclp);
void __sanitizer_syscall_pre_impl___acl_set_fd(long long filedes,
long long type, long long aclp);
void __sanitizer_syscall_post_impl___acl_set_fd(long long res,
long long filedes,
long long type, long long aclp);
void __sanitizer_syscall_pre_impl___acl_delete_file(long long path,
long long type);
void __sanitizer_syscall_post_impl___acl_delete_file(long long res,
long long path,
long long type);
void __sanitizer_syscall_pre_impl___acl_delete_fd(long long filedes,
long long type);
void __sanitizer_syscall_post_impl___acl_delete_fd(long long res,
long long filedes,
long long type);
void __sanitizer_syscall_pre_impl___acl_aclcheck_file(long long path,
long long type,
long long aclp);
void __sanitizer_syscall_post_impl___acl_aclcheck_file(long long res,
long long path,
long long type,
long long aclp);
void __sanitizer_syscall_pre_impl___acl_aclcheck_fd(long long filedes,
long long type,
long long aclp);
void __sanitizer_syscall_post_impl___acl_aclcheck_fd(long long res,
long long filedes,
long long type,
long long aclp);
void __sanitizer_syscall_pre_impl_lpathconf(long long path, long long name);
void __sanitizer_syscall_post_impl_lpathconf(long long res, long long path,
long long name);
#ifdef __cplusplus
} // extern "C"

View file

@ -73,7 +73,7 @@ static void InitializeFlags() {
RegisterCommonFlags(&parser);
// Override from user-specified string.
const char *lsan_default_options = MaybeCallLsanDefaultOptions();
const char *lsan_default_options = __lsan_default_options();
parser.ParseString(lsan_default_options);
parser.ParseStringFromEnv("LSAN_OPTIONS");
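With the weak-hook indirection gone, __lsan_default_options() is always
called; users override the weak default definition, e.g. (hedged sketch; the
flag values are illustrative):

#include <sanitizer/lsan_interface.h>

// Parsed before LSAN_OPTIONS, so the environment still wins on conflicts.
extern "C" const char *__lsan_default_options() {
  return "use_registers=0:log_threads=1";
}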

View file

@ -65,13 +65,16 @@ struct AP32 {
template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
#elif defined(__x86_64__) || defined(__powerpc64__)
#elif defined(__x86_64__) || defined(__powerpc64__) || defined(__s390x__)
# if SANITIZER_FUCHSIA
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
# elif defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
# elif defined(__s390x__)
const uptr kAllocatorSpace = 0x40000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.

View file

@ -34,6 +34,7 @@ BlockingMutex global_mutex(LINKER_INITIALIZED);
Flags lsan_flags;
void DisableCounterUnderflow() {
if (common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n");
@ -107,10 +108,6 @@ void InitializeRootRegions() {
root_regions = new (placeholder) InternalMmapVector<RootRegion>();
}
const char *MaybeCallLsanDefaultOptions() {
return (&__lsan_default_options) ? __lsan_default_options() : "";
}
void InitCommonLsan() {
InitializeRootRegions();
if (common_flags()->detect_leaks) {
@ -221,10 +218,7 @@ static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
uptr registers_begin = reinterpret_cast<uptr>(registers.data());
uptr registers_end =
reinterpret_cast<uptr>(registers.data() + registers.size());
InternalMmapVector<uptr> registers;
for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
LOG_THREADS("Processing thread %d.\n", os_id);
@ -241,7 +235,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
uptr sp;
PtraceRegistersStatus have_registers =
suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
suspended_threads.GetRegistersAndSP(i, &registers, &sp);
if (have_registers != REGISTERS_AVAILABLE) {
Report("Unable to get registers from thread %d.\n", os_id);
// If unable to get SP, consider the entire stack to be reachable unless
@ -250,9 +244,13 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
sp = stack_begin;
}
if (flags()->use_registers && have_registers)
if (flags()->use_registers && have_registers) {
uptr registers_begin = reinterpret_cast<uptr>(registers.data());
uptr registers_end =
reinterpret_cast<uptr>(registers.data() + registers.size());
ScanRangeForPointers(registers_begin, registers_end, frontier,
"REGISTERS", kReachable);
}
if (flags()->use_stacks) {
LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
@ -892,12 +890,11 @@ int __lsan_do_recoverable_leak_check() {
return 0;
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char * __lsan_default_options() {
SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
return "";
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
return 0;

View file

@ -29,10 +29,10 @@
// To enable LeakSanitizer on a new architecture, one needs to implement the
// internal_clone function as well as (probably) adjust the TLS machinery for
// the new architecture inside the sanitizer library.
#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
(SANITIZER_WORDSIZE == 64) && \
#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
(SANITIZER_WORDSIZE == 64) && \
(defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
defined(__powerpc64__))
defined(__powerpc64__) || defined(__s390x__))
#define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && \
(SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)

View file

@ -476,6 +476,15 @@ INTERCEPTOR(int, pthread_join, void *th, void **ret) {
return res;
}
INTERCEPTOR(int, pthread_detach, void *th) {
ENSURE_LSAN_INITED;
int tid = ThreadTid((uptr)th);
int res = REAL(pthread_detach)(th);
if (res == 0)
ThreadDetach(tid);
return res;
}
INTERCEPTOR(void, _exit, int status) {
if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
REAL(_exit)(status);
@ -508,6 +517,7 @@ void InitializeInterceptors() {
LSAN_MAYBE_INTERCEPT_MALLINFO;
LSAN_MAYBE_INTERCEPT_MALLOPT;
INTERCEPT_FUNCTION(pthread_create);
INTERCEPT_FUNCTION(pthread_detach);
INTERCEPT_FUNCTION(pthread_join);
INTERCEPT_FUNCTION(_exit);

View file

@ -83,6 +83,11 @@ u32 ThreadTid(uptr uid) {
return thread_registry->FindThread(FindThreadByUid, (void *)uid);
}
void ThreadDetach(u32 tid) {
CHECK_NE(tid, kInvalidTid);
thread_registry->DetachThread(tid, /* arg */ nullptr);
}
void ThreadJoin(u32 tid) {
CHECK_NE(tid, kInvalidTid);
thread_registry->JoinThread(tid, /* arg */ nullptr);

View file

@ -46,6 +46,7 @@ void InitializeMainThread();
u32 ThreadCreate(u32 tid, uptr uid, bool detached, void *arg = nullptr);
void ThreadFinish();
void ThreadDetach(u32 tid);
void ThreadJoin(u32 tid);
u32 ThreadTid(uptr uid);

View file

@ -137,8 +137,14 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
namespace {
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
struct BlockHeader {
u64 magic;
};
} // namespace
static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
SetAllocatorOutOfMemory();
Report("FATAL: %s: internal allocator is out of memory trying to allocate "
@ -147,27 +153,28 @@ static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
}
void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
if (size + sizeof(u64) < size)
uptr s = size + sizeof(BlockHeader);
if (s < size)
return nullptr;
void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
BlockHeader *p = (BlockHeader *)RawInternalAlloc(s, cache, alignment);
if (UNLIKELY(!p))
ReportInternalAllocatorOutOfMemory(size + sizeof(u64));
((u64*)p)[0] = kBlockMagic;
return (char*)p + sizeof(u64);
ReportInternalAllocatorOutOfMemory(s);
p->magic = kBlockMagic;
return p + 1;
}
void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
if (!addr)
return InternalAlloc(size, cache);
if (size + sizeof(u64) < size)
uptr s = size + sizeof(BlockHeader);
if (s < size)
return nullptr;
addr = (char*)addr - sizeof(u64);
size = size + sizeof(u64);
CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
void *p = RawInternalRealloc(addr, size, cache);
BlockHeader *p = (BlockHeader *)addr - 1;
CHECK_EQ(kBlockMagic, p->magic);
p = (BlockHeader *)RawInternalRealloc(p, s, cache);
if (UNLIKELY(!p))
ReportInternalAllocatorOutOfMemory(size);
return (char*)p + sizeof(u64);
ReportInternalAllocatorOutOfMemory(s);
return p + 1;
}
void *InternalReallocArray(void *addr, uptr count, uptr size,
@ -198,10 +205,10 @@ void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
void InternalFree(void *addr, InternalAllocatorCache *cache) {
if (!addr)
return;
addr = (char*)addr - sizeof(u64);
CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
((u64*)addr)[0] = 0;
RawInternalFree(addr, cache);
BlockHeader *p = (BlockHeader *)addr - 1;
CHECK_EQ(kBlockMagic, p->magic);
p->magic = 0;
RawInternalFree(p, cache);
}
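The same header-with-magic scheme, restated as a standalone sketch over plain
malloc/free (hedged: illustrative only, not the sanitizer's allocator):

#include <cassert>
#include <cstdint>
#include <cstdlib>

namespace {
const uint64_t kMagic = 0x6A6CB03ABCEBC041ull;
struct Header { uint64_t magic; };
}  // namespace

void *HeaderAlloc(size_t size) {
  size_t s = size + sizeof(Header);
  if (s < size) return nullptr;  // size + header overflowed
  Header *p = static_cast<Header *>(std::malloc(s));
  if (!p) return nullptr;
  p->magic = kMagic;
  return p + 1;  // user data starts right after the header
}

void HeaderFree(void *addr) {
  if (!addr) return;
  Header *p = static_cast<Header *>(addr) - 1;
  assert(p->magic == kMagic);  // catches frees of foreign pointers
  p->magic = 0;                // poison the header against double free
  std::free(p);
}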
// LowLevelAllocator

View file

@ -52,14 +52,14 @@ struct NoOpMapUnmapCallback {
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
INLINE u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
inline u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
return (*state = *state * 1103515245 + 12345) >> 16;
}
INLINE u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
inline u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
template<typename T>
INLINE void RandomShuffle(T *a, u32 n, u32 *rand_state) {
inline void RandomShuffle(T *a, u32 n, u32 *rand_state) {
if (n <= 1) return;
u32 state = *rand_state;
for (u32 i = n - 1; i > 0; i--)

View file

@ -27,7 +27,7 @@ namespace __sanitizer {
void SetErrnoToENOMEM();
// A common errno setting logic shared by almost all sanitizer allocator APIs.
INLINE void *SetErrnoOnNull(void *ptr) {
inline void *SetErrnoOnNull(void *ptr) {
if (UNLIKELY(!ptr))
SetErrnoToENOMEM();
return ptr;
@ -41,7 +41,7 @@ INLINE void *SetErrnoOnNull(void *ptr) {
// two and that the size is a multiple of alignment for POSIX implementation,
// and a bit relaxed requirement for non-POSIX ones, that the size is a multiple
// of alignment.
INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
inline bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
#if SANITIZER_POSIX
return alignment != 0 && IsPowerOfTwo(alignment) &&
(size & (alignment - 1)) == 0;
@ -52,13 +52,13 @@ INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
// Checks posix_memalign() parameters, verifies that alignment is a power of two
// and a multiple of sizeof(void *).
INLINE bool CheckPosixMemalignAlignment(uptr alignment) {
inline bool CheckPosixMemalignAlignment(uptr alignment) {
return alignment != 0 && IsPowerOfTwo(alignment) &&
(alignment % sizeof(void *)) == 0;
}
// Returns true if calloc(size, n) call overflows on size*n calculation.
INLINE bool CheckForCallocOverflow(uptr size, uptr n) {
inline bool CheckForCallocOverflow(uptr size, uptr n) {
if (!size)
return false;
uptr max = (uptr)-1L;
@ -67,7 +67,7 @@ INLINE bool CheckForCallocOverflow(uptr size, uptr n) {
// Returns true if the size passed to pvalloc overflows when rounded to the next
// multiple of page_size.
INLINE bool CheckForPvallocOverflow(uptr size, uptr page_size) {
inline bool CheckForPvallocOverflow(uptr size, uptr page_size) {
return RoundUpTo(size, page_size) < size;
}
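The calloc check's division trick, restated standalone (hedged: the same
predicate as CheckForCallocOverflow above, in portable C++):

#include <cassert>
#include <cstddef>
#include <cstdint>

// count * size overflows iff count > SIZE_MAX / size; dividing avoids the
// wrap that the multiplication itself would hit.
bool CallocOverflows(size_t size, size_t count) {
  if (size == 0) return false;  // 0 * n never overflows
  return count > SIZE_MAX / size;
}

int main() {
  assert(!CallocOverflows(8, 100));
  assert(CallocOverflows(SIZE_MAX / 2, 3));
}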

View file

@ -153,6 +153,7 @@ class SizeClassAllocator32 {
}
void *GetMetaData(const void *p) {
CHECK(kMetadataSize);
CHECK(PointerIsMine(p));
uptr mem = reinterpret_cast<uptr>(p);
uptr beg = ComputeRegionBeg(mem);

View file

@ -186,13 +186,13 @@ class SizeClassAllocator64 {
void *GetBlockBegin(const void *p) {
uptr class_id = GetSizeClass(p);
if (class_id >= kNumClasses) return nullptr;
uptr size = ClassIdToSize(class_id);
if (!size) return nullptr;
uptr chunk_idx = GetChunkIdx((uptr)p, size);
uptr reg_beg = GetRegionBegin(p);
uptr beg = chunk_idx * size;
uptr next_beg = beg + size;
if (class_id >= kNumClasses) return nullptr;
const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
if (region->mapped_user >= next_beg)
return reinterpret_cast<void*>(reg_beg + beg);
@ -207,6 +207,7 @@ class SizeClassAllocator64 {
static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
void *GetMetaData(const void *p) {
CHECK(kMetadataSize);
uptr class_id = GetSizeClass(p);
uptr size = ClassIdToSize(class_id);
uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);

View file

@ -18,8 +18,8 @@
// (currently, 32 bits and internal allocator).
class LargeMmapAllocatorPtrArrayStatic {
public:
INLINE void *Init() { return &p_[0]; }
INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
inline void *Init() { return &p_[0]; }
inline void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
private:
static const int kMaxNumChunks = 1 << 15;
uptr p_[kMaxNumChunks];
@ -31,14 +31,14 @@ class LargeMmapAllocatorPtrArrayStatic {
// same functionality in Fuchsia case, which does not support MAP_NORESERVE.
class LargeMmapAllocatorPtrArrayDynamic {
public:
INLINE void *Init() {
inline void *Init() {
uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
SecondaryAllocatorName);
CHECK(p);
return reinterpret_cast<void*>(p);
}
INLINE void EnsureSpace(uptr n) {
inline void EnsureSpace(uptr n) {
CHECK_LT(n, kMaxNumChunks);
DCHECK(n <= n_reserved_);
if (UNLIKELY(n == n_reserved_)) {

View file

@ -72,12 +72,12 @@ namespace __sanitizer {
// Clutter-reducing helpers.
template<typename T>
INLINE typename T::Type atomic_load_relaxed(const volatile T *a) {
inline typename T::Type atomic_load_relaxed(const volatile T *a) {
return atomic_load(a, memory_order_relaxed);
}
template<typename T>
INLINE void atomic_store_relaxed(volatile T *a, typename T::Type v) {
inline void atomic_store_relaxed(volatile T *a, typename T::Type v) {
atomic_store(a, v, memory_order_relaxed);
}

View file

@ -34,16 +34,16 @@ namespace __sanitizer {
// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// for mappings of the memory model to different processors.
INLINE void atomic_signal_fence(memory_order) {
inline void atomic_signal_fence(memory_order) {
__asm__ __volatile__("" ::: "memory");
}
INLINE void atomic_thread_fence(memory_order) {
inline void atomic_thread_fence(memory_order) {
__sync_synchronize();
}
template<typename T>
INLINE typename T::Type atomic_fetch_add(volatile T *a,
inline typename T::Type atomic_fetch_add(volatile T *a,
typename T::Type v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
@ -51,7 +51,7 @@ INLINE typename T::Type atomic_fetch_add(volatile T *a,
}
template<typename T>
INLINE typename T::Type atomic_fetch_sub(volatile T *a,
inline typename T::Type atomic_fetch_sub(volatile T *a,
typename T::Type v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
@ -59,7 +59,7 @@ INLINE typename T::Type atomic_fetch_sub(volatile T *a,
}
template<typename T>
INLINE typename T::Type atomic_exchange(volatile T *a,
inline typename T::Type atomic_exchange(volatile T *a,
typename T::Type v, memory_order mo) {
DCHECK(!((uptr)a % sizeof(*a)));
if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
@ -71,7 +71,7 @@ INLINE typename T::Type atomic_exchange(volatile T *a,
}
template <typename T>
INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
typedef typename T::Type Type;
@ -84,7 +84,7 @@ INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
}
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
inline bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {

View file

@ -37,7 +37,7 @@ static struct {
} __attribute__((aligned(32))) lock = {0, {0}};
template <>
INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
inline atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val,
memory_order mo) {
DCHECK(mo &
@ -55,14 +55,14 @@ INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
}
template <>
INLINE atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
inline atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val,
memory_order mo) {
return atomic_fetch_add(ptr, -val, mo);
}
template <>
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type *cmp,
atomic_uint64_t::Type xchg,
memory_order mo) {
@ -87,7 +87,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
}
template <>
INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
inline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
memory_order mo) {
DCHECK(mo &
(memory_order_relaxed | memory_order_acquire | memory_order_seq_cst));
@ -100,7 +100,7 @@ INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
}
template <>
INLINE void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
inline void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
memory_order mo) {
DCHECK(mo &
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));

View file

@ -17,12 +17,12 @@
namespace __sanitizer {
INLINE void proc_yield(int cnt) {
inline void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
}
template<typename T>
INLINE typename T::Type atomic_load(
inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
@ -60,7 +60,7 @@ INLINE typename T::Type atomic_load(
}
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));

View file

@ -16,7 +16,7 @@
namespace __sanitizer {
INLINE void proc_yield(int cnt) {
inline void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
for (int i = 0; i < cnt; i++)
__asm__ __volatile__("pause");
@ -24,7 +24,7 @@ INLINE void proc_yield(int cnt) {
}
template<typename T>
INLINE typename T::Type atomic_load(
inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
@ -70,7 +70,7 @@ INLINE typename T::Type atomic_load(
}
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));

View file

@ -54,21 +54,21 @@ extern "C" long long _InterlockedExchangeAdd64(long long volatile *Addend,
namespace __sanitizer {
INLINE void atomic_signal_fence(memory_order) {
inline void atomic_signal_fence(memory_order) {
_ReadWriteBarrier();
}
INLINE void atomic_thread_fence(memory_order) {
inline void atomic_thread_fence(memory_order) {
_mm_mfence();
}
INLINE void proc_yield(int cnt) {
inline void proc_yield(int cnt) {
for (int i = 0; i < cnt; i++)
_mm_pause();
}
template<typename T>
INLINE typename T::Type atomic_load(
inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
@ -86,7 +86,7 @@ INLINE typename T::Type atomic_load(
}
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
@ -102,7 +102,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
atomic_thread_fence(memory_order_seq_cst);
}
INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
inline u32 atomic_fetch_add(volatile atomic_uint32_t *a,
u32 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
@ -110,7 +110,7 @@ INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
(long)v);
}
INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
inline uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
uptr v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
@ -123,7 +123,7 @@ INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
#endif
}
INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
inline u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
u32 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
@ -131,7 +131,7 @@ INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
-(long)v);
}
INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
inline uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
uptr v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
@ -144,28 +144,28 @@ INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
#endif
}
INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
inline u8 atomic_exchange(volatile atomic_uint8_t *a,
u8 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v);
}
INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
inline u16 atomic_exchange(volatile atomic_uint16_t *a,
u16 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v);
}
INLINE u32 atomic_exchange(volatile atomic_uint32_t *a,
inline u32 atomic_exchange(volatile atomic_uint32_t *a,
u32 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);
}
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
inline bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
u8 *cmp,
u8 xchgv,
memory_order mo) {
@ -191,7 +191,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
return false;
}
INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
inline bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
uptr *cmp,
uptr xchg,
memory_order mo) {
@ -204,7 +204,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
return false;
}
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
inline bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
u16 *cmp,
u16 xchg,
memory_order mo) {
@ -217,7 +217,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
return false;
}
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
inline bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
u32 *cmp,
u32 xchg,
memory_order mo) {
@ -230,7 +230,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
return false;
}
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
u64 *cmp,
u64 xchg,
memory_order mo) {
@ -244,7 +244,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
}
template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
inline bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {

View file

@ -53,25 +53,25 @@ const u64 kExternalPCBit = 1ULL << 60;
extern const char *SanitizerToolName; // Can be changed by the tool.
extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
inline void SetVerbosity(int verbosity) {
atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
inline int Verbosity() {
return atomic_load(&current_verbosity, memory_order_relaxed);
}
#if SANITIZER_ANDROID
INLINE uptr GetPageSize() {
inline uptr GetPageSize() {
// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
return 4096;
}
INLINE uptr GetPageSizeCached() {
inline uptr GetPageSizeCached() {
return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
INLINE uptr GetPageSizeCached() {
inline uptr GetPageSizeCached() {
if (!PageSizeCached)
PageSizeCached = GetPageSize();
return PageSizeCached;
@ -91,7 +91,7 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
@ -121,6 +121,31 @@ bool MprotectReadOnly(uptr addr, uptr size);
void MprotectMallocZones(void *addr, int prot);
#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
#endif
// Maps shadow_size_bytes of shadow memory and returns shadow address. It will
// be aligned to the mmap granularity * 2^shadow_scale, or to
// 2^min_shadow_base_alignment if that is larger. The returned address will
// have max(2^min_shadow_base_alignment, mmap granularity) on the left, and
// shadow_size_bytes bytes on the right, which on Linux is mapped with no access.
// The high_mem_end may be updated if the original shadow size doesn't fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
uptr min_shadow_base_alignment, uptr &high_mem_end);
// Reserve memory range [beg, end]. If madvise_shadow is true then apply
// madvise settings (e.g. hugepages, core dumping) requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
bool madvise_shadow = true);
// Protect size bytes of memory starting at addr. Also try to protect
// several pages at the start of the address space as specified by
// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
uptr zero_base_max_shadow_start);
// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
uptr *largest_gap_found, uptr *max_occupied_addr);
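For orientation, the shadow address computation these helpers serve, in
ASan's canonical form (hedged sketch; AddrNotFullyAddressable is hypothetical
and assumes the scale/offset pair reported by __asan_get_shadow_mapping):

// shadow_addr = (addr >> shadow_scale) + shadow_offset
inline bool AddrNotFullyAddressable(uptr addr, uptr shadow_scale,
                                    uptr shadow_offset) {
  u8 shadow = *reinterpret_cast<u8 *>((addr >> shadow_scale) + shadow_offset);
  return shadow != 0;  // 0 means the whole 2^scale-byte granule is valid
}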
@ -349,7 +374,7 @@ unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
}
#endif
INLINE uptr MostSignificantSetBitIndex(uptr x) {
inline uptr MostSignificantSetBitIndex(uptr x) {
CHECK_NE(x, 0U);
unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
@ -366,7 +391,7 @@ INLINE uptr MostSignificantSetBitIndex(uptr x) {
return up;
}
INLINE uptr LeastSignificantSetBitIndex(uptr x) {
inline uptr LeastSignificantSetBitIndex(uptr x) {
CHECK_NE(x, 0U);
unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
@ -383,11 +408,11 @@ INLINE uptr LeastSignificantSetBitIndex(uptr x) {
return up;
}
INLINE bool IsPowerOfTwo(uptr x) {
inline bool IsPowerOfTwo(uptr x) {
return (x & (x - 1)) == 0;
}
INLINE uptr RoundUpToPowerOfTwo(uptr size) {
inline uptr RoundUpToPowerOfTwo(uptr size) {
CHECK(size);
if (IsPowerOfTwo(size)) return size;
@ -397,20 +422,20 @@ INLINE uptr RoundUpToPowerOfTwo(uptr size) {
return 1ULL << (up + 1);
}
INLINE uptr RoundUpTo(uptr size, uptr boundary) {
inline uptr RoundUpTo(uptr size, uptr boundary) {
RAW_CHECK(IsPowerOfTwo(boundary));
return (size + boundary - 1) & ~(boundary - 1);
}
INLINE uptr RoundDownTo(uptr x, uptr boundary) {
inline uptr RoundDownTo(uptr x, uptr boundary) {
return x & ~(boundary - 1);
}
INLINE bool IsAligned(uptr a, uptr alignment) {
inline bool IsAligned(uptr a, uptr alignment) {
return (a & (alignment - 1)) == 0;
}
INLINE uptr Log2(uptr x) {
inline uptr Log2(uptr x) {
CHECK(IsPowerOfTwo(x));
return LeastSignificantSetBitIndex(x);
}
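Worked values for the helpers above (sanity checks, not code paths):

//   MostSignificantSetBitIndex(0x50) == 6    (0x50 == 0b0101'0000)
//   LeastSignificantSetBitIndex(0x50) == 4
//   IsPowerOfTwo(64) == true,  IsPowerOfTwo(80) == false
//   RoundUpToPowerOfTwo(80) == 128
//   RoundUpTo(100, 16) == 112, RoundDownTo(100, 16) == 96
//   Log2(128) == 7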
@ -426,14 +451,14 @@ template<class T> void Swap(T& a, T& b) {
}
// Char handling
INLINE bool IsSpace(int c) {
inline bool IsSpace(int c) {
return (c == ' ') || (c == '\n') || (c == '\t') ||
(c == '\f') || (c == '\r') || (c == '\v');
}
INLINE bool IsDigit(int c) {
inline bool IsDigit(int c) {
return (c >= '0') && (c <= '9');
}
INLINE int ToLower(int c) {
inline int ToLower(int c) {
return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
@ -649,7 +674,8 @@ enum ModuleArch {
kModuleArchARMV7,
kModuleArchARMV7S,
kModuleArchARMV7K,
kModuleArchARM64
kModuleArchARM64,
kModuleArchRISCV64
};
// Opens the file 'file_name' and reads up to 'max_len' bytes.
@ -693,6 +719,8 @@ inline const char *ModuleArchToString(ModuleArch arch) {
return "armv7k";
case kModuleArchARM64:
return "arm64";
case kModuleArchRISCV64:
return "riscv64";
}
CHECK(0 && "Invalid module arch");
return "";
@ -815,15 +843,15 @@ void WriteToSyslog(const char *buffer);
#if SANITIZER_MAC || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
INLINE void LogFullErrorReport(const char *buffer) {}
inline void LogFullErrorReport(const char *buffer) {}
#endif
#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif
#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
@ -831,21 +859,21 @@ INLINE void LogMessageOnPrintf(const char *str) {}
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
INLINE void AndroidLogInit() {}
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
INLINE void SetAbortMessage(const char *) {}
inline void SetAbortMessage(const char *) {}
#endif
#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
INLINE uptr GetPthreadDestructorIterations() {
inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
@ -951,7 +979,7 @@ RunOnDestruction<Fn> at_scope_exit(Fn fn) {
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
INLINE void AvoidCVE_2016_2143() {}
inline void AvoidCVE_2016_2143() {}
#endif
struct StackDepotStats {
@ -972,12 +1000,26 @@ bool GetRandom(void *buffer, uptr length, bool blocking = true);
// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
INLINE u32 GetNumberOfCPUsCached() {
inline u32 GetNumberOfCPUsCached() {
if (!NumberOfCPUsCached)
NumberOfCPUsCached = GetNumberOfCPUs();
return NumberOfCPUsCached;
}
template <typename T>
class ArrayRef {
public:
ArrayRef() {}
ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}
T *begin() { return begin_; }
T *end() { return end_; }
private:
T *begin_ = nullptr;
T *end_ = nullptr;
};
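A usage sketch for the new ArrayRef (hedged: SumBytes is a hypothetical
helper):

// Accepts any contiguous u8 range without naming a container type.
static uptr SumBytes(ArrayRef<u8> bytes) {
  uptr total = 0;
  for (u8 b : bytes) total += b;
  return total;
}
// u8 buf[64]; ... SumBytes(ArrayRef<u8>(buf, buf + 64));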
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,

View file

@ -445,8 +445,10 @@ INTERCEPTOR(int, strcmp, const char *s1, const char *s2) {
c2 = (unsigned char)s2[i];
if (c1 != c2 || c1 == '\0') break;
}
COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);
COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);
if (common_flags()->intercept_strcmp) {
COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);
COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);
}
int result = CharCmpX(c1, c2);
CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, GET_CALLER_PC(), s1,
s2, result);
@ -2199,6 +2201,24 @@ INTERCEPTOR(int, clock_settime, u32 clk_id, const void *tp) {
#define INIT_CLOCK_GETTIME
#endif
#if SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID
INTERCEPTOR(int, clock_getcpuclockid, pid_t pid,
__sanitizer_clockid_t *clockid) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, clock_getcpuclockid, pid, clockid);
int res = REAL(clock_getcpuclockid)(pid, clockid);
if (!res && clockid) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, clockid, sizeof *clockid);
}
return res;
}
#define INIT_CLOCK_GETCPUCLOCKID \
COMMON_INTERCEPT_FUNCTION(clock_getcpuclockid);
#else
#define INIT_CLOCK_GETCPUCLOCKID
#endif
#if SANITIZER_INTERCEPT_GETITIMER
INTERCEPTOR(int, getitimer, int which, void *curr_value) {
void *ctx;
@ -3092,6 +3112,34 @@ INTERCEPTOR(int, sendmmsg, int fd, struct __sanitizer_mmsghdr *msgvec,
#define INIT_SENDMMSG
#endif
#if SANITIZER_INTERCEPT_SYSMSG
INTERCEPTOR(int, msgsnd, int msqid, const void *msgp, SIZE_T msgsz,
int msgflg) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, msgsnd, msqid, msgp, msgsz, msgflg);
if (msgp)
COMMON_INTERCEPTOR_READ_RANGE(ctx, msgp, sizeof(long) + msgsz);
int res = REAL(msgsnd)(msqid, msgp, msgsz, msgflg);
return res;
}
INTERCEPTOR(SSIZE_T, msgrcv, int msqid, void *msgp, SIZE_T msgsz,
long msgtyp, int msgflg) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, msgrcv, msqid, msgp, msgsz, msgtyp, msgflg);
SSIZE_T len = REAL(msgrcv)(msqid, msgp, msgsz, msgtyp, msgflg);
if (len != -1)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msgp, sizeof(long) + len);
return len;
}
#define INIT_SYSMSG \
COMMON_INTERCEPT_FUNCTION(msgsnd); \
COMMON_INTERCEPT_FUNCTION(msgrcv);
#else
#define INIT_SYSMSG
#endif
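The sizeof(long) + msgsz ranges above follow from the SysV message layout
(illustrative sketch; see <sys/msg.h>):

struct my_msgbuf {
  long mtype;       // message type, consumed by the kernel
  char mtext[128];  // payload; msgsz counts only these bytes
};
// msgsnd(q, &m, sizeof(m.mtext), 0) therefore transfers
// sizeof(long) + msgsz bytes starting at &m -- exactly the range the
// interceptors annotate (READ for msgsnd, WRITE for msgrcv).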
#if SANITIZER_INTERCEPT_GETPEERNAME
INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) {
void *ctx;
@ -4039,6 +4087,41 @@ INTERCEPTOR(int, sigfillset, __sanitizer_sigset_t *set) {
#define INIT_SIGSETOPS
#endif
#if SANITIZER_INTERCEPT_SIGSET_LOGICOPS
INTERCEPTOR(int, sigandset, __sanitizer_sigset_t *dst,
__sanitizer_sigset_t *src1, __sanitizer_sigset_t *src2) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigandset, dst, src1, src2);
if (src1)
COMMON_INTERCEPTOR_READ_RANGE(ctx, src1, sizeof(*src1));
if (src2)
COMMON_INTERCEPTOR_READ_RANGE(ctx, src2, sizeof(*src2));
int res = REAL(sigandset)(dst, src1, src2);
if (!res && dst)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(*dst));
return res;
}
INTERCEPTOR(int, sigorset, __sanitizer_sigset_t *dst,
__sanitizer_sigset_t *src1, __sanitizer_sigset_t *src2) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigorset, dst, src1, src2);
if (src1)
COMMON_INTERCEPTOR_READ_RANGE(ctx, src1, sizeof(*src1));
if (src2)
COMMON_INTERCEPTOR_READ_RANGE(ctx, src2, sizeof(*src2));
int res = REAL(sigorset)(dst, src1, src2);
if (!res && dst)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(*dst));
return res;
}
#define INIT_SIGSET_LOGICOPS \
COMMON_INTERCEPT_FUNCTION(sigandset); \
COMMON_INTERCEPT_FUNCTION(sigorset);
#else
#define INIT_SIGSET_LOGICOPS
#endif
#if SANITIZER_INTERCEPT_SIGPENDING
INTERCEPTOR(int, sigpending, __sanitizer_sigset_t *set) {
void *ctx;
@ -4792,6 +4875,34 @@ INTERCEPTOR(char *, tmpnam_r, char *s) {
#define INIT_TMPNAM_R
#endif
#if SANITIZER_INTERCEPT_PTSNAME
INTERCEPTOR(char *, ptsname, int fd) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ptsname, fd);
char *res = REAL(ptsname)(fd);
if (res != nullptr)
COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
return res;
}
#define INIT_PTSNAME COMMON_INTERCEPT_FUNCTION(ptsname);
#else
#define INIT_PTSNAME
#endif
#if SANITIZER_INTERCEPT_PTSNAME_R
INTERCEPTOR(int, ptsname_r, int fd, char *name, SIZE_T namesize) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ptsname_r, fd, name, namesize);
int res = REAL(ptsname_r)(fd, name, namesize);
if (res == 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
return res;
}
#define INIT_PTSNAME_R COMMON_INTERCEPT_FUNCTION(ptsname_r);
#else
#define INIT_PTSNAME_R
#endif
#if SANITIZER_INTERCEPT_TTYNAME
INTERCEPTOR(char *, ttyname, int fd) {
void *ctx;
@ -5763,6 +5874,79 @@ INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,
#define INIT_XDR
#endif // SANITIZER_INTERCEPT_XDR
#if SANITIZER_INTERCEPT_XDRREC
typedef int (*xdrrec_cb)(char*, char*, int);
struct XdrRecWrapper {
char *handle;
xdrrec_cb rd, wr;
};
typedef AddrHashMap<XdrRecWrapper *, 11> XdrRecWrapMap;
static XdrRecWrapMap *xdrrec_wrap_map;
static int xdrrec_wr_wrap(char *handle, char *buf, int count) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
COMMON_INTERCEPTOR_INITIALIZE_RANGE(buf, count);
XdrRecWrapper *wrap = (XdrRecWrapper *)handle;
return wrap->wr(wrap->handle, buf, count);
}
static int xdrrec_rd_wrap(char *handle, char *buf, int count) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
XdrRecWrapper *wrap = (XdrRecWrapper *)handle;
return wrap->rd(wrap->handle, buf, count);
}
// This doesn't apply to the solaris version as it has a different function
// signature.
INTERCEPTOR(void, xdrrec_create, __sanitizer_XDR *xdr, unsigned sndsize,
unsigned rcvsize, char *handle, int (*rd)(char*, char*, int),
int (*wr)(char*, char*, int)) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, xdrrec_create, xdr, sndsize, rcvsize,
handle, rd, wr);
COMMON_INTERCEPTOR_READ_RANGE(ctx, &xdr->x_op, sizeof xdr->x_op);
// We can't allocate a wrapper on the stack, as the handle is used outside
// this stack frame. So we put it on the heap, and keep track of it with
// the HashMap (keyed by x_private). When we later need to xdr_destroy,
// we can index the map, free the wrapper, and then clean the map entry.
XdrRecWrapper *wrap_data =
(XdrRecWrapper *)InternalAlloc(sizeof(XdrRecWrapper));
wrap_data->handle = handle;
wrap_data->rd = rd;
wrap_data->wr = wr;
if (wr)
wr = xdrrec_wr_wrap;
if (rd)
rd = xdrrec_rd_wrap;
handle = (char *)wrap_data;
REAL(xdrrec_create)(xdr, sndsize, rcvsize, handle, rd, wr);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdr, sizeof *xdr);
XdrRecWrapMap::Handle wrap(xdrrec_wrap_map, xdr->x_private, false, true);
*wrap = wrap_data;
}
// We have to intercept this to be able to free wrapper memory;
// otherwise it's not necessary.
INTERCEPTOR(void, xdr_destroy, __sanitizer_XDR *xdr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, xdr_destroy, xdr);
XdrRecWrapMap::Handle wrap(xdrrec_wrap_map, xdr->x_private, true);
InternalFree(*wrap);
REAL(xdr_destroy)(xdr);
}
#define INIT_XDRREC_LINUX \
static u64 xdrrec_wrap_mem[sizeof(XdrRecWrapMap) / sizeof(u64) + 1]; \
xdrrec_wrap_map = new ((void *)&xdrrec_wrap_mem) XdrRecWrapMap(); \
COMMON_INTERCEPT_FUNCTION(xdrrec_create); \
COMMON_INTERCEPT_FUNCTION(xdr_destroy);
#else
#define INIT_XDRREC_LINUX
#endif
#if SANITIZER_INTERCEPT_TSEARCH
INTERCEPTOR(void *, tsearch, void *key, void **rootp,
int (*compar)(const void *, const void *)) {
@ -7271,23 +7455,26 @@ INTERCEPTOR(int, setttyentpath, char *path) {
#endif
#if SANITIZER_INTERCEPT_PROTOENT
static void write_protoent(void *ctx, struct __sanitizer_protoent *p) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
SIZE_T pp_size = 1; // One counts the terminating NULL pointer.
for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
pp_size * sizeof(char **));
}
INTERCEPTOR(struct __sanitizer_protoent *, getprotoent) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getprotoent);
struct __sanitizer_protoent *p = REAL(getprotoent)();
if (p) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
SIZE_T pp_size = 1; // One handles the trailing \0
for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
pp_size * sizeof(char **));
}
if (p)
write_protoent(ctx, p);
return p;
}
@ -7297,19 +7484,8 @@ INTERCEPTOR(struct __sanitizer_protoent *, getprotobyname, const char *name) {
if (name)
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
struct __sanitizer_protoent *p = REAL(getprotobyname)(name);
if (p) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
SIZE_T pp_size = 1; // One handles the trailing \0
for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
pp_size * sizeof(char **));
}
if (p)
write_protoent(ctx, p);
return p;
}
@ -7317,19 +7493,8 @@ INTERCEPTOR(struct __sanitizer_protoent *, getprotobynumber, int proto) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getprotobynumber, proto);
struct __sanitizer_protoent *p = REAL(getprotobynumber)(proto);
if (p) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
SIZE_T pp_size = 1; // One handles the trailing \0
for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
pp_size * sizeof(char **));
}
if (p)
write_protoent(ctx, p);
return p;
}
#define INIT_PROTOENT \
@ -7340,6 +7505,58 @@ INTERCEPTOR(struct __sanitizer_protoent *, getprotobynumber, int proto) {
#define INIT_PROTOENT
#endif
#if SANITIZER_INTERCEPT_PROTOENT_R
INTERCEPTOR(int, getprotoent_r, struct __sanitizer_protoent *result_buf,
char *buf, SIZE_T buflen, struct __sanitizer_protoent **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getprotoent_r, result_buf, buf, buflen,
result);
int res = REAL(getprotoent_r)(result_buf, buf, buflen, result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
if (!res && *result)
write_protoent(ctx, *result);
return res;
}
INTERCEPTOR(int, getprotobyname_r, const char *name,
struct __sanitizer_protoent *result_buf, char *buf, SIZE_T buflen,
struct __sanitizer_protoent **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname_r, name, result_buf, buf,
buflen, result);
if (name)
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
int res = REAL(getprotobyname_r)(name, result_buf, buf, buflen, result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
if (!res && *result)
write_protoent(ctx, *result);
return res;
}
INTERCEPTOR(int, getprotobynumber_r, int num,
struct __sanitizer_protoent *result_buf, char *buf,
SIZE_T buflen, struct __sanitizer_protoent **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getprotobynumber_r, num, result_buf, buf,
buflen, result);
int res = REAL(getprotobynumber_r)(num, result_buf, buf, buflen, result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
if (!res && *result)
write_protoent(ctx, *result);
return res;
}
#define INIT_PROTOENT_R \
COMMON_INTERCEPT_FUNCTION(getprotoent_r); \
COMMON_INTERCEPT_FUNCTION(getprotobyname_r); \
COMMON_INTERCEPT_FUNCTION(getprotobynumber_r);
#else
#define INIT_PROTOENT_R
#endif
#if SANITIZER_INTERCEPT_NETENT
INTERCEPTOR(struct __sanitizer_netent *, getnetent) {
void *ctx;
@ -9676,12 +9893,25 @@ INTERCEPTOR(void, qsort, void *base, SIZE_T nmemb, SIZE_T size,
}
}
qsort_compar_f old_compar = qsort_compar;
qsort_compar = compar;
SIZE_T old_size = qsort_size;
qsort_size = size;
// Handle qsort() implementations that recurse using an
// interposable function call:
bool already_wrapped = compar == wrapped_qsort_compar;
if (already_wrapped) {
// This case should only happen if the qsort() implementation calls itself
// using a preemptible function call (e.g. the FreeBSD libc version).
// Check that the size and comparator arguments are as expected.
CHECK_NE(compar, qsort_compar);
CHECK_EQ(qsort_size, size);
} else {
qsort_compar = compar;
qsort_size = size;
}
REAL(qsort)(base, nmemb, size, wrapped_qsort_compar);
qsort_compar = old_compar;
qsort_size = old_size;
if (!already_wrapped) {
qsort_compar = old_compar;
qsort_size = old_size;
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
}
#define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)
@ -9714,12 +9944,25 @@ INTERCEPTOR(void, qsort_r, void *base, SIZE_T nmemb, SIZE_T size,
}
}
qsort_r_compar_f old_compar = qsort_r_compar;
qsort_r_compar = compar;
SIZE_T old_size = qsort_r_size;
qsort_r_size = size;
// Handle qsort_r() implementations that recurse using an
// interposable function call:
bool already_wrapped = compar == wrapped_qsort_r_compar;
if (already_wrapped) {
// This case should only happen if the qsort_r() implementation calls itself
// using a preemptible function call (e.g. the FreeBSD libc version).
// Check that the size and comparator arguments are as expected.
CHECK_NE(compar, qsort_r_compar);
CHECK_EQ(qsort_r_size, size);
} else {
qsort_r_compar = compar;
qsort_r_size = size;
}
REAL(qsort_r)(base, nmemb, size, wrapped_qsort_r_compar, arg);
qsort_r_compar = old_compar;
qsort_r_size = old_size;
if (!already_wrapped) {
qsort_r_compar = old_compar;
qsort_r_size = old_size;
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
}
#define INIT_QSORT_R COMMON_INTERCEPT_FUNCTION(qsort_r)
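The wrapped-comparator pattern in miniature (hedged: single-threaded sketch;
the real interceptors above also annotate the comparator's arguments and the
sorted range):

#include <cstdlib>

typedef int (*compar_f)(const void *, const void *);
static compar_f saved_compar;

static int wrapped_compar(const void *a, const void *b) {
  // A sanitizer would check/unpoison a and b here before the user call.
  return saved_compar(a, b);
}

static void my_qsort(void *base, size_t nmemb, size_t size, compar_f compar) {
  bool already_wrapped = compar == wrapped_compar;  // libc recursed into us
  compar_f old = saved_compar;
  if (!already_wrapped) saved_compar = compar;
  qsort(base, nmemb, size, wrapped_compar);
  if (!already_wrapped) saved_compar = old;
}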
@ -9853,6 +10096,7 @@ static void InitializeCommonInterceptors() {
INIT_FGETGRENT_R;
INIT_SETPWENT;
INIT_CLOCK_GETTIME;
INIT_CLOCK_GETCPUCLOCKID;
INIT_GETITIMER;
INIT_TIME;
INIT_GLOB;
@ -9879,6 +10123,7 @@ static void InitializeCommonInterceptors() {
INIT_SENDMSG;
INIT_RECVMMSG;
INIT_SENDMMSG;
INIT_SYSMSG;
INIT_GETPEERNAME;
INIT_IOCTL;
INIT_INET_ATON;
@ -9915,6 +10160,7 @@ static void InitializeCommonInterceptors() {
INIT_SIGWAITINFO;
INIT_SIGTIMEDWAIT;
INIT_SIGSETOPS;
INIT_SIGSET_LOGICOPS;
INIT_SIGPENDING;
INIT_SIGPROCMASK;
INIT_PTHREAD_SIGMASK;
@ -9956,6 +10202,8 @@ static void InitializeCommonInterceptors() {
INIT_PTHREAD_BARRIERATTR_GETPSHARED;
INIT_TMPNAM;
INIT_TMPNAM_R;
INIT_PTSNAME;
INIT_PTSNAME_R;
INIT_TTYNAME;
INIT_TTYNAME_R;
INIT_TEMPNAM;
@ -9985,6 +10233,7 @@ static void InitializeCommonInterceptors() {
INIT_BZERO;
INIT_FTIME;
INIT_XDR;
INIT_XDRREC_LINUX;
INIT_TSEARCH;
INIT_LIBIO_INTERNALS;
INIT_FOPEN;
@@ -10042,6 +10291,7 @@ static void InitializeCommonInterceptors() {
INIT_STRMODE;
INIT_TTYENT;
INIT_PROTOENT;
INIT_PROTOENT_R;
INIT_NETENT;
INIT_GETMNTINFO;
INIT_MI_VECTOR_HASH;

View file

@@ -340,6 +340,12 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
size = 0;
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
// For %ms/%mc, write the allocated output buffer as well.
if (dir.allocate) {
char *buf = *(char **)argp;
if (buf)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
}
}
}
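The dir.allocate branch corresponds to POSIX %m conversions (%ms, %mc, ...), where scanf() allocates the output string itself and only stores a pointer through the caller's argument, so the pointer cell and the heap string are two separate writes to record. An illustrative caller:

#include <stdio.h>
#include <stdlib.h>

static void demo(void) {
  char *word = NULL;  // scanf() writes this pointer cell...
  if (sscanf("hello world", "%ms", &word) == 1) {
    printf("%s\n", word);  // ...and libc allocates the string it points to
    free(word);
  }
}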

View file

@@ -0,0 +1,56 @@
#if (defined(__riscv) && (__riscv_xlen == 64)) && defined(__linux__)
#include "sanitizer_common/sanitizer_asm.h"
ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
.comm _ZN14__interception10real_vforkE,8,8
.globl ASM_WRAPPER_NAME(vfork)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_WRAPPER_NAME(vfork):
// Save ra in the off-stack spill area.
// allocate space on stack
addi sp, sp, -16
// store ra value
sd ra, 8(sp)
call COMMON_INTERCEPTOR_SPILL_AREA
// restore previous values from stack
ld ra, 8(sp)
// adjust stack
addi sp, sp, 16
// store ra into the spill slot returned in x10
sd ra, 0(x10)
// Call real vfork. This may return twice. User code that runs between the first and the second return
// may clobber the stack frame of the interceptor; that's why it does not have a frame.
la x10, _ZN14__interception10real_vforkE
ld x10, 0(x10)
jalr x10
// adjust stack
addi sp, sp, -16
// save x10 (vfork's return value) on the adjusted stack
sd x10, 8(sp)
// jump to exit label if x10 is 0
beqz x10, .L_exit
// x10 != 0 => parent process. Clear the stack shadow.
// pass the old sp in x10
addi x10, sp, 16
call COMMON_INTERCEPTOR_HANDLE_VFORK
.L_exit:
// Restore ra
call COMMON_INTERCEPTOR_SPILL_AREA
ld ra, 0(x10)
// reload vfork's return value from the stack
ld x10, 8(sp)
// adjust stack
addi sp, sp, 16
ret
ASM_SIZE(vfork)
.weak vfork
.set vfork, ASM_WRAPPER_NAME(vfork)
#endif
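Why the ra value is parked off-stack: vfork() returns twice on one stack, and whatever the child runs before exec/_exit may clobber the wrapper's frame. The real COMMON_INTERCEPTOR_SPILL_AREA is defined per tool; a per-thread slot along these lines is an assumption used only to illustrate the contract the assembly relies on.

// Sketch: one pointer-sized spill slot per thread; it survives the window
// between vfork()'s two returns precisely because it is not on the stack.
static thread_local unsigned long long vfork_spill;

extern "C" unsigned long long *COMMON_INTERCEPTOR_SPILL_AREA() {
  return &vfork_spill;
}
// The wrapper stores ra here before calling the real vfork() and reloads
// it afterwards, instead of trusting anything saved in its own frame.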

View file

@@ -139,6 +139,59 @@ uptr ReservedAddressRange::InitAligned(uptr size, uptr align,
return start;
}
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
// Reserve memory range [beg, end].
// We need to use inclusive range because end+1 may not be representable.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
bool madvise_shadow) {
CHECK_EQ((beg % GetMmapGranularity()), 0);
CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
uptr size = end - beg + 1;
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
if (madvise_shadow ? !MmapFixedSuperNoReserve(beg, size, name)
: !MmapFixedNoReserve(beg, size, name)) {
Report(
"ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
"Perhaps you're using ulimit -v\n",
size);
Abort();
}
if (madvise_shadow && common_flags()->use_madv_dontdump)
DontDumpShadowMemory(beg, size);
}
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
uptr zero_base_max_shadow_start) {
if (!size)
return;
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
// A few pages at the start of the address space can not be protected.
// But we really want to protect as much as possible, to prevent this memory
// being returned as a result of a non-FIXED mmap().
if (addr == zero_base_shadow_start) {
uptr step = GetMmapGranularity();
while (size > step && addr < zero_base_max_shadow_start) {
addr += step;
size -= step;
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
}
}
Report(
"ERROR: Failed to protect the shadow gap. "
"%s cannot proceed correctly. ABORTING.\n",
SanitizerToolName);
DumpProcessMap();
Die();
}
#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
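The inclusive bound matters at the very top of the address space: with an exclusive [beg, end) convention, a range ending at the last mappable byte would need end + 1, which wraps to zero. A minimal sketch of the arithmetic the comment above refers to:

static uptr InclusiveRangeSize(uptr beg, uptr end) {
  // end == ~(uptr)0 is representable while end + 1 would wrap to 0, which
  // is why the interface takes an inclusive upper bound.
  return end - beg + 1;  // beg + (size - 1) == end
}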
} // namespace __sanitizer
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,

View file

@@ -2294,9 +2294,10 @@ PRE_SYSCALL(ni_syscall)() {}
POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
#if !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__))
#if !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
SANITIZER_RISCV64)
if (data) {
if (request == ptrace_setregs) {
PRE_READ((void *)data, struct_user_regs_struct_sz);
@@ -2315,9 +2316,10 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
}
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
#if !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__))
#if !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
SANITIZER_RISCV64)
if (res >= 0 && data) {
// Note that this is different from the interceptor in
// sanitizer_common_interceptors.inc.

View file

@@ -24,6 +24,7 @@ namespace __sanitizer {
#define errno_ENOMEM 12
#define errno_EBUSY 16
#define errno_EINVAL 22
#define errno_ENAMETOOLONG 36
// Those might not be present, or their values may differ, on different platforms.
extern const int errno_EOWNERDEAD;

View file

@@ -91,7 +91,7 @@ class FlagHandlerInclude : public FlagHandlerBase {
}
return parser_->ParseFile(value, ignore_missing_);
}
bool Format(char *buffer, uptr size) {
bool Format(char *buffer, uptr size) override {
// Note `original_path_` isn't actually what's parsed due to `%`
// substitutions. Printing the substituted path would require holding onto
// mmap'ed memory.

View file

@@ -40,7 +40,12 @@ COMMON_FLAG(bool, fast_unwind_on_check, false,
COMMON_FLAG(bool, fast_unwind_on_fatal, false,
"If available, use the fast frame-pointer-based unwinder on fatal "
"errors.")
COMMON_FLAG(bool, fast_unwind_on_malloc, true,
// The ARM Thumb/Thumb-2 frame pointer is inconsistent between GCC and Clang
// [1], and the fast unwinder is also unreliable when mixing ARM and Thumb
// code [2].
// [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92172
// [2] https://bugs.llvm.org/show_bug.cgi?id=44158
COMMON_FLAG(bool, fast_unwind_on_malloc,
!(SANITIZER_LINUX && !SANITIZER_ANDROID && SANITIZER_ARM),
"If available, use the fast frame-pointer-based unwinder on "
"malloc/free.")
COMMON_FLAG(bool, handle_ioctl, false, "Intercept and handle ioctl requests.")
@@ -195,6 +200,9 @@ COMMON_FLAG(bool, intercept_strtok, true,
COMMON_FLAG(bool, intercept_strpbrk, true,
"If set, uses custom wrappers for strpbrk function "
"to find more errors.")
COMMON_FLAG(
bool, intercept_strcmp, true,
"If set, uses custom wrappers for strcmp functions to find more errors.")
COMMON_FLAG(bool, intercept_strlen, true,
"If set, uses custom wrappers for strlen and strnlen functions "
"to find more errors.")

View file

@@ -21,8 +21,9 @@
#if SANITIZER_LINUX || SANITIZER_FUCHSIA
# if __GLIBC_PREREQ(2, 16) || (SANITIZER_ANDROID && __ANDROID_API__ >= 21) || \
SANITIZER_FUCHSIA
# if (__GLIBC_PREREQ(2, 16) || (SANITIZER_ANDROID && __ANDROID_API__ >= 21) || \
SANITIZER_FUCHSIA) && \
!SANITIZER_GO
# define SANITIZER_USE_GETAUXVAL 1
# else
# define SANITIZER_USE_GETAUXVAL 0

View file

@@ -196,9 +196,6 @@ typedef u64 tid_t;
// This header should NOT include any other headers to avoid portability issues.
// Common defs.
#ifndef INLINE
#define INLINE inline
#endif
#define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
#define SANITIZER_WEAK_DEFAULT_IMPL \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
@@ -333,14 +330,10 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
#define UNIMPLEMENTED() UNREACHABLE("unimplemented")
#define COMPILER_CHECK(pred) IMPL_COMPILER_ASSERT(pred, __LINE__)
#define COMPILER_CHECK(pred) static_assert(pred, "")
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
#define IMPL_PASTE(a, b) a##b
#define IMPL_COMPILER_ASSERT(pred, line) \
typedef char IMPL_PASTE(assertion_failed_##_, line)[2*(int)(pred)-1]
// Limits for integral types. We have to redefine it in case we don't
// have stdint.h (like in Visual Studio 9).
#undef __INT64_C

View file

@@ -154,6 +154,8 @@ namespace __sanitizer {
#if SANITIZER_LINUX && defined(__x86_64__)
#include "sanitizer_syscall_linux_x86_64.inc"
#elif SANITIZER_LINUX && SANITIZER_RISCV64
#include "sanitizer_syscall_linux_riscv64.inc"
#elif SANITIZER_LINUX && defined(__aarch64__)
#include "sanitizer_syscall_linux_aarch64.inc"
#elif SANITIZER_LINUX && defined(__arm__)
@@ -187,6 +189,10 @@ uptr internal_munmap(void *addr, uptr length) {
int internal_mprotect(void *addr, uptr length, int prot) {
return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot);
}
int internal_madvise(uptr addr, uptr length, int advice) {
return internal_syscall(SYSCALL(madvise), addr, length, advice);
}
#endif
uptr internal_close(fd_t fd) {
@@ -422,15 +428,6 @@ uptr internal_sched_yield() {
return internal_syscall(SYSCALL(sched_yield));
}
void internal__exit(int exitcode) {
#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
internal_syscall(SYSCALL(exit), exitcode);
#else
internal_syscall(SYSCALL(exit_group), exitcode);
#endif
Die(); // Unreachable.
}
unsigned int internal_sleep(unsigned int seconds) {
struct timespec ts;
ts.tv_sec = seconds;
@@ -447,6 +444,17 @@ uptr internal_execve(const char *filename, char *const argv[],
}
#endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD
#if !SANITIZER_NETBSD
void internal__exit(int exitcode) {
#if SANITIZER_FREEBSD || SANITIZER_OPENBSD || SANITIZER_SOLARIS
internal_syscall(SYSCALL(exit), exitcode);
#else
internal_syscall(SYSCALL(exit_group), exitcode);
#endif
Die(); // Unreachable.
}
#endif // !SANITIZER_NETBSD
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
if (ShouldMockFailureToOpen(filename))
@@ -706,7 +714,7 @@ struct linux_dirent {
};
#else
struct linux_dirent {
#if SANITIZER_X32 || defined(__aarch64__)
#if SANITIZER_X32 || defined(__aarch64__) || SANITIZER_RISCV64
u64 d_ino;
u64 d_off;
#else
@@ -714,7 +722,7 @@ struct linux_dirent {
unsigned long d_off;
#endif
unsigned short d_reclen;
#ifdef __aarch64__
#if defined(__aarch64__) || SANITIZER_RISCV64
unsigned char d_type;
#endif
char d_name[256];
@@ -796,11 +804,29 @@ int internal_sysctl(const int *name, unsigned int namelen, void *oldp,
#if SANITIZER_FREEBSD
int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
const void *newp, uptr newlen) {
static decltype(sysctlbyname) *real = nullptr;
if (!real)
real = (decltype(sysctlbyname) *)dlsym(RTLD_NEXT, "sysctlbyname");
CHECK(real);
return real(sname, oldp, (size_t *)oldlenp, newp, (size_t)newlen);
// Note: this function can be called during startup, so we need to avoid
// calling any interceptable functions. On FreeBSD >= 1300045 sysctlbyname()
// is a real syscall, but for older versions it calls sysctlnametomib()
// followed by sysctl(). To avoid calling the intercepted version and
// asserting if this happens during startup, call the real sysctlnametomib()
// followed by internal_sysctl() if the syscall is not available.
#ifdef SYS___sysctlbyname
return internal_syscall(SYSCALL(__sysctlbyname), sname,
internal_strlen(sname), oldp, (size_t *)oldlenp, newp,
(size_t)newlen);
#else
static decltype(sysctlnametomib) *real_sysctlnametomib = nullptr;
if (!real_sysctlnametomib)
real_sysctlnametomib =
(decltype(sysctlnametomib) *)dlsym(RTLD_NEXT, "sysctlnametomib");
CHECK(real_sysctlnametomib);
int oid[CTL_MAXNAME];
size_t len = CTL_MAXNAME;
if (real_sysctlnametomib(sname, oid, &len) == -1)
return (-1);
return internal_sysctl(oid, len, oldp, oldlenp, newp, newlen);
#endif
}
#endif
#endif
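The fallback branch above uses a standard interceptor-runtime idiom: resolve the next (real) definition of the libc symbol with dlsym(RTLD_NEXT, ...) so early startup code cannot recurse into the interposed version. The same idiom in isolation, with strlen as an assumed example symbol (production code would also check for a NULL lookup result; RTLD_NEXT may require _GNU_SOURCE on glibc):

#include <dlfcn.h>
#include <stddef.h>

// Call the real strlen() behind any interposer; resolved once, then cached.
static size_t real_strlen(const char *s) {
  typedef size_t (*strlen_f)(const char *);
  static strlen_f real = (strlen_f)dlsym(RTLD_NEXT, "strlen");
  return real(s);
}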
@@ -861,9 +887,8 @@ uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
#else
__sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
__sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset;
return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how,
(uptr)&k_set->sig[0], (uptr)&k_oldset->sig[0],
sizeof(__sanitizer_kernel_sigset_t));
return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how, (uptr)k_set,
(uptr)k_oldset, sizeof(__sanitizer_kernel_sigset_t));
#endif
}
@@ -1046,6 +1071,8 @@ uptr GetMaxVirtualAddress() {
// This should (does) work for both PowerPC64 Endian modes.
// Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
#elif SANITIZER_RISCV64
return (1ULL << 38) - 1;
# elif defined(__mips64)
return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
# elif defined(__s390x__)
@@ -1340,6 +1367,55 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "memory", "$29" );
return res;
}
#elif SANITIZER_RISCV64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
long long res;
if (!fn || !child_stack)
return -EINVAL;
CHECK_EQ(0, (uptr)child_stack % 16);
child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
((unsigned long long *)child_stack)[0] = (uptr)fn;
((unsigned long long *)child_stack)[1] = (uptr)arg;
register int (*__fn)(void *) __asm__("a0") = fn;
register void *__stack __asm__("a1") = child_stack;
register int __flags __asm__("a2") = flags;
register void *__arg __asm__("a3") = arg;
register int *__ptid __asm__("a4") = parent_tidptr;
register void *__tls __asm__("a5") = newtls;
register int *__ctid __asm__("a6") = child_tidptr;
__asm__ __volatile__(
"mv a0,a2\n" /* flags */
"mv a2,a4\n" /* ptid */
"mv a3,a5\n" /* tls */
"mv a4,a6\n" /* ctid */
"addi a7, zero, %9\n" /* clone */
"ecall\n"
/* if (a0 != 0)
 *   return a0;
 */
"bnez a0, 1f\n"
/* In the child, now. Call "fn(arg)". */
"ld a0, 8(sp)\n"
"ld a1, 16(sp)\n"
"jalr a1\n"
/* Call _exit(a0). */
"addi a7, zero, %10\n"
"ecall\n"
"1:\n"
: "=r"(res)
: "i"(-EINVAL), "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
"r"(__ptid), "r"(__tls), "r"(__ctid), "i"(__NR_clone), "i"(__NR_exit)
: "ra", "memory");
return res;
}
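The parent-side handoff in the code above, restated as plain C for readability (a model only; the child-side pickup has to stay in assembly, since no compiler-generated prologue can run on the raw clone stack):

#include <stdint.h>

// Drop the 16-byte-aligned stack top by two 8-byte slots (alignment is
// preserved) and stash fn and arg where the child will find them.
static uint64_t *prepare_child_stack(void *stack_top, int (*fn)(void *),
                                     void *arg) {
  uint64_t *sp = (uint64_t *)stack_top - 2;
  sp[0] = (uint64_t)(uintptr_t)fn;
  sp[1] = (uint64_t)(uintptr_t)arg;
  return sp;
}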
#elif defined(__aarch64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
@@ -2211,7 +2287,7 @@ void CheckNoDeepBind(const char *filename, int flag) {
if (flag & RTLD_DEEPBIND) {
Report(
"You are trying to dlopen a %s shared library with RTLD_DEEPBIND flag"
" which is incompatibe with sanitizer runtime "
" which is incompatible with sanitizer runtime "
"(see https://github.com/google/sanitizers/issues/611 for details"
"). If you want to run %s library under sanitizers please remove "
"RTLD_DEEPBIND from dlopen flags.\n",

View file

@@ -60,9 +60,9 @@ uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
// internal_sigaction instead.
int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \
|| defined(__powerpc64__) || defined(__s390__) || defined(__i386__) \
|| defined(__arm__)
#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
defined(__arm__) || SANITIZER_RISCV64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
@@ -109,7 +109,7 @@ void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
// Releases memory pages entirely within the [beg, end] address range.
// The pages no longer count toward RSS; reads are guaranteed to return 0.
// Requires (but does not verify!) that pages are MAP_PRIVATE.
INLINE void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
// man madvise on Linux promises zero-fill for anonymous private pages.
// Testing shows the same behaviour for private (but not anonymous) mappings
// of shm_open() files, as long as the underlying file is untouched.

View file

@@ -13,7 +13,7 @@
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_OPENBSD || SANITIZER_SOLARIS
#include "sanitizer_allocator_internal.h"
@@ -28,6 +28,10 @@
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#if SANITIZER_NETBSD
#define _RTLD_SOURCE // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
#endif
#include <dlfcn.h> // for dlsym()
#include <link.h>
#include <pthread.h>
@@ -149,7 +153,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
pthread_attr_destroy(&attr);
#endif // SANITIZER_SOLARIS
#endif // SANITIZER_SOLARIS
*stack_top = (uptr)stackaddr + stacksize;
*stack_bottom = (uptr)stackaddr;
@@ -189,20 +193,20 @@ __attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
#endif
}
#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO && \
#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO && \
!SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_SOLARIS
static uptr g_tls_size;
#ifdef __i386__
# define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27))
#define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27))
#else
# define CHECK_GET_TLS_STATIC_INFO_VERSION 0
#define CHECK_GET_TLS_STATIC_INFO_VERSION 0
#endif
#if CHECK_GET_TLS_STATIC_INFO_VERSION
# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
#define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
#else
# define DL_INTERNAL_FUNCTION
#define DL_INTERNAL_FUNCTION
#endif
namespace {
@@ -262,12 +266,11 @@ void InitTlsSize() {
}
#else
void InitTlsSize() { }
#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO &&
// !SANITIZER_NETBSD && !SANITIZER_SOLARIS
#endif
#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) || \
defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) || \
defined(__arm__)) && \
#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) || \
defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) || \
defined(__arm__) || SANITIZER_RISCV64) && \
SANITIZER_LINUX && !SANITIZER_ANDROID
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;
@@ -307,6 +310,21 @@ uptr ThreadDescriptorSize() {
#elif defined(__mips__)
// TODO(sagarthakur): add more values as per different glibc versions.
val = FIRST_32_SECOND_64(1152, 1776);
#elif SANITIZER_RISCV64
int major;
int minor;
int patch;
if (GetLibcVersion(&major, &minor, &patch) && major == 2) {
// TODO: consider adding an optional runtime check for an unknown (untested)
// glibc version
if (minor <= 28) // WARNING: the highest tested version is 2.29
val = 1772; // no guarantees for this one
else if (minor <= 31)
val = 1772; // tested against glibc 2.29, 2.31
else
val = 1936; // tested against glibc 2.32
}
#elif defined(__aarch64__)
// The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
val = 1776;
@@ -327,15 +345,17 @@ uptr ThreadSelfOffset() {
return kThreadSelfOffset;
}
#if defined(__mips__) || defined(__powerpc64__)
#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
static uptr TlsPreTcbSize() {
# if defined(__mips__)
#if defined(__mips__)
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
# elif defined(__powerpc64__)
#elif defined(__powerpc64__)
const uptr kTcbHead = 88; // sizeof (tcbhead_t)
# endif
#elif SANITIZER_RISCV64
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
#endif
const uptr kTlsAlign = 16;
const uptr kTlsPreTcbSize =
RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
@@ -345,11 +365,11 @@ static uptr TlsPreTcbSize() {
uptr ThreadSelf() {
uptr descr_addr;
# if defined(__i386__)
#if defined(__i386__)
asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
# elif defined(__x86_64__)
#elif defined(__x86_64__)
asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
# elif defined(__mips__)
#elif defined(__mips__)
// MIPS uses TLS variant I. The thread pointer (in hardware register $29)
// points to the end of the TCB + 0x7000. The pthread_descr structure is
// immediately in front of the TCB. TlsPreTcbSize() includes the size of the
@@ -361,12 +381,19 @@ uptr ThreadSelf() {
rdhwr %0,$29;\
.set pop" : "=r" (thread_pointer));
descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
# elif defined(__aarch64__) || defined(__arm__)
#elif defined(__aarch64__) || defined(__arm__)
descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
# elif defined(__s390__)
#elif SANITIZER_RISCV64
uptr tcb_end;
asm volatile("mv %0, tp;\n" : "=r"(tcb_end));
// https://github.com/riscv/riscv-elf-psabi-doc/issues/53
const uptr kTlsTcbOffset = 0x800;
descr_addr =
reinterpret_cast<uptr>(tcb_end - kTlsTcbOffset - TlsPreTcbSize());
#elif defined(__s390__)
descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
# elif defined(__powerpc64__)
#elif defined(__powerpc64__)
// PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
// points to the end of the TCB + 0x7000. The pthread_descr structure is
// immediately in front of the TCB. TlsPreTcbSize() includes the size of the
@@ -375,9 +402,9 @@ uptr ThreadSelf() {
uptr thread_pointer;
asm("addi %0,13,%1" : "=r"(thread_pointer) : "I"(-kTlsTcbOffset));
descr_addr = thread_pointer - TlsPreTcbSize();
# else
# error "unsupported CPU arch"
# endif
#else
#error "unsupported CPU arch"
#endif
return descr_addr;
}
#endif // (x86_64 || i386 || MIPS) && SANITIZER_LINUX
@@ -385,15 +412,15 @@ uptr ThreadSelf() {
#if SANITIZER_FREEBSD
static void **ThreadSelfSegbase() {
void **segbase = 0;
# if defined(__i386__)
#if defined(__i386__)
// sysarch(I386_GET_GSBASE, segbase);
__asm __volatile("mov %%gs:0, %0" : "=r" (segbase));
# elif defined(__x86_64__)
#elif defined(__x86_64__)
// sysarch(AMD64_GET_FSBASE, segbase);
__asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
# else
# error "unsupported CPU arch"
# endif
#else
#error "unsupported CPU arch"
#endif
return segbase;
}
@@ -404,7 +431,13 @@ uptr ThreadSelf() {
#if SANITIZER_NETBSD
static struct tls_tcb * ThreadSelfTlsTcb() {
return (struct tls_tcb *)_lwp_getprivate();
struct tls_tcb *tcb = nullptr;
#ifdef __HAVE___LWP_GETTCB_FAST
tcb = (struct tls_tcb *)__lwp_gettcb_fast();
#elif defined(__HAVE___LWP_GETPRIVATE_FAST)
tcb = (struct tls_tcb *)__lwp_getprivate_fast();
#endif
return tcb;
}
uptr ThreadSelf() {
@@ -428,19 +461,19 @@ int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
#if !SANITIZER_GO
static void GetTls(uptr *addr, uptr *size) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
#if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
*addr = ThreadSelf();
*size = GetTlsSize();
*addr -= *size;
*addr += ThreadDescriptorSize();
# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \
|| defined(__arm__)
#elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) || \
defined(__arm__) || SANITIZER_RISCV64
*addr = ThreadSelf();
*size = GetTlsSize();
# else
#else
*addr = 0;
*size = 0;
# endif
#endif
#elif SANITIZER_FREEBSD
void** segbase = ThreadSelfSegbase();
*addr = 0;
@@ -479,19 +512,19 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = 0;
*size = 0;
#else
# error "Unknown OS"
#error "Unknown OS"
#endif
}
#endif
#if !SANITIZER_GO
uptr GetTlsSize() {
#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD || \
#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD || \
SANITIZER_OPENBSD || SANITIZER_SOLARIS
uptr addr, size;
GetTls(&addr, &size);
return size;
#elif defined(__mips__) || defined(__powerpc64__)
#elif defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16);
#else
return g_tls_size;
@@ -526,11 +559,11 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
#if !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
typedef ElfW(Phdr) Elf_Phdr;
#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
#define Elf_Phdr XElf32_Phdr
#define dl_phdr_info xdl_phdr_info
#define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))
#endif // !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
#endif // !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
struct DlIteratePhdrData {
InternalMmapVectorNoCtor<LoadedModule> *modules;
@@ -697,19 +730,15 @@ u32 GetNumberOfCPUs() {
#elif SANITIZER_SOLARIS
return sysconf(_SC_NPROCESSORS_ONLN);
#else
#if defined(CPU_COUNT)
cpu_set_t CPUs;
CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
return CPU_COUNT(&CPUs);
#else
return 1;
#endif
#endif
}
#if SANITIZER_LINUX
# if SANITIZER_ANDROID
#if SANITIZER_ANDROID
static atomic_uint8_t android_log_initialized;
void AndroidLogInit() {
@@ -753,7 +782,7 @@ void SetAbortMessage(const char *str) {
if (&android_set_abort_message)
android_set_abort_message(str);
}
# else
#else
void AndroidLogInit() {}
static bool ShouldLogAfterPrintf() { return true; }
@@ -761,7 +790,7 @@ static bool ShouldLogAfterPrintf() { return true; }
void WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, "%s", s); }
void SetAbortMessage(const char *str) {}
# endif // SANITIZER_ANDROID
#endif // SANITIZER_ANDROID
void LogMessageOnPrintf(const char *str) {
if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())
@@ -776,7 +805,7 @@ void LogMessageOnPrintf(const char *str) {
// initialized after the vDSO function pointers, so if it exists, is not null
// and is not empty, we can use clock_gettime.
extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
INLINE bool CanUseVDSO() {
inline bool CanUseVDSO() {
// Bionic is safe, it checks for the vDSO function pointers to be initialized.
if (SANITIZER_ANDROID)
return true;
@@ -845,6 +874,41 @@ void ReExec() {
}
#endif // !SANITIZER_OPENBSD
void UnmapFromTo(uptr from, uptr to) {
if (to == from)
return;
CHECK(to >= from);
uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
if (UNLIKELY(internal_iserror(res))) {
Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n",
SanitizerToolName, to - from, to - from, (void *)from);
CHECK("unable to unmap" && 0);
}
}
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
uptr min_shadow_base_alignment,
UNUSED uptr &high_mem_end) {
const uptr granularity = GetMmapGranularity();
const uptr alignment =
Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
const uptr left_padding =
Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
const uptr map_size = shadow_size + left_padding + alignment;
const uptr map_start = (uptr)MmapNoAccess(map_size);
CHECK_NE(map_start, ~(uptr)0);
const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
UnmapFromTo(map_start, shadow_start - left_padding);
UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
return shadow_start;
}
} // namespace __sanitizer
#endif
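A worked instance of the over-map-and-trim arithmetic in MapDynamicShadow() above, with assumed numbers (4 KiB granularity, shadow_scale 3, min_shadow_base_alignment 0, so alignment is 32 KiB): whatever unaligned address the kernel hands back, rounding up inside the oversized reservation and unmapping both leftovers leaves an exactly aligned shadow.

static uptr ExampleShadowStart() {
  uptr granularity  = 0x1000;                  // assumed: 4 KiB pages
  uptr alignment    = granularity << 3;        // 0x8000 (shadow_scale == 3)
  uptr left_padding = granularity;
  uptr shadow_size  = 0x10000;                 // already granularity-rounded
  uptr map_size     = shadow_size + left_padding + alignment;
  uptr map_start    = 0x7f0000003000;          // whatever MmapNoAccess() gave
  uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
  // shadow_start == 0x7f0000008000; UnmapFromTo() then releases
  // [map_start, shadow_start - left_padding) and
  // [shadow_start + shadow_size, map_start + map_size).
  (void)map_size;
  return shadow_start;
}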

View file

@@ -27,7 +27,6 @@
#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_ptrauth.h"
@@ -38,7 +37,7 @@
extern char **environ;
#endif
#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
#if defined(__has_include) && __has_include(<os/trace.h>)
#define SANITIZER_OS_TRACE 1
#include <os/trace.h>
#else
@@ -138,6 +137,10 @@ int internal_mprotect(void *addr, uptr length, int prot) {
return mprotect(addr, length, prot);
}
int internal_madvise(uptr addr, uptr length, int advice) {
return madvise((void *)addr, length, advice);
}
uptr internal_close(fd_t fd) {
return close(fd);
}
@@ -388,7 +391,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
// pthread_get_stacksize_np() returns an incorrect stack size for the main
// thread on Mavericks. See
// https://github.com/google/sanitizers/issues/261
if ((GetMacosVersion() >= MACOS_VERSION_MAVERICKS) && at_initialization &&
if ((GetMacosAlignedVersion() >= MacosVersion(10, 9)) && at_initialization &&
stacksize == (1 << 19)) {
struct rlimit rl;
CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
@@ -607,68 +610,111 @@ HandleSignalMode GetHandleSignalMode(int signum) {
return result;
}
MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
// Offset example:
// XNU 17 -- macOS 10.13 -- iOS 11 -- tvOS 11 -- watchOS 4
constexpr u16 GetOSMajorKernelOffset() {
if (TARGET_OS_OSX) return 4;
if (TARGET_OS_IOS || TARGET_OS_TV) return 6;
if (TARGET_OS_WATCH) return 13;
}
MacosVersion GetMacosVersionInternal() {
int mib[2] = { CTL_KERN, KERN_OSRELEASE };
char version[100];
uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
// Get the version length.
CHECK_NE(internal_sysctl(mib, 2, 0, &len, 0, 0), -1);
CHECK_LT(len, maxlen);
CHECK_NE(internal_sysctl(mib, 2, version, &len, 0, 0), -1);
using VersStr = char[64];
// Expect <major>.<minor>(.<patch>)
CHECK_GE(len, 3);
const char *p = version;
int major = internal_simple_strtoll(p, &p, /*base=*/10);
if (*p != '.') return MACOS_VERSION_UNKNOWN;
static void GetOSVersion(VersStr vers) {
uptr len = sizeof(VersStr);
if (SANITIZER_IOSSIM) {
const char *vers_env = GetEnv("SIMULATOR_RUNTIME_VERSION");
if (!vers_env) {
Report("ERROR: Running in simulator but SIMULATOR_RUNTIME_VERSION env "
"var is not set.\n");
Die();
}
len = internal_strlcpy(vers, vers_env, len);
} else {
int res =
internal_sysctlbyname("kern.osproductversion", vers, &len, nullptr, 0);
if (res) {
// Fallback for XNU 17 (macOS 10.13) and below that do not provide the
// `kern.osproductversion` property.
u16 kernel_major = GetDarwinKernelVersion().major;
u16 offset = GetOSMajorKernelOffset();
CHECK_LE(kernel_major, 17);
CHECK_GE(kernel_major, offset);
u16 os_major = kernel_major - offset;
auto format = TARGET_OS_OSX ? "10.%d" : "%d.0";
len = internal_snprintf(vers, len, format, os_major);
}
}
CHECK_LT(len, sizeof(VersStr));
}
void ParseVersion(const char *vers, u16 *major, u16 *minor) {
// Format: <major>.<minor>[.<patch>]\0
CHECK_GE(internal_strlen(vers), 3);
const char *p = vers;
*major = internal_simple_strtoll(p, &p, /*base=*/10);
CHECK_EQ(*p, '.');
p += 1;
int minor = internal_simple_strtoll(p, &p, /*base=*/10);
if (*p != '.') return MACOS_VERSION_UNKNOWN;
*minor = internal_simple_strtoll(p, &p, /*base=*/10);
}
switch (major) {
case 11: return MACOS_VERSION_LION;
case 12: return MACOS_VERSION_MOUNTAIN_LION;
case 13: return MACOS_VERSION_MAVERICKS;
case 14: return MACOS_VERSION_YOSEMITE;
case 15: return MACOS_VERSION_EL_CAPITAN;
case 16: return MACOS_VERSION_SIERRA;
case 17: return MACOS_VERSION_HIGH_SIERRA;
case 18: return MACOS_VERSION_MOJAVE;
case 19: return MACOS_VERSION_CATALINA;
default:
if (major < 9) return MACOS_VERSION_UNKNOWN;
return MACOS_VERSION_UNKNOWN_NEWER;
// Aligned versions example:
// macOS 10.15 -- iOS 13 -- tvOS 13 -- watchOS 6
static void MapToMacos(u16 *major, u16 *minor) {
if (TARGET_OS_OSX)
return;
if (TARGET_OS_IOS || TARGET_OS_TV)
*major += 2;
else if (TARGET_OS_WATCH)
*major += 9;
else
UNREACHABLE("unsupported platform");
if (*major >= 16) { // macOS 11+
*major -= 5;
} else { // macOS 10.15 and below
*minor = *major;
*major = 10;
}
}
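To make the mapping concrete, here is the iOS branch of MapToMacos() restated as a standalone function with two checked data points (TARGET_OS_IOS assumed, u16 widened to unsigned): iOS 13.4 folds to macOS 10.15, and iOS 14.0 lands on the macOS 11+ branch.

#include <cassert>

static void map_ios_to_macos(unsigned *major, unsigned *minor) {
  *major += 2;                 // iOS/tvOS offset from the macOS-aligned major
  if (*major >= 16) {          // macOS 11 and later
    *major -= 5;
  } else {                     // macOS 10.15 and below
    *minor = *major;
    *major = 10;
  }
}

int main() {
  unsigned major = 13, minor = 4;       // iOS 13.4
  map_ios_to_macos(&major, &minor);
  assert(major == 10 && minor == 15);   // aligned to macOS 10.15
  major = 14; minor = 0;                // iOS 14.0
  map_ios_to_macos(&major, &minor);
  assert(major == 11 && minor == 0);    // aligned to macOS 11.0
  return 0;
}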
MacosVersion GetMacosVersion() {
atomic_uint32_t *cache =
reinterpret_cast<atomic_uint32_t*>(&cached_macos_version);
MacosVersion result =
static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire));
if (result == MACOS_VERSION_UNINITIALIZED) {
result = GetMacosVersionInternal();
atomic_store(cache, result, memory_order_release);
static MacosVersion GetMacosAlignedVersionInternal() {
VersStr vers;
GetOSVersion(vers);
u16 major, minor;
ParseVersion(vers, &major, &minor);
MapToMacos(&major, &minor);
return MacosVersion(major, minor);
}
static_assert(sizeof(MacosVersion) == sizeof(atomic_uint32_t::Type),
"MacosVersion cache size");
static atomic_uint32_t cached_macos_version;
MacosVersion GetMacosAlignedVersion() {
atomic_uint32_t::Type result =
atomic_load(&cached_macos_version, memory_order_acquire);
if (!result) {
MacosVersion version = GetMacosAlignedVersionInternal();
result = *reinterpret_cast<atomic_uint32_t::Type *>(&version);
atomic_store(&cached_macos_version, result, memory_order_release);
}
return result;
return *reinterpret_cast<MacosVersion *>(&result);
}
DarwinKernelVersion GetDarwinKernelVersion() {
char buf[100];
size_t len = sizeof(buf);
int res = internal_sysctlbyname("kern.osrelease", buf, &len, nullptr, 0);
VersStr vers;
uptr len = sizeof(VersStr);
int res = internal_sysctlbyname("kern.osrelease", vers, &len, nullptr, 0);
CHECK_EQ(res, 0);
CHECK_LT(len, sizeof(VersStr));
// Format: <major>.<minor>.<patch>\0
CHECK_GE(len, 6);
const char *p = buf;
u16 major = internal_simple_strtoll(p, &p, /*base=*/10);
CHECK_EQ(*p, '.');
p += 1;
u16 minor = internal_simple_strtoll(p, &p, /*base=*/10);
u16 major, minor;
ParseVersion(vers, &major, &minor);
return DarwinKernelVersion(major, minor);
}
@@ -719,7 +765,7 @@ void LogFullErrorReport(const char *buffer) {
#if !SANITIZER_GO
// Log with os_trace. This will make it into the crash log.
#if SANITIZER_OS_TRACE
if (GetMacosVersion() >= MACOS_VERSION_YOSEMITE) {
if (GetMacosAlignedVersion() >= MacosVersion(10, 10)) {
// os_trace requires the message (format parameter) to be a string literal.
if (internal_strncmp(SanitizerToolName, "AddressSanitizer",
sizeof("AddressSanitizer") - 1) == 0)
@@ -808,6 +854,19 @@ void SignalContext::InitPcSpBp() {
GetPcSpBp(context, &pc, &sp, &bp);
}
// ASan/TSan use mmap in a way that creates “deallocation gaps” which triggers
// EXC_GUARD exceptions on macOS 10.15+ (XNU 19.0+).
static void DisableMmapExcGuardExceptions() {
using task_exc_guard_behavior_t = uint32_t;
using task_set_exc_guard_behavior_t =
kern_return_t(task_t task, task_exc_guard_behavior_t behavior);
auto *set_behavior = (task_set_exc_guard_behavior_t *)dlsym(
RTLD_DEFAULT, "task_set_exc_guard_behavior");
if (set_behavior == nullptr) return;
const task_exc_guard_behavior_t task_exc_guard_none = 0;
set_behavior(mach_task_self(), task_exc_guard_none);
}
void InitializePlatformEarly() {
// Only use xnu_fast_mmap when on x86_64 and the kernel supports it.
use_xnu_fast_mmap =
@ -816,6 +875,8 @@ void InitializePlatformEarly() {
#else
false;
#endif
if (GetDarwinKernelVersion() >= DarwinKernelVersion(19, 0))
DisableMmapExcGuardExceptions();
}
#if !SANITIZER_GO
@@ -856,20 +917,10 @@ bool ReexecDisabled() {
return false;
}
extern "C" SANITIZER_WEAK_ATTRIBUTE double dyldVersionNumber;
static const double kMinDyldVersionWithAutoInterposition = 360.0;
bool DyldNeedsEnvVariable() {
// Although sanitizer support was added to LLVM on OS X 10.7+, GCC users
// still may want use them on older systems. On older Darwin platforms, dyld
// doesn't export dyldVersionNumber symbol and we simply return true.
if (!&dyldVersionNumber) return true;
static bool DyldNeedsEnvVariable() {
// If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
// DYLD_INSERT_LIBRARIES is not set. However, checking OS version via
// GetMacosVersion() doesn't work for the simulator. Let's instead check
// `dyldVersionNumber`, which is exported by dyld, against a known version
// number from the first OS release where this appeared.
return dyldVersionNumber < kMinDyldVersionWithAutoInterposition;
// DYLD_INSERT_LIBRARIES is not set.
return GetMacosAlignedVersion() < MacosVersion(10, 11);
}
void MaybeReexec() {
@@ -1082,6 +1133,53 @@ uptr GetMaxVirtualAddress() {
return GetMaxUserVirtualAddress();
}
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
uptr min_shadow_base_alignment, uptr &high_mem_end) {
const uptr granularity = GetMmapGranularity();
const uptr alignment =
Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
const uptr left_padding =
Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
uptr space_size = shadow_size_bytes + left_padding;
uptr largest_gap_found = 0;
uptr max_occupied_addr = 0;
VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
uptr shadow_start =
FindAvailableMemoryRange(space_size, alignment, granularity,
&largest_gap_found, &max_occupied_addr);
// If the shadow doesn't fit, restrict the address space to make it fit.
if (shadow_start == 0) {
VReport(
2,
"Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
largest_gap_found, max_occupied_addr);
uptr new_max_vm = RoundDownTo(largest_gap_found << shadow_scale, alignment);
if (new_max_vm < max_occupied_addr) {
Report("Unable to find a memory range for dynamic shadow.\n");
Report(
"space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
"new_max_vm = %p\n",
space_size, largest_gap_found, max_occupied_addr, new_max_vm);
CHECK(0 && "cannot place shadow");
}
RestrictMemoryToMaxAddress(new_max_vm);
high_mem_end = new_max_vm - 1;
space_size = (high_mem_end >> shadow_scale) + left_padding;
VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
nullptr, nullptr);
if (shadow_start == 0) {
Report("Unable to find a memory range after restricting VM.\n");
CHECK(0 && "cannot place shadow after restricting vm");
}
}
CHECK_NE((uptr)0, shadow_start);
CHECK(IsAligned(shadow_start, alignment));
return shadow_start;
}
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
uptr *largest_gap_found,
uptr *max_occupied_addr) {

View file

@@ -30,37 +30,32 @@ struct MemoryMappingLayoutData {
bool current_instrumented;
};
enum MacosVersion {
MACOS_VERSION_UNINITIALIZED = 0,
MACOS_VERSION_UNKNOWN,
MACOS_VERSION_LION, // macOS 10.7; oldest currently supported
MACOS_VERSION_MOUNTAIN_LION,
MACOS_VERSION_MAVERICKS,
MACOS_VERSION_YOSEMITE,
MACOS_VERSION_EL_CAPITAN,
MACOS_VERSION_SIERRA,
MACOS_VERSION_HIGH_SIERRA,
MACOS_VERSION_MOJAVE,
MACOS_VERSION_CATALINA,
MACOS_VERSION_UNKNOWN_NEWER
};
struct DarwinKernelVersion {
template <typename VersionType>
struct VersionBase {
u16 major;
u16 minor;
DarwinKernelVersion(u16 major, u16 minor) : major(major), minor(minor) {}
VersionBase(u16 major, u16 minor) : major(major), minor(minor) {}
bool operator==(const DarwinKernelVersion &other) const {
bool operator==(const VersionType &other) const {
return major == other.major && minor == other.minor;
}
bool operator>=(const DarwinKernelVersion &other) const {
return major >= other.major ||
bool operator>=(const VersionType &other) const {
return major > other.major ||
(major == other.major && minor >= other.minor);
}
bool operator<(const VersionType &other) const { return !(*this >= other); }
};
MacosVersion GetMacosVersion();
struct MacosVersion : VersionBase<MacosVersion> {
MacosVersion(u16 major, u16 minor) : VersionBase(major, minor) {}
};
struct DarwinKernelVersion : VersionBase<DarwinKernelVersion> {
DarwinKernelVersion(u16 major, u16 minor) : VersionBase(major, minor) {}
};
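Usage, as it appears elsewhere in this merge; the CRTP parameter means each operator only accepts its own concrete version type, so mixing MacosVersion and DarwinKernelVersion in one comparison will not compile:

if (GetMacosAlignedVersion() >= MacosVersion(10, 11)) {
  // dyld interposes automatically; DYLD_INSERT_LIBRARIES is not required.
}
if (GetDarwinKernelVersion() >= DarwinKernelVersion(19, 0)) {
  // XNU 19+ (macOS 10.15): EXC_GUARD-related behavior applies.
}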
MacosVersion GetMacosAlignedVersion();
DarwinKernelVersion GetDarwinKernelVersion();
char **GetEnviron();
@@ -80,7 +75,7 @@ asm(".desc ___crashreporter_info__, 0x10");
namespace __sanitizer {
static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
INLINE void CRAppendCrashLogMessage(const char *msg) {
inline void CRAppendCrashLogMessage(const char *msg) {
BlockingMutexLock l(&crashreporter_info_mutex);
internal_strlcat(__crashreporter_info_buff__, msg,
sizeof(__crashreporter_info_buff__)); }

View file

@@ -110,6 +110,11 @@ int internal_mprotect(void *addr, uptr length, int prot) {
return _REAL(mprotect, addr, length, prot);
}
int internal_madvise(uptr addr, uptr length, int advice) {
DEFINE__REAL(int, madvise, void *a, uptr b, int c);
return _REAL(madvise, (void *)addr, length, advice);
}
uptr internal_close(fd_t fd) {
CHECK(&_sys_close);
return _sys_close(fd);

View file

@@ -52,6 +52,10 @@ int internal_mprotect(void *addr, uptr length, int prot) {
return mprotect(addr, length, prot);
}
int internal_madvise(uptr addr, uptr length, int advice) {
return madvise((void *)addr, length, advice);
}
int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
const void *newp, uptr newlen) {
Printf("internal_sysctlbyname not implemented for OpenBSD");

View file

@@ -132,6 +132,12 @@
# define SANITIZER_X32 0
#endif
#if defined(__i386__) || defined(_M_IX86)
# define SANITIZER_I386 1
#else
# define SANITIZER_I386 0
#endif
#if defined(__mips__)
# define SANITIZER_MIPS 1
# if defined(__mips64)
@@ -213,6 +219,12 @@
# define SANITIZER_MYRIAD2 0
#endif
#if defined(__riscv) && (__riscv_xlen == 64)
#define SANITIZER_RISCV64 1
#else
#define SANITIZER_RISCV64 0
#endif
// By default we allow to use SizeClassAllocator64 on 64-bit platform.
// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
// does not work well and we need to fallback to SizeClassAllocator32.
@@ -233,6 +245,8 @@
// will still work but will consume more memory for TwoLevelByteMap.
#if defined(__mips__)
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
#elif SANITIZER_RISCV64
#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
#elif defined(__aarch64__)
# if SANITIZER_MAC
// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM

View file

@@ -15,6 +15,7 @@
#include "sanitizer_glibc_version.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
#if SANITIZER_POSIX
# define SI_POSIX 1
@@ -240,6 +241,7 @@
(SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CLOCK_GETTIME \
(SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID SI_LINUX
#define SANITIZER_INTERCEPT_GETITIMER SI_POSIX
#define SANITIZER_INTERCEPT_TIME SI_POSIX
#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID || SI_SOLARIS
@@ -270,16 +272,17 @@
#define SANITIZER_INTERCEPT_SENDMSG SI_POSIX
#define SANITIZER_INTERCEPT_RECVMMSG SI_LINUX
#define SANITIZER_INTERCEPT_SENDMMSG SI_LINUX
#define SANITIZER_INTERCEPT_SYSMSG SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETPEERNAME SI_POSIX
#define SANITIZER_INTERCEPT_IOCTL SI_POSIX
#define SANITIZER_INTERCEPT_INET_ATON SI_POSIX
#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
#define SANITIZER_INTERCEPT_READDIR SI_POSIX
#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
#if SI_LINUX_NOT_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
defined(__s390__))
#if SI_LINUX_NOT_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
defined(__s390__) || SANITIZER_RISCV64)
#define SANITIZER_INTERCEPT_PTRACE 1
#else
#define SANITIZER_INTERCEPT_PTRACE 0
@@ -331,6 +334,7 @@
#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_SIGSETOPS \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_SIGSET_LOGICOPS SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGPENDING SI_POSIX
#define SANITIZER_INTERCEPT_SIGPROCMASK SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_SIGMASK SI_POSIX
@@ -341,7 +345,7 @@
#define SANITIZER_INTERCEPT_STATFS \
(SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_STATFS64 \
((SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID)
(((SI_MAC && !TARGET_CPU_ARM64) && !SI_IOS) || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATVFS \
(SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
@@ -381,6 +385,8 @@
#define SANITIZER_INTERCEPT_THR_EXIT SI_FREEBSD
#define SANITIZER_INTERCEPT_TMPNAM SI_POSIX
#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_PTSNAME SI_LINUX
#define SANITIZER_INTERCEPT_PTSNAME_R SI_LINUX
#define SANITIZER_INTERCEPT_TTYNAME SI_POSIX
#define SANITIZER_INTERCEPT_TTYNAME_R SI_POSIX
#define SANITIZER_INTERCEPT_TEMPNAM SI_POSIX
@@ -437,6 +443,7 @@
#define SANITIZER_INTERCEPT_FTIME \
(!SI_FREEBSD && !SI_NETBSD && !SI_OPENBSD && SI_POSIX)
#define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_XDRREC SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TSEARCH \
(SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID
@@ -494,17 +501,17 @@
#define SANITIZER_INTERCEPT_MMAP SI_POSIX
#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \
#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
SI_NOT_RTEMS)
SI_NOT_RTEMS && !SI_SOLARIS) // NOLINT
#define SANITIZER_INTERCEPT_MEMALIGN \
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_RTEMS)
#define SANITIZER_INTERCEPT_PVALLOC \
#define SANITIZER_INTERCEPT_PVALLOC \
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
SI_NOT_RTEMS)
#define SANITIZER_INTERCEPT_CFREE \
SI_NOT_RTEMS && !SI_SOLARIS) // NOLINT
#define SANITIZER_INTERCEPT_CFREE \
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
SI_NOT_RTEMS)
SI_NOT_RTEMS && !SI_SOLARIS) // NOLINT
#define SANITIZER_INTERCEPT_REALLOCARRAY SI_POSIX
#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC && SI_NOT_RTEMS)
#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE \
@@ -544,7 +551,8 @@
#define SANITIZER_INTERCEPT_FGETLN (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_TTYENT SI_NETBSD
#define SANITIZER_INTERCEPT_PROTOENT SI_NETBSD
#define SANITIZER_INTERCEPT_PROTOENT (SI_NETBSD || SI_LINUX)
#define SANITIZER_INTERCEPT_PROTOENT_R (SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_NETENT SI_NETBSD
#define SANITIZER_INTERCEPT_SETVBUF (SI_NETBSD || SI_FREEBSD || \
SI_LINUX || SI_MAC)
@@ -596,7 +604,10 @@
#define SANITIZER_INTERCEPT_QSORT \
(SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
#define SANITIZER_INTERCEPT_QSORT_R (SI_LINUX && !SI_ANDROID)
#define SANITIZER_INTERCEPT_SIGALTSTACK SI_POSIX
// sigaltstack on i386 macOS cannot be intercepted due to setjmp()
// calling it and assuming that it does not clobber registers.
#define SANITIZER_INTERCEPT_SIGALTSTACK \
(SI_POSIX && !(SANITIZER_MAC && SANITIZER_I386))
#define SANITIZER_INTERCEPT_UNAME (SI_POSIX && !SI_FREEBSD)
#define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD

View file

@@ -81,8 +81,6 @@
#include <sys/shm.h>
#undef _KERNEL
#undef INLINE // to avoid clashes with sanitizers' definitions
#undef IOC_DIRMASK
// Include these after system headers to avoid name clashes and ambiguities.

View file

@@ -26,12 +26,9 @@
// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
// are not defined anywhere in userspace headers. Fake them. This seems to work
// fine with newer headers, too. Beware that with <sys/stat.h>, struct stat
// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
// Also, for some platforms (e.g. mips) there are additional members in the
// <sys/stat.h> struct stat:s.
// fine with newer headers, too.
#include <linux/posix_types.h>
#if defined(__x86_64__)
#if defined(__x86_64__) || defined(__mips__)
#include <sys/stat.h>
#else
#define ino_t __kernel_ino_t

View file

@@ -34,6 +34,7 @@
#include <sys/chio.h>
#include <sys/clockctl.h>
#include <sys/cpuio.h>
#include <sys/dkbad.h>
#include <sys/dkio.h>
#include <sys/drvctlio.h>
#include <sys/dvdio.h>
@@ -83,6 +84,7 @@
#include <sys/resource.h>
#include <sys/sem.h>
#include <sys/scsiio.h>
#include <sys/sha1.h>
#include <sys/sha2.h>
#include <sys/shm.h>
@@ -139,7 +141,158 @@
#include <dev/ir/irdaio.h>
#include <dev/isa/isvio.h>
#include <dev/isa/wtreg.h>
#if __has_include(<dev/iscsi/iscsi_ioctl.h>)
#include <dev/iscsi/iscsi_ioctl.h>
#else
/* Fallback for MKISCSI=no */
typedef struct {
uint32_t status;
uint32_t session_id;
uint32_t connection_id;
} iscsi_conn_status_parameters_t;
typedef struct {
uint32_t status;
uint16_t interface_version;
uint16_t major;
uint16_t minor;
uint8_t version_string[224];
} iscsi_get_version_parameters_t;
typedef struct {
uint32_t status;
uint32_t session_id;
uint32_t connection_id;
struct {
unsigned int immediate : 1;
} options;
uint64_t lun;
scsireq_t req; /* from <sys/scsiio.h> */
} iscsi_iocommand_parameters_t;
typedef enum {
ISCSI_AUTH_None = 0,
ISCSI_AUTH_CHAP = 1,
ISCSI_AUTH_KRB5 = 2,
ISCSI_AUTH_SRP = 3
} iscsi_auth_types_t;
typedef enum {
ISCSI_LOGINTYPE_DISCOVERY = 0,
ISCSI_LOGINTYPE_NOMAP = 1,
ISCSI_LOGINTYPE_MAP = 2
} iscsi_login_session_type_t;
typedef enum { ISCSI_DIGEST_None = 0, ISCSI_DIGEST_CRC32C = 1 } iscsi_digest_t;
typedef enum {
ISCSI_SESSION_TERMINATED = 1,
ISCSI_CONNECTION_TERMINATED,
ISCSI_RECOVER_CONNECTION,
ISCSI_DRIVER_TERMINATING
} iscsi_event_t;
typedef struct {
unsigned int mutual_auth : 1;
unsigned int is_secure : 1;
unsigned int auth_number : 4;
iscsi_auth_types_t auth_type[4];
} iscsi_auth_info_t;
typedef struct {
uint32_t status;
int socket;
struct {
unsigned int HeaderDigest : 1;
unsigned int DataDigest : 1;
unsigned int MaxConnections : 1;
unsigned int DefaultTime2Wait : 1;
unsigned int DefaultTime2Retain : 1;
unsigned int MaxRecvDataSegmentLength : 1;
unsigned int auth_info : 1;
unsigned int user_name : 1;
unsigned int password : 1;
unsigned int target_password : 1;
unsigned int TargetName : 1;
unsigned int TargetAlias : 1;
unsigned int ErrorRecoveryLevel : 1;
} is_present;
iscsi_auth_info_t auth_info;
iscsi_login_session_type_t login_type;
iscsi_digest_t HeaderDigest;
iscsi_digest_t DataDigest;
uint32_t session_id;
uint32_t connection_id;
uint32_t MaxRecvDataSegmentLength;
uint16_t MaxConnections;
uint16_t DefaultTime2Wait;
uint16_t DefaultTime2Retain;
uint16_t ErrorRecoveryLevel;
void *user_name;
void *password;
void *target_password;
void *TargetName;
void *TargetAlias;
} iscsi_login_parameters_t;
typedef struct {
uint32_t status;
uint32_t session_id;
} iscsi_logout_parameters_t;
typedef struct {
uint32_t status;
uint32_t event_id;
} iscsi_register_event_parameters_t;
typedef struct {
uint32_t status;
uint32_t session_id;
uint32_t connection_id;
} iscsi_remove_parameters_t;
typedef struct {
uint32_t status;
uint32_t session_id;
void *response_buffer;
uint32_t response_size;
uint32_t response_used;
uint32_t response_total;
uint8_t key[224];
} iscsi_send_targets_parameters_t;
typedef struct {
uint32_t status;
uint8_t InitiatorName[224];
uint8_t InitiatorAlias[224];
uint8_t ISID[6];
} iscsi_set_node_name_parameters_t;
typedef struct {
uint32_t status;
uint32_t event_id;
iscsi_event_t event_kind;
uint32_t session_id;
uint32_t connection_id;
uint32_t reason;
} iscsi_wait_event_parameters_t;
#define ISCSI_GET_VERSION _IOWR(0, 1, iscsi_get_version_parameters_t)
#define ISCSI_LOGIN _IOWR(0, 2, iscsi_login_parameters_t)
#define ISCSI_LOGOUT _IOWR(0, 3, iscsi_logout_parameters_t)
#define ISCSI_ADD_CONNECTION _IOWR(0, 4, iscsi_login_parameters_t)
#define ISCSI_RESTORE_CONNECTION _IOWR(0, 5, iscsi_login_parameters_t)
#define ISCSI_REMOVE_CONNECTION _IOWR(0, 6, iscsi_remove_parameters_t)
#define ISCSI_CONNECTION_STATUS _IOWR(0, 7, iscsi_conn_status_parameters_t)
#define ISCSI_SEND_TARGETS _IOWR(0, 8, iscsi_send_targets_parameters_t)
#define ISCSI_SET_NODE_NAME _IOWR(0, 9, iscsi_set_node_name_parameters_t)
#define ISCSI_IO_COMMAND _IOWR(0, 10, iscsi_iocommand_parameters_t)
#define ISCSI_REGISTER_EVENT _IOWR(0, 11, iscsi_register_event_parameters_t)
#define ISCSI_DEREGISTER_EVENT _IOWR(0, 12, iscsi_register_event_parameters_t)
#define ISCSI_WAIT_EVENT _IOWR(0, 13, iscsi_wait_event_parameters_t)
#define ISCSI_POLL_EVENT _IOWR(0, 14, iscsi_wait_event_parameters_t)
#endif
#include <dev/ofw/openfirmio.h>
#include <dev/pci/amrio.h>
#include <dev/pci/mlyreg.h>
@@ -372,7 +525,7 @@ struct urio_command {
#include "sanitizer_platform_limits_netbsd.h"
namespace __sanitizer {
void *__sanitizer_get_link_map_by_dlopen_handle(void* handle) {
void *__sanitizer_get_link_map_by_dlopen_handle(void *handle) {
void *p = nullptr;
return internal_dlinfo(handle, RTLD_DI_LINKMAP, &p) == 0 ? p : nullptr;
}

View file

@@ -21,8 +21,8 @@
namespace __sanitizer {
void *__sanitizer_get_link_map_by_dlopen_handle(void *handle);
# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
(link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
(link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
@@ -1024,12 +1024,10 @@ extern unsigned struct_RF_ProgressInfo_sz;
extern unsigned struct_nvlist_ref_sz;
extern unsigned struct_StringList_sz;
// A special value to mark ioctls that are not present on the target platform,
// when it can not be determined without including any system headers.
extern const unsigned IOCTL_NOT_PRESENT;
extern unsigned IOCTL_AFM_ADDFMAP;
extern unsigned IOCTL_AFM_DELFMAP;
extern unsigned IOCTL_AFM_CLEANFMAP;

View file

@@ -90,7 +90,8 @@
#if SANITIZER_LINUX
# include <utime.h>
# include <sys/ptrace.h>
# if defined(__mips64) || defined(__aarch64__) || defined(__arm__)
#if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \
SANITIZER_RISCV64
# include <asm/ptrace.h>
# ifdef __arm__
typedef struct user_fpregs elf_fpregset_t;
@@ -170,9 +171,9 @@ typedef struct user_fpregs elf_fpregset_t;
namespace __sanitizer {
unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat);
#if !SANITIZER_IOS
#if !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64)
unsigned struct_stat64_sz = sizeof(struct stat64);
#endif // !SANITIZER_IOS
#endif // !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64)
unsigned struct_rusage_sz = sizeof(struct rusage);
unsigned struct_tm_sz = sizeof(struct tm);
unsigned struct_passwd_sz = sizeof(struct passwd);
@@ -197,9 +198,9 @@ namespace __sanitizer {
unsigned struct_regex_sz = sizeof(regex_t);
unsigned struct_regmatch_sz = sizeof(regmatch_t);
#if SANITIZER_MAC && !SANITIZER_IOS
#if (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
unsigned struct_statfs64_sz = sizeof(struct statfs64);
#endif // SANITIZER_MAC && !SANITIZER_IOS
#endif // (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
#if !SANITIZER_ANDROID
unsigned struct_fstab_sz = sizeof(struct fstab);
@@ -229,9 +230,9 @@ namespace __sanitizer {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
// Use pre-computed size of struct ustat to avoid <sys/ustat.h> which
// has been removed from glibc 2.28.
#if defined(__aarch64__) || defined(__s390x__) || defined (__mips64) \
|| defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) \
|| defined(__x86_64__) || (defined(__riscv) && __riscv_xlen == 64)
#if defined(__aarch64__) || defined(__s390x__) || defined(__mips64) || \
defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) || \
defined(__x86_64__) || SANITIZER_RISCV64
#define SIZEOF_STRUCT_USTAT 32
#elif defined(__arm__) || defined(__i386__) || defined(__mips__) \
|| defined(__powerpc__) || defined(__s390__) || defined(__sparc__)
@@ -303,13 +304,16 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
int glob_altdirfunc = GLOB_ALTDIRFUNC;
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
defined(__s390__))
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
defined(__s390__) || SANITIZER_RISCV64)
#if defined(__mips64) || defined(__powerpc64__) || defined(__arm__)
unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t);
#elif SANITIZER_RISCV64
unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct __riscv_q_ext_state);
#elif defined(__aarch64__)
unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state);
@@ -321,7 +325,8 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
#endif // __mips64 || __powerpc64__ || __aarch64__
#if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \
defined(__aarch64__) || defined(__arm__) || defined(__s390__)
defined(__aarch64__) || defined(__arm__) || defined(__s390__) || \
SANITIZER_RISCV64
unsigned struct_user_fpxregs_struct_sz = 0;
#else
unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);

View file

@ -83,7 +83,7 @@ const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__mips__)
const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
? FIRST_32_SECOND_64(104, 128)
: FIRST_32_SECOND_64(144, 216);
: FIRST_32_SECOND_64(160, 216);
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__s390__) && !defined(__s390x__)
const unsigned struct_kernel_stat_sz = 64;
@ -99,9 +99,9 @@ const unsigned struct_kernel_stat64_sz = 144;
const unsigned struct___old_kernel_stat_sz = 0;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__riscv) && __riscv_xlen == 64
#elif SANITIZER_RISCV64
const unsigned struct_kernel_stat_sz = 128;
const unsigned struct_kernel_stat64_sz = 104;
const unsigned struct_kernel_stat64_sz = 0; // RISCV64 does not use stat64
#endif
struct __sanitizer_perf_event_attr {
unsigned type;
@ -704,6 +704,12 @@ struct __sanitizer_dl_phdr_info {
extern unsigned struct_ElfW_Phdr_sz;
#endif
struct __sanitizer_protoent {
char *p_name;
char **p_aliases;
int p_proto;
};
struct __sanitizer_addrinfo {
int ai_flags;
int ai_family;
@ -798,7 +804,7 @@ typedef void __sanitizer_FILE;
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
defined(__s390__))
defined(__s390__) || SANITIZER_RISCV64)
extern unsigned struct_user_regs_struct_sz;
extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz;

View file

@ -202,7 +202,8 @@ CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
CHECK_TYPE_SIZE(glob_t);
// There are additional fields we are not interested in.
COMPILER_CHECK(sizeof(__sanitizer_glob_t) <= sizeof(glob_t));
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
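The switch from CHECK_TYPE_SIZE(glob_t) to COMPILER_CHECK(sizeof(__sanitizer_glob_t) <= sizeof(glob_t)) deliberately relaxes the invariant: the shadow type only has to cover the fields the interceptors read, not match the libc type byte for byte. A minimal standalone sketch of that idiom, using plain static_assert and a hypothetical my_glob_t in place of the runtime's macros:

#include <glob.h>
#include <cstddef>

// Shadow struct mirroring only the leading fields the interceptors touch.
struct my_glob_t {
  size_t gl_pathc;
  char **gl_pathv;
  size_t gl_offs;
};

// The shadow may be shorter than the real type, never longer, and the
// fields it does declare must sit at the same offsets.
static_assert(sizeof(my_glob_t) <= sizeof(glob_t), "glob_t layout drift");
static_assert(offsetof(my_glob_t, gl_pathv) == offsetof(glob_t, gl_pathv),
              "gl_pathv offset drift");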

View file

@ -293,7 +293,7 @@ uptr SignalContext::GetAddress() const {
bool SignalContext::IsMemoryAccess() const {
auto si = static_cast<const siginfo_t *>(siginfo);
return si->si_signo == SIGSEGV;
return si->si_signo == SIGSEGV || si->si_signo == SIGBUS;
}
int SignalContext::GetType() const {
@ -354,11 +354,11 @@ int GetNamedMappingFd(const char *name, uptr size, int *flags) {
int fd = ReserveStandardFds(
internal_open(shmname, O_RDWR | O_CREAT | O_TRUNC | o_cloexec, S_IRWXU));
CHECK_GE(fd, 0);
if (!o_cloexec) {
int res = fcntl(fd, F_SETFD, FD_CLOEXEC);
CHECK_EQ(0, res);
}
int res = internal_ftruncate(fd, size);
#if !defined(O_CLOEXEC)
res = fcntl(fd, F_SETFD, FD_CLOEXEC);
CHECK_EQ(0, res);
#endif
CHECK_EQ(0, res);
res = internal_unlink(shmname);
CHECK_EQ(0, res);
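The rewrite replaces the runtime o_cloexec variable with a compile-time #if !defined(O_CLOEXEC) fallback: when the headers provide O_CLOEXEC the flag is set atomically at open time, otherwise fcntl(2) sets it afterwards. A self-contained sketch of the pattern, with a hypothetical open_cloexec helper rather than the runtime's code:

#include <fcntl.h>
#include <sys/stat.h>

static int open_cloexec(const char *path) {
#ifdef O_CLOEXEC
  // Atomic: no window in which a concurrent fork/exec can leak the fd.
  int fd = open(path, O_RDWR | O_CREAT | O_TRUNC | O_CLOEXEC, S_IRWXU);
#else
  int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU);
  if (fd >= 0)
    fcntl(fd, F_SETFD, FD_CLOEXEC);  // non-atomic: a fork may race this
#endif
  return fd;
}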

View file

@ -42,6 +42,7 @@ uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset);
uptr internal_munmap(void *addr, uptr length);
int internal_mprotect(void *addr, uptr length, int prot);
int internal_madvise(uptr addr, uptr length, int advice);
// OS
uptr internal_filesize(fd_t fd); // -1 on error.

View file

@ -61,27 +61,24 @@ void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
uptr beg_aligned = RoundUpTo(beg, page_size);
uptr end_aligned = RoundDownTo(end, page_size);
if (beg_aligned < end_aligned)
// In the default Solaris compilation environment, madvise() is declared
// to take a caddr_t arg; casting it to void * results in an invalid
// conversion error, so use char * instead.
madvise((char *)beg_aligned, end_aligned - beg_aligned,
SANITIZER_MADVISE_DONTNEED);
internal_madvise(beg_aligned, end_aligned - beg_aligned,
SANITIZER_MADVISE_DONTNEED);
}
void SetShadowRegionHugePageMode(uptr addr, uptr size) {
#ifdef MADV_NOHUGEPAGE // May not be defined on old systems.
if (common_flags()->no_huge_pages_for_shadow)
madvise((char *)addr, size, MADV_NOHUGEPAGE);
internal_madvise(addr, size, MADV_NOHUGEPAGE);
else
madvise((char *)addr, size, MADV_HUGEPAGE);
internal_madvise(addr, size, MADV_HUGEPAGE);
#endif // MADV_NOHUGEPAGE
}
bool DontDumpShadowMemory(uptr addr, uptr length) {
#if defined(MADV_DONTDUMP)
return madvise((char *)addr, length, MADV_DONTDUMP) == 0;
return internal_madvise(addr, length, MADV_DONTDUMP) == 0;
#elif defined(MADV_NOCORE)
return madvise((char *)addr, length, MADV_NOCORE) == 0;
return internal_madvise(addr, length, MADV_NOCORE) == 0;
#else
return true;
#endif // MADV_DONTDUMP
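Routing these calls through internal_madvise lets each platform hide its own prototype quirks (like the Solaris caddr_t declaration noted in the removed comment) behind one signature. The rounding logic in ReleaseMemoryPagesToOS can be sketched standalone like this (release_range is a hypothetical name):

#include <sys/mman.h>
#include <unistd.h>
#include <cstdint>

// Return whole pages inside [beg, end) to the OS; partial pages at either
// edge are kept because they may still back live data.
static void release_range(void *beg, void *end) {
  const uintptr_t page = (uintptr_t)sysconf(_SC_PAGESIZE);
  uintptr_t b = ((uintptr_t)beg + page - 1) & ~(page - 1);  // round up
  uintptr_t e = (uintptr_t)end & ~(page - 1);               // round down
  if (b < e)
    madvise((void *)b, e - b, MADV_DONTNEED);
}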

View file

@ -35,7 +35,8 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;
if (data_.current >= last) return false;
prxmap_t *xmapentry = (prxmap_t*)data_.current;
prxmap_t *xmapentry =
const_cast<prxmap_t *>(reinterpret_cast<const prxmap_t *>(data_.current));
segment->start = (uptr)xmapentry->pr_vaddr;
segment->end = (uptr)(xmapentry->pr_vaddr + xmapentry->pr_size);

View file

@ -18,4 +18,6 @@
#define ptrauth_string_discriminator(__string) ((int)0)
#endif
#define STRIP_PC(pc) ((uptr)ptrauth_strip(pc, 0))
#endif // SANITIZER_PTRAUTH_H

View file

@ -53,7 +53,10 @@ INTERCEPTOR(uptr, signal, int signum, uptr handler) {
INTERCEPTOR(int, sigaction_symname, int signum,
const __sanitizer_sigaction *act, __sanitizer_sigaction *oldact) {
if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0;
if (GetHandleSignalMode(signum) == kHandleSignalExclusive) {
if (!oldact) return 0;
act = nullptr;
}
SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact);
}
#define INIT_SIGACTION COMMON_INTERCEPT_FUNCTION(sigaction_symname)
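The effect of the new branch: a read-only sigaction() query still succeeds for exclusively handled signals, while an attempt to install a handler is downgraded to a query. The same control flow in a hypothetical standalone form:

#include <signal.h>

// exclusive models GetHandleSignalMode(signum) == kHandleSignalExclusive.
static int guarded_sigaction(int signum, const struct sigaction *act,
                             struct sigaction *oldact, bool exclusive) {
  if (exclusive) {
    if (!oldact) return 0;  // pure install attempt: silently ignore
    act = nullptr;          // query: report the old handler, install nothing
  }
  return sigaction(signum, act, oldact);
}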

View file

@ -74,6 +74,20 @@ DECLARE__REAL_AND_INTERNAL(int, mprotect, void *addr, uptr length, int prot) {
return _REAL(mprotect)(addr, length, prot);
}
// Illumos' declaration of madvise cannot be made visible if _XOPEN_SOURCE
// is defined as g++ does on Solaris.
//
// This declaration is consistent with Solaris 11.4. Both Illumos and Solaris
// versions older than 11.4 declared madvise with a caddr_t as the first
// argument, but we don't currently support Solaris versions older than 11.4,
// and as mentioned above the declaration is not visible on Illumos so we can
// use any declaration we like on Illumos.
extern "C" int madvise(void *, size_t, int);
int internal_madvise(uptr addr, uptr length, int advice) {
return madvise((void *)addr, length, advice);
}
DECLARE__REAL_AND_INTERNAL(uptr, close, fd_t fd) {
return _REAL(close)(fd);
}
@ -146,10 +160,6 @@ DECLARE__REAL_AND_INTERNAL(uptr, sched_yield, void) {
return sched_yield();
}
DECLARE__REAL_AND_INTERNAL(void, _exit, int exitcode) {
_exit(exitcode);
}
DECLARE__REAL_AND_INTERNAL(uptr, execve, const char *filename,
char *const argv[], char *const envp[]) {
return _REAL(execve)(filename, argv, envp);

View file

@ -115,6 +115,12 @@ void StackDepotUnlockAll() {
theDepot.UnlockAll();
}
void StackDepotPrintAll() {
#if !SANITIZER_GO
theDepot.PrintAll();
#endif
}
bool StackDepotReverseMap::IdDescPair::IdComparator(
const StackDepotReverseMap::IdDescPair &a,
const StackDepotReverseMap::IdDescPair &b) {

View file

@ -41,6 +41,7 @@ StackTrace StackDepotGet(u32 id);
void StackDepotLockAll();
void StackDepotUnlockAll();
void StackDepotPrintAll();
// Instantiating this class creates a snapshot of StackDepot which can be
// efficiently queried with StackDepotGet(). You can use it concurrently with

View file

@ -13,9 +13,11 @@
#ifndef SANITIZER_STACKDEPOTBASE_H
#define SANITIZER_STACKDEPOTBASE_H
#include <stdio.h>
#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_persistent_allocator.h"
namespace __sanitizer {
@ -34,6 +36,7 @@ class StackDepotBase {
void LockAll();
void UnlockAll();
void PrintAll();
private:
static Node *find(Node *s, args_type args, u32 hash);
@ -172,6 +175,21 @@ void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
}
}
template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::PrintAll() {
for (int i = 0; i < kTabSize; ++i) {
atomic_uintptr_t *p = &tab[i];
lock(p);
uptr v = atomic_load(p, memory_order_relaxed);
Node *s = (Node *)(v & ~1UL);
for (; s; s = s->link) {
Printf("Stack for id %u:\n", s->id);
s->load().Print();
}
unlock(p, s);
}
}
} // namespace __sanitizer
#endif // SANITIZER_STACKDEPOTBASE_H
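The v & ~1UL masking in PrintAll works because each hash-table bucket holds a node pointer whose low bit doubles as the bucket's lock flag. A minimal sketch of that tagged-pointer idiom (assumed names, std::atomic standing in for the runtime's atomics):

#include <atomic>
#include <cstdint>

struct Node { Node *link; };

// Nodes are at least 2-byte aligned, so bit 0 of the bucket word is free to
// serve as a lock bit; stripping it recovers the list head.
static Node *bucket_head(const std::atomic<uintptr_t> &bucket) {
  uintptr_t v = bucket.load(std::memory_order_relaxed);
  return reinterpret_cast<Node *>(v & ~uintptr_t(1));
}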

View file

@ -10,9 +10,11 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_stacktrace.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_platform.h"
namespace __sanitizer {
@ -21,6 +23,28 @@ uptr StackTrace::GetNextInstructionPc(uptr pc) {
return pc + 8;
#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__)
return pc + 4;
#elif SANITIZER_RISCV64
// Current check order is 4 -> 2 -> 6 -> 8
u8 InsnByte = *(u8 *)(pc);
if (((InsnByte & 0x3) == 0x3) && ((InsnByte & 0x1c) != 0x1c)) {
// xxxxxxxxxxxbbb11 | 32 bit | bbb != 111
return pc + 4;
}
if ((InsnByte & 0x3) != 0x3) {
// xxxxxxxxxxxxxxaa | 16 bit | aa != 11
return pc + 2;
}
// RISC-V encoding allows instructions to be up to 8 bytes long
if ((InsnByte & 0x3f) == 0x1f) {
// xxxxxxxxxx011111 | 48 bit |
return pc + 6;
}
if ((InsnByte & 0x7f) == 0x3f) {
// xxxxxxxxx0111111 | 64 bit |
return pc + 8;
}
// Bail out if we could not figure out the instruction size
return 0;
#else
return pc + 1;
#endif
@ -60,8 +84,8 @@ static inline uhwptr *GetCanonicFrame(uptr bp,
// Nope, this does not look right either. This means the frame after next does
// not have a valid frame pointer, but we can still extract the caller PC.
// Unfortunately, there is no way to decide between GCC and LLVM frame
// layouts. Assume GCC.
return bp_prev - 1;
// layouts. Assume LLVM.
return bp_prev;
#else
return (uhwptr*)bp;
#endif
@ -84,23 +108,19 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
IsAligned((uptr)frame, sizeof(*frame)) &&
size < max_depth) {
#ifdef __powerpc__
// PowerPC ABIs specify that the return address is saved on the
// *caller's* stack frame. Thus we must dereference the back chain
// to find the caller frame before extracting it.
// PowerPC ABIs specify that the return address is saved at offset
// 16 of the *caller's* stack frame. Thus we must dereference the
// back chain to find the caller frame before extracting it.
uhwptr *caller_frame = (uhwptr*)frame[0];
if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
!IsAligned((uptr)caller_frame, sizeof(uhwptr)))
break;
// For most ABIs the offset where the return address is saved is two
// register sizes. The exception is the SVR4 ABI, which uses an
// offset of only one register size.
#ifdef _CALL_SYSV
uhwptr pc1 = caller_frame[1];
#else
uhwptr pc1 = caller_frame[2];
#endif
#elif defined(__s390__)
uhwptr pc1 = frame[14];
#elif defined(__riscv)
// frame[-1] contains the return address
uhwptr pc1 = frame[-1];
#else
uhwptr pc1 = frame[1];
#endif
@ -113,7 +133,13 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
trace_buffer[size++] = (uptr) pc1;
}
bottom = (uptr)frame;
frame = GetCanonicFrame((uptr)frame[0], stack_top, bottom);
#if defined(__riscv)
// frame[-2] contains the fp of the previous frame
uptr new_bp = (uptr)frame[-2];
#else
uptr new_bp = (uptr)frame[0];
#endif
frame = GetCanonicFrame(new_bp, stack_top, bottom);
}
}
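The RISC-V length rules above follow the base encoding scheme: the low two bits separate compressed from standard instructions, and longer formats claim progressively more trailing one-bits. A tiny standalone check of the same rules (rv_insn_len is a hypothetical helper; test values chosen for illustration):

#include <cstdint>
#include <cstdio>

static int rv_insn_len(uint8_t b0) {
  if ((b0 & 0x3) != 0x3) return 2;    // xxxxxxaa, aa != 11: compressed
  if ((b0 & 0x1c) != 0x1c) return 4;  // xxxbbb11, bbb != 111: standard
  if ((b0 & 0x3f) == 0x1f) return 6;  // xx011111: 48-bit
  if ((b0 & 0x7f) == 0x3f) return 8;  // x0111111: 64-bit
  return 0;                           // reserved: bail out like the code above
}

int main() {
  // 0x13 is the low byte of addi (4-byte), 0x01 the low byte of c.nop (2-byte).
  printf("%d %d\n", rv_insn_len(0x13), rv_insn_len(0x01));  // prints: 4 2
}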

View file

@ -13,6 +13,7 @@
#define SANITIZER_STACKTRACE_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
namespace __sanitizer {
@ -85,6 +86,14 @@ uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
return pc - 4;
#elif defined(__sparc__) || defined(__mips__)
return pc - 8;
#elif SANITIZER_RISCV64
// RV-64 has variable instruction length...
// The C extension gives us 2-byte instructions,
// base RV-64 has 4-byte instructions,
// + the RISC-V architecture allows instructions up to 8 bytes.
// It seems difficult to figure out the exact instruction length -
// pc - 2 seems like a safe option for the purposes of stack tracing
return pc - 2;
#else
return pc - 1;
#endif
@ -143,9 +152,17 @@ struct BufferedStackTrace : public StackTrace {
friend class FastUnwindTest;
};
#if defined(__s390x__)
static const uptr kFrameSize = 160;
#elif defined(__s390__)
static const uptr kFrameSize = 96;
#else
static const uptr kFrameSize = 2 * sizeof(uhwptr);
#endif
// Check if given pointer points into allocated stack area.
static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr);
return frame > stack_bottom && frame < stack_top - kFrameSize;
}
} // namespace __sanitizer

View file

@ -26,17 +26,23 @@ void StackTrace::Print() const {
InternalScopedString frame_desc(GetPageSizeCached() * 2);
InternalScopedString dedup_token(GetPageSizeCached());
int dedup_frames = common_flags()->dedup_token_length;
bool symbolize = RenderNeedsSymbolization(common_flags()->stack_trace_format);
uptr frame_num = 0;
for (uptr i = 0; i < size && trace[i]; i++) {
// PCs in stack traces are actually the return addresses, that is,
// addresses of the next instructions after the call.
uptr pc = GetPreviousInstructionPc(trace[i]);
SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
SymbolizedStack *frames;
if (symbolize)
frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
else
frames = SymbolizedStack::New(pc);
CHECK(frames);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
frame_desc.clear();
RenderFrame(&frame_desc, common_flags()->stack_trace_format, frame_num++,
cur->info, common_flags()->symbolize_vs_style,
cur->info.address, symbolize ? &cur->info : nullptr,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
Printf("%s\n", frame_desc.data());
if (dedup_frames-- > 0) {
@ -108,7 +114,12 @@ void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
uptr out_buf_size) {
if (!out_buf_size) return;
pc = StackTrace::GetPreviousInstructionPc(pc);
SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
SymbolizedStack *frame;
bool symbolize = RenderNeedsSymbolization(fmt);
if (symbolize)
frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
else
frame = SymbolizedStack::New(pc);
if (!frame) {
internal_strncpy(out_buf, "<can't symbolize>", out_buf_size);
out_buf[out_buf_size - 1] = 0;
@ -121,7 +132,8 @@ void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
for (SymbolizedStack *cur = frame; cur && out_buf < out_end;
cur = cur->next) {
frame_desc.clear();
RenderFrame(&frame_desc, fmt, frame_num++, cur->info,
RenderFrame(&frame_desc, fmt, frame_num++, cur->info.address,
symbolize ? &cur->info : nullptr,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
if (!frame_desc.length())
@ -134,6 +146,7 @@ void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
}
CHECK(out_buf <= out_end);
*out_buf = 0;
frame->ClearAll();
}
SANITIZER_INTERFACE_ATTRIBUTE

View file

@ -107,8 +107,14 @@ static const char *DemangleFunctionName(const char *function) {
static const char kDefaultFormat[] = " #%n %p %F %L";
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
const AddressInfo &info, bool vs_style,
uptr address, const AddressInfo *info, bool vs_style,
const char *strip_path_prefix, const char *strip_func_prefix) {
// info will be null in the case where symbolization is not needed for the
// given format. This ensures that the code below will get a hard failure
// rather than print incorrect information in case RenderNeedsSymbolization
// ever ends up out of sync with this function. If non-null, the addresses
// should match.
CHECK(!info || address == info->address);
if (0 == internal_strcmp(format, "DEFAULT"))
format = kDefaultFormat;
for (const char *p = format; *p != '\0'; p++) {
@ -126,71 +132,70 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
buffer->append("%zu", frame_no);
break;
case 'p':
buffer->append("0x%zx", info.address);
buffer->append("0x%zx", address);
break;
case 'm':
buffer->append("%s", StripPathPrefix(info.module, strip_path_prefix));
buffer->append("%s", StripPathPrefix(info->module, strip_path_prefix));
break;
case 'o':
buffer->append("0x%zx", info.module_offset);
buffer->append("0x%zx", info->module_offset);
break;
case 'f':
buffer->append("%s",
DemangleFunctionName(
StripFunctionName(info.function, strip_func_prefix)));
buffer->append("%s", DemangleFunctionName(StripFunctionName(
info->function, strip_func_prefix)));
break;
case 'q':
buffer->append("0x%zx", info.function_offset != AddressInfo::kUnknown
? info.function_offset
buffer->append("0x%zx", info->function_offset != AddressInfo::kUnknown
? info->function_offset
: 0x0);
break;
case 's':
buffer->append("%s", StripPathPrefix(info.file, strip_path_prefix));
buffer->append("%s", StripPathPrefix(info->file, strip_path_prefix));
break;
case 'l':
buffer->append("%d", info.line);
buffer->append("%d", info->line);
break;
case 'c':
buffer->append("%d", info.column);
buffer->append("%d", info->column);
break;
// Smarter special cases.
case 'F':
// Function name and offset, if file is unknown.
if (info.function) {
buffer->append("in %s",
DemangleFunctionName(
StripFunctionName(info.function, strip_func_prefix)));
if (!info.file && info.function_offset != AddressInfo::kUnknown)
buffer->append("+0x%zx", info.function_offset);
if (info->function) {
buffer->append("in %s", DemangleFunctionName(StripFunctionName(
info->function, strip_func_prefix)));
if (!info->file && info->function_offset != AddressInfo::kUnknown)
buffer->append("+0x%zx", info->function_offset);
}
break;
case 'S':
// File/line information.
RenderSourceLocation(buffer, info.file, info.line, info.column, vs_style,
strip_path_prefix);
RenderSourceLocation(buffer, info->file, info->line, info->column,
vs_style, strip_path_prefix);
break;
case 'L':
// Source location, or module location.
if (info.file) {
RenderSourceLocation(buffer, info.file, info.line, info.column,
if (info->file) {
RenderSourceLocation(buffer, info->file, info->line, info->column,
vs_style, strip_path_prefix);
} else if (info.module) {
RenderModuleLocation(buffer, info.module, info.module_offset,
info.module_arch, strip_path_prefix);
} else if (info->module) {
RenderModuleLocation(buffer, info->module, info->module_offset,
info->module_arch, strip_path_prefix);
} else {
buffer->append("(<unknown module>)");
}
break;
case 'M':
// Module basename and offset, or PC.
if (info.address & kExternalPCBit)
{} // There PCs are not meaningful.
else if (info.module)
if (address & kExternalPCBit) {
// These PCs are not meaningful.
} else if (info->module) {
// Always strip the module name for %M.
RenderModuleLocation(buffer, StripModuleName(info.module),
info.module_offset, info.module_arch, "");
else
buffer->append("(%p)", (void *)info.address);
RenderModuleLocation(buffer, StripModuleName(info->module),
info->module_offset, info->module_arch, "");
} else {
buffer->append("(%p)", (void *)address);
}
break;
default:
Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
@ -200,6 +205,29 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
}
}
bool RenderNeedsSymbolization(const char *format) {
if (0 == internal_strcmp(format, "DEFAULT"))
format = kDefaultFormat;
for (const char *p = format; *p != '\0'; p++) {
if (*p != '%')
continue;
p++;
switch (*p) {
case '%':
break;
case 'n':
// frame_no
break;
case 'p':
// address
break;
default:
return true;
}
}
return false;
}
void RenderData(InternalScopedString *buffer, const char *format,
const DataInfo *DI, const char *strip_path_prefix) {
for (const char *p = format; *p != '\0'; p++) {
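RenderNeedsSymbolization enables the fast path added in stacktrace_libcdep.cpp above: a format that only ever prints the frame number (%n) and raw address (%p) never has to touch the symbolizer. A compact standalone equivalent (hypothetical name, plain strcmp instead of internal_strcmp):

#include <cstring>

static bool needs_symbolization(const char *fmt) {
  if (strcmp(fmt, "DEFAULT") == 0)
    fmt = " #%n %p %F %L";  // kDefaultFormat needs symbols for %F and %L
  for (const char *p = fmt; *p != '\0'; p++) {
    if (*p != '%')
      continue;
    p++;
    if (*p != '%' && *p != 'n' && *p != 'p')
      return true;  // any other specifier requires AddressInfo
  }
  return false;
}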

View file

@ -47,10 +47,12 @@ namespace __sanitizer {
// module+offset if it is known, or (<unknown module>) string.
// %M - prints module basename and offset, if it is known, or PC.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
const AddressInfo &info, bool vs_style,
uptr address, const AddressInfo *info, bool vs_style,
const char *strip_path_prefix = "",
const char *strip_func_prefix = "");
bool RenderNeedsSymbolization(const char *format);
void RenderSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column, bool vs_style,
const char *strip_path_prefix);

View file

@ -32,13 +32,11 @@ class SuspendedThreadsList {
// Can't declare pure virtual functions in sanitizer runtimes:
// __cxa_pure_virtual might be unavailable. Use UNIMPLEMENTED() instead.
virtual PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
uptr *sp) const {
virtual PtraceRegistersStatus GetRegistersAndSP(
uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
UNIMPLEMENTED();
}
// The buffer in GetRegistersAndSP should be at least this big.
virtual uptr RegisterCount() const { UNIMPLEMENTED(); }
virtual uptr ThreadCount() const { UNIMPLEMENTED(); }
virtual tid_t GetThreadID(uptr index) const { UNIMPLEMENTED(); }

View file

@ -13,10 +13,10 @@
#include "sanitizer_platform.h"
#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \
defined(__aarch64__) || defined(__powerpc64__) || \
defined(__s390__) || defined(__i386__) || \
defined(__arm__))
#if SANITIZER_LINUX && \
(defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
defined(__arm__) || SANITIZER_RISCV64)
#include "sanitizer_stoptheworld.h"
@ -31,7 +31,7 @@
#include <sys/types.h> // for pid_t
#include <sys/uio.h> // for iovec
#include <elf.h> // for NT_PRSTATUS
#if defined(__aarch64__) && !SANITIZER_ANDROID
#if (defined(__aarch64__) || SANITIZER_RISCV64) && !SANITIZER_ANDROID
// GLIBC 2.20+ sys/user does not include asm/ptrace.h
# include <asm/ptrace.h>
#endif
@ -89,14 +89,14 @@ class SuspendedThreadsListLinux : public SuspendedThreadsList {
public:
SuspendedThreadsListLinux() { thread_ids_.reserve(1024); }
tid_t GetThreadID(uptr index) const;
uptr ThreadCount() const;
tid_t GetThreadID(uptr index) const override;
uptr ThreadCount() const override;
bool ContainsTid(tid_t thread_id) const;
void Append(tid_t tid);
PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
uptr *sp) const;
uptr RegisterCount() const;
PtraceRegistersStatus GetRegistersAndSP(uptr index,
InternalMmapVector<uptr> *buffer,
uptr *sp) const override;
private:
InternalMmapVector<tid_t> thread_ids_;
@ -485,6 +485,9 @@ typedef user_regs_struct regs_struct;
#else
#define REG_SP rsp
#endif
#define ARCH_IOVEC_FOR_GETREGSET
// Compiler may use FP registers to store pointers.
static constexpr uptr kExtraRegs[] = {NT_X86_XSTATE, NT_FPREGSET};
#elif defined(__powerpc__) || defined(__powerpc64__)
typedef pt_regs regs_struct;
@ -501,11 +504,19 @@ typedef struct user regs_struct;
#elif defined(__aarch64__)
typedef struct user_pt_regs regs_struct;
#define REG_SP sp
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET
#elif SANITIZER_RISCV64
typedef struct user_regs_struct regs_struct;
#define REG_SP sp
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET
#elif defined(__s390__)
typedef _user_regs_struct regs_struct;
#define REG_SP gprs[15]
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET
#else
@ -533,24 +544,58 @@ void SuspendedThreadsListLinux::Append(tid_t tid) {
}
PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
uptr index, uptr *buffer, uptr *sp) const {
uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
pid_t tid = GetThreadID(index);
regs_struct regs;
constexpr uptr uptr_sz = sizeof(uptr);
int pterrno;
#ifdef ARCH_IOVEC_FOR_GETREGSET
struct iovec regset_io;
regset_io.iov_base = &regs;
regset_io.iov_len = sizeof(regs_struct);
bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
(void*)NT_PRSTATUS, (void*)&regset_io),
&pterrno);
auto append = [&](uptr regset) {
uptr size = buffer->size();
// NT_X86_XSTATE requires 64bit alignment.
uptr size_up = RoundUpTo(size, 8 / uptr_sz);
buffer->reserve(Max<uptr>(1024, size_up));
struct iovec regset_io;
for (;; buffer->resize(buffer->capacity() * 2)) {
buffer->resize(buffer->capacity());
uptr available_bytes = (buffer->size() - size_up) * uptr_sz;
regset_io.iov_base = buffer->data() + size_up;
regset_io.iov_len = available_bytes;
bool fail =
internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
(void *)regset, (void *)&regset_io),
&pterrno);
if (fail) {
VReport(1, "Could not get regset %p from thread %d (errno %d).\n",
(void *)regset, tid, pterrno);
buffer->resize(size);
return false;
}
// Far enough from the buffer size, no need to resize and repeat.
if (regset_io.iov_len + 64 < available_bytes)
break;
}
buffer->resize(size_up + RoundUpTo(regset_io.iov_len, uptr_sz) / uptr_sz);
return true;
};
buffer->clear();
bool fail = !append(NT_PRSTATUS);
if (!fail) {
// Accept the first available and do not report errors.
for (uptr regs : kExtraRegs)
if (regs && append(regs))
break;
}
#else
bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, nullptr,
&regs), &pterrno);
#endif
if (isErr) {
buffer->resize(RoundUpTo(sizeof(regs_struct), uptr_sz) / uptr_sz);
bool fail = internal_iserror(
internal_ptrace(PTRACE_GETREGS, tid, nullptr, buffer->data()), &pterrno);
if (fail)
VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
pterrno);
#endif
if (fail) {
// ESRCH means that the given thread is not suspended or already dead.
// Therefore it's unsafe to inspect its data (e.g. walk through stack) and
// we should notify caller about this.
@ -558,14 +603,10 @@ PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
: REGISTERS_UNAVAILABLE;
}
*sp = regs.REG_SP;
internal_memcpy(buffer, &regs, sizeof(regs));
*sp = reinterpret_cast<regs_struct *>(buffer->data())[0].REG_SP;
return REGISTERS_AVAILABLE;
}
uptr SuspendedThreadsListLinux::RegisterCount() const {
return sizeof(regs_struct) / sizeof(uptr);
}
} // namespace __sanitizer
#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
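The retry loop exists because PTRACE_GETREGSET truncates silently: the kernel writes at most iov_len bytes and stores the number actually written back into iov_len, so a result near the end of the buffer may mean truncation. A standalone sketch of the grow-and-retry idiom (get_regset is a hypothetical helper; the 64-byte margin mirrors the code above):

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <vector>

static bool get_regset(pid_t tid, unsigned long regset,
                       std::vector<char> *buf) {
  buf->resize(1024);
  for (;;) {
    struct iovec io = {buf->data(), buf->size()};
    if (ptrace(PTRACE_GETREGSET, tid, (void *)regset, &io) < 0)
      return false;
    if (io.iov_len + 64 < buf->size()) {  // comfortably inside: no truncation
      buf->resize(io.iov_len);
      return true;
    }
    buf->resize(buf->size() * 2);  // possibly truncated: retry with more room
  }
}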

View file

@ -31,15 +31,15 @@ class SuspendedThreadsListMac : public SuspendedThreadsList {
public:
SuspendedThreadsListMac() : threads_(1024) {}
tid_t GetThreadID(uptr index) const;
tid_t GetThreadID(uptr index) const override;
thread_t GetThread(uptr index) const;
uptr ThreadCount() const;
uptr ThreadCount() const override;
bool ContainsThread(thread_t thread) const;
void Append(thread_t thread);
PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
uptr *sp) const;
uptr RegisterCount() const;
PtraceRegistersStatus GetRegistersAndSP(uptr index,
InternalMmapVector<uptr> *buffer,
uptr *sp) const override;
private:
InternalMmapVector<SuspendedThreadInfo> threads_;
@ -142,7 +142,7 @@ void SuspendedThreadsListMac::Append(thread_t thread) {
}
PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
uptr index, uptr *buffer, uptr *sp) const {
uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
thread_t thread = GetThread(index);
regs_struct regs;
int err;
@ -159,7 +159,8 @@ PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
: REGISTERS_UNAVAILABLE;
}
internal_memcpy(buffer, &regs, sizeof(regs));
buffer->resize(RoundUpTo(sizeof(regs), sizeof(uptr)) / sizeof(uptr));
internal_memcpy(buffer->data(), &regs, sizeof(regs));
#if defined(__aarch64__) && defined(arm_thread_state64_get_sp)
*sp = arm_thread_state64_get_sp(regs);
#else
@ -173,9 +174,6 @@ PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
return REGISTERS_AVAILABLE;
}
uptr SuspendedThreadsListMac::RegisterCount() const {
return MACHINE_THREAD_STATE_COUNT;
}
} // namespace __sanitizer
#endif // SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__)) ||

View file

@ -57,9 +57,9 @@ class SuspendedThreadsListNetBSD : public SuspendedThreadsList {
bool ContainsTid(tid_t thread_id) const;
void Append(tid_t tid);
PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
PtraceRegistersStatus GetRegistersAndSP(uptr index,
InternalMmapVector<uptr> *buffer,
uptr *sp) const;
uptr RegisterCount() const;
private:
InternalMmapVector<tid_t> thread_ids_;
@ -131,7 +131,7 @@ bool ThreadSuspender::SuspendAllThreads() {
pl.pl_lwpid = 0;
int val;
while ((val = ptrace(op, pid_, (void *)&pl, sizeof(pl))) != -1 &&
while ((val = internal_ptrace(op, pid_, (void *)&pl, sizeof(pl))) != -1 &&
pl.pl_lwpid != 0) {
suspended_threads_list_.Append(pl.pl_lwpid);
VReport(2, "Appended thread %d in process %d.\n", pl.pl_lwpid, pid_);
@ -335,7 +335,7 @@ void SuspendedThreadsListNetBSD::Append(tid_t tid) {
}
PtraceRegistersStatus SuspendedThreadsListNetBSD::GetRegistersAndSP(
uptr index, uptr *buffer, uptr *sp) const {
uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
lwpid_t tid = GetThreadID(index);
pid_t ppid = internal_getppid();
struct reg regs;
@ -351,14 +351,12 @@ PtraceRegistersStatus SuspendedThreadsListNetBSD::GetRegistersAndSP(
}
*sp = PTRACE_REG_SP(&regs);
internal_memcpy(buffer, &regs, sizeof(regs));
buffer->resize(RoundUpTo(sizeof(regs), sizeof(uptr)) / sizeof(uptr));
internal_memcpy(buffer->data(), &regs, sizeof(regs));
return REGISTERS_AVAILABLE;
}
uptr SuspendedThreadsListNetBSD::RegisterCount() const {
return sizeof(struct reg) / sizeof(uptr);
}
} // namespace __sanitizer
#endif

View file

@ -12,6 +12,7 @@
#include "sanitizer_allocator_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
@ -258,6 +259,8 @@ class LLVMSymbolizerProcess : public SymbolizerProcess {
const char* const kSymbolizerArch = "--default-arch=x86_64";
#elif defined(__i386__)
const char* const kSymbolizerArch = "--default-arch=i386";
#elif SANITIZER_RISCV64
const char *const kSymbolizerArch = "--default-arch=riscv64";
#elif defined(__aarch64__)
const char* const kSymbolizerArch = "--default-arch=arm64";
#elif defined(__arm__)
@ -275,8 +278,8 @@ class LLVMSymbolizerProcess : public SymbolizerProcess {
#endif
const char *const inline_flag = common_flags()->symbolize_inline_frames
? "--inlining=true"
: "--inlining=false";
? "--inlines"
: "--no-inlines";
int i = 0;
argv[i++] = path_to_binary;
argv[i++] = inline_flag;

View file

@ -33,8 +33,15 @@ bool DlAddrSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
int result = dladdr((const void *)addr, &info);
if (!result) return false;
CHECK(addr >= reinterpret_cast<uptr>(info.dli_saddr));
stack->info.function_offset = addr - reinterpret_cast<uptr>(info.dli_saddr);
// Compute offset if possible. `dladdr()` doesn't always ensure that `addr >=
// sym_addr` so only compute the offset when this holds. Failure to find the
// function offset is not treated as a failure because it might still be
// possible to get the symbol name.
uptr sym_addr = reinterpret_cast<uptr>(info.dli_saddr);
if (addr >= sym_addr) {
stack->info.function_offset = addr - sym_addr;
}
const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
if (!demangled) return false;
stack->info.function = internal_strdup(demangled);
@ -123,7 +130,7 @@ class AtosSymbolizerProcess : public SymbolizerProcess {
argv[i++] = path_to_binary;
argv[i++] = "-p";
argv[i++] = &pid_str_[0];
if (GetMacosVersion() == MACOS_VERSION_MAVERICKS) {
if (GetMacosAlignedVersion() == MacosVersion(10, 9)) {
// On Mavericks atos prints a deprecation warning which we suppress by
// passing -d. The warning isn't present on other OSX versions, even the
// newer ones.
@ -219,10 +226,10 @@ bool AtosSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
start_address = reinterpret_cast<uptr>(info.dli_saddr);
}
// Only assig to `function_offset` if we were able to get the function's
// start address.
if (start_address != AddressInfo::kUnknown) {
CHECK(addr >= start_address);
// Only assign to `function_offset` if we were able to get the function's
// start address and we got a sensible `start_address` (dladdr doesn't always
// ensure that `addr >= sym_addr`).
if (start_address != AddressInfo::kUnknown && addr >= start_address) {
stack->info.function_offset = addr - start_address;
}
return true;
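Both fixes in this file guard the subtraction because dladdr(3) does not guarantee dli_saddr <= addr, and unsigned arithmetic would wrap instead of failing. The guarded pattern, as a hypothetical standalone helper (dladdr is available by default on macOS; glibc needs -D_GNU_SOURCE):

#include <dlfcn.h>
#include <cstdint>

static bool symbol_offset(const void *pc, uintptr_t *offset) {
  Dl_info info;
  if (!dladdr(pc, &info) || !info.dli_saddr)
    return false;
  uintptr_t p = (uintptr_t)pc, s = (uintptr_t)info.dli_saddr;
  if (p < s)
    return false;  // implausible symbol start: report no offset
  *offset = p - s;
  return true;
}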

View file

@ -83,11 +83,14 @@ void RenderData(InternalScopedString *buffer, const char *format,
buffer->append(kFormatData, DI->start);
}
bool RenderNeedsSymbolization(const char *format) { return false; }
// We don't support the stack_trace_format flag at all.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
const AddressInfo &info, bool vs_style,
uptr address, const AddressInfo *info, bool vs_style,
const char *strip_path_prefix, const char *strip_func_prefix) {
buffer->append(kFormatFrame, frame_no, info.address);
CHECK(!RenderNeedsSymbolization(format));
buffer->append(kFormatFrame, frame_no, address);
}
Symbolizer *Symbolizer::PlatformInit() {

View file

@ -78,13 +78,6 @@ static void InitializeSwiftDemangler() {
// Attempts to demangle a Swift name. The demangler will return nullptr if a
// non-Swift name is passed in.
const char *DemangleSwift(const char *name) {
if (!name) return nullptr;
// Check if we are dealing with a Swift mangled name first.
if (name[0] != '_' || name[1] != 'T') {
return nullptr;
}
if (swift_demangle_f)
return swift_demangle_f(name, internal_strlen(name), 0, 0, 0);
@ -321,9 +314,10 @@ class Addr2LinePool : public SymbolizerTool {
#if SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
bool __sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset,
char *Buffer, int MaxLength);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
__sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset,
char *Buffer, int MaxLength,
bool SymbolizeInlineFrames);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
bool __sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,
char *Buffer, int MaxLength);
@ -346,7 +340,8 @@ class InternalSymbolizer : public SymbolizerTool {
bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
bool result = __sanitizer_symbolize_code(
stack->info.module, stack->info.module_offset, buffer_, kBufferSize);
stack->info.module, stack->info.module_offset, buffer_, kBufferSize,
common_flags()->symbolize_inline_frames);
if (result) ParseSymbolizePCOutput(buffer_, stack);
return result;
}

View file

@ -33,7 +33,8 @@ void ReportErrorSummary(const char *error_type, const AddressInfo &info,
if (!common_flags()->print_summary) return;
InternalScopedString buff(kMaxSummaryLength);
buff.append("%s ", error_type);
RenderFrame(&buff, "%L %F", 0, info, common_flags()->symbolize_vs_style,
RenderFrame(&buff, "%L %F", 0, info.address, &info,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
ReportErrorSummary(buff.data(), alt_tool_name);
}
@ -47,14 +48,14 @@ bool ReportFile::SupportsColors() {
return SupportsColoredOutput(fd);
}
static INLINE bool ReportSupportsColors() {
static inline bool ReportSupportsColors() {
return report_file.SupportsColors();
}
#else // SANITIZER_FUCHSIA
// Fuchsia's logs always go through post-processing that handles colorization.
static INLINE bool ReportSupportsColors() { return true; }
static inline bool ReportSupportsColors() { return true; }
#endif // !SANITIZER_FUCHSIA

View file

@ -0,0 +1,174 @@
//===-- sanitizer_syscall_linux_riscv64.inc ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementations of internal_syscall and internal_iserror for Linux/riscv64.
//
//===----------------------------------------------------------------------===//
// About local register variables:
// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
//
// Kernel ABI...
// To my surprise I haven't found much information regarding it.
// Kernel source and internet browsing shows that:
// syscall number is passed in a7
// (http://man7.org/linux/man-pages/man2/syscall.2.html), results are returned
// in a0 and a1 (http://man7.org/linux/man-pages/man2/syscall.2.html), and
// arguments are passed in a0-a7 (see below)
//
// Regarding the arguments. The only "documentation" I could find is
// this comment (!!!) by Bruce Hold on google forums (!!!):
// https://groups.google.com/a/groups.riscv.org/forum/#!topic/sw-dev/exbrzM3GZDQ
// Confirmed by inspecting glibc sources.
// Great way to document things.
#define SYSCALL(name) __NR_##name
#define INTERNAL_SYSCALL_CLOBBERS "memory"
static uptr __internal_syscall(u64 nr) {
register u64 a7 asm("a7") = nr;
register u64 a0 asm("a0");
__asm__ volatile("ecall\n\t"
: "=r"(a0)
: "r"(a7)
: INTERNAL_SYSCALL_CLOBBERS);
return a0;
}
#define __internal_syscall0(n) (__internal_syscall)(n)
static uptr __internal_syscall(u64 nr, u64 arg1) {
register u64 a7 asm("a7") = nr;
register u64 a0 asm("a0") = arg1;
__asm__ volatile("ecall\n\t"
: "+r"(a0)
: "r"(a7)
: INTERNAL_SYSCALL_CLOBBERS);
return a0;
}
#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
register u64 a7 asm("a7") = nr;
register u64 a0 asm("a0") = arg1;
register u64 a1 asm("a1") = arg2;
__asm__ volatile("ecall\n\t"
: "+r"(a0)
: "r"(a7), "r"(a1)
: INTERNAL_SYSCALL_CLOBBERS);
return a0;
}
#define __internal_syscall2(n, a1, a2) \
(__internal_syscall)(n, (u64)(a1), (long)(a2))
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
register u64 a7 asm("a7") = nr;
register u64 a0 asm("a0") = arg1;
register u64 a1 asm("a1") = arg2;
register u64 a2 asm("a2") = arg3;
__asm__ volatile("ecall\n\t"
: "+r"(a0)
: "r"(a7), "r"(a1), "r"(a2)
: INTERNAL_SYSCALL_CLOBBERS);
return a0;
}
#define __internal_syscall3(n, a1, a2, a3) \
(__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
u64 arg4) {
register u64 a7 asm("a7") = nr;
register u64 a0 asm("a0") = arg1;
register u64 a1 asm("a1") = arg2;
register u64 a2 asm("a2") = arg3;
register u64 a3 asm("a3") = arg4;
__asm__ volatile("ecall\n\t"
: "+r"(a0)
: "r"(a7), "r"(a1), "r"(a2), "r"(a3)
: INTERNAL_SYSCALL_CLOBBERS);
return a0;
}
#define __internal_syscall4(n, a1, a2, a3, a4) \
(__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
long arg5) {
register u64 a7 asm("a7") = nr;
register u64 a0 asm("a0") = arg1;
register u64 a1 asm("a1") = arg2;
register u64 a2 asm("a2") = arg3;
register u64 a3 asm("a3") = arg4;
register u64 a4 asm("a4") = arg5;
__asm__ volatile("ecall\n\t"
: "+r"(a0)
: "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
: INTERNAL_SYSCALL_CLOBBERS);
return a0;
}
#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
(__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
(u64)(a5))
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
long arg5, long arg6) {
register u64 a7 asm("a7") = nr;
register u64 a0 asm("a0") = arg1;
register u64 a1 asm("a1") = arg2;
register u64 a2 asm("a2") = arg3;
register u64 a3 asm("a3") = arg4;
register u64 a4 asm("a4") = arg5;
register u64 a5 asm("a5") = arg6;
__asm__ volatile("ecall\n\t"
: "+r"(a0)
: "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
: INTERNAL_SYSCALL_CLOBBERS);
return a0;
}
#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
(__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
(u64)(a5), (long)(a6))
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
long arg5, long arg6, long arg7) {
register u64 a7 asm("a7") = nr;
register u64 a0 asm("a0") = arg1;
register u64 a1 asm("a1") = arg2;
register u64 a2 asm("a2") = arg3;
register u64 a3 asm("a3") = arg4;
register u64 a4 asm("a4") = arg5;
register u64 a5 asm("a5") = arg6;
register u64 a6 asm("a6") = arg7;
__asm__ volatile("ecall\n\t"
: "+r"(a0)
: "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
"r"(a6)
: INTERNAL_SYSCALL_CLOBBERS);
return a0;
}
#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7) \
(__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
(u64)(a5), (long)(a6), (long)(a7))
#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
#define __SYSCALL_NARGS(...) \
__SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
#define __SYSCALL_CONCAT_X(a, b) a##b
#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
#define __SYSCALL_DISP(b, ...) \
__SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
// Helper function used to avoid clobbering of errno.
bool internal_iserror(uptr retval, int *rverrno) {
if (retval >= (uptr)-4095) {
if (rverrno)
*rverrno = -retval;
return true;
}
return false;
}
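The arity-dispatch macros keep call sites uniform by counting arguments and selecting the matching overload. For illustration, a three-argument call expands roughly as follows (write(2) chosen arbitrarily; __NR_write comes from the kernel headers):

// internal_syscall(SYSCALL(write), fd, buf, len)
//   -> __SYSCALL_DISP(__internal_syscall, __NR_write, fd, buf, len)
//   -> __SYSCALL_CONCAT(__internal_syscall, 3)(__NR_write, fd, buf, len)
//   -> __internal_syscall3(__NR_write, fd, buf, len)
//   -> (__internal_syscall)(__NR_write, (u64)(fd), (long)(buf), (long)(len))
// The result is then classified without touching libc errno:
//   int err;
//   uptr rv = internal_syscall(SYSCALL(write), 2, (uptr)"hi\n", 3);
//   if (internal_iserror(rv, &err)) { /* rv encoded -errno */ }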

Some files were not shown because too many files have changed in this diff.