[libsanitizer] merge from upstream r168699
From-SVN: r193849
parent 169d8507ca
commit 4ba5ca4650

26 changed files with 415 additions and 158 deletions
@@ -1,3 +1,7 @@
+2012-11-27  Kostya Serebryany  <kcc@google.com>
+
+	* All files: Merge from upstream r168699.
+
 2012-11-24  Kostya Serebryany  <kcc@google.com>
 	    Jack Howarth  <howarth@bromo.med.uc.edu>
@@ -1,4 +1,4 @@
-168514
+168699
 
 The first line of this file holds the svn revision number of the
 last merge done from the master library sources.
@@ -130,7 +130,7 @@ static void PoisonHeapPartialRightRedzone(uptr mem, uptr size) {
 }
 
 static u8 *MmapNewPagesAndPoisonShadow(uptr size) {
-  CHECK(IsAligned(size, kPageSize));
+  CHECK(IsAligned(size, GetPageSizeCached()));
   u8 *res = (u8*)MmapOrDie(size, __FUNCTION__);
   PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
   if (flags()->debug) {
@@ -532,12 +532,13 @@ class MallocInfo {
     uptr mmap_size = Max(size, kMinMmapSize);
     uptr n_chunks = mmap_size / size;
     CHECK(n_chunks * size == mmap_size);
-    if (size < kPageSize) {
+    uptr PageSize = GetPageSizeCached();
+    if (size < PageSize) {
       // Size is small, just poison the last chunk.
       n_chunks--;
     } else {
       // Size is large, allocate an extra page at right and poison it.
-      mmap_size += kPageSize;
+      mmap_size += PageSize;
     }
     CHECK(n_chunks > 0);
     u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size);
@@ -811,18 +812,19 @@ void *asan_realloc(void *p, uptr size, StackTrace *stack) {
 }
 
 void *asan_valloc(uptr size, StackTrace *stack) {
-  void *ptr = (void*)Allocate(kPageSize, size, stack);
+  void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack);
   __asan_malloc_hook(ptr, size);
   return ptr;
 }
 
 void *asan_pvalloc(uptr size, StackTrace *stack) {
-  size = RoundUpTo(size, kPageSize);
+  uptr PageSize = GetPageSizeCached();
+  size = RoundUpTo(size, PageSize);
   if (size == 0) {
     // pvalloc(0) should allocate one page.
-    size = kPageSize;
+    size = PageSize;
   }
-  void *ptr = (void*)Allocate(kPageSize, size, stack);
+  void *ptr = (void*)Allocate(PageSize, size, stack);
   __asan_malloc_hook(ptr, size);
   return ptr;
 }
@@ -941,7 +943,7 @@ uptr FakeStack::ClassMmapSize(uptr size_class) {
 }
 
 void FakeStack::AllocateOneSizeClass(uptr size_class) {
-  CHECK(ClassMmapSize(size_class) >= kPageSize);
+  CHECK(ClassMmapSize(size_class) >= GetPageSizeCached());
   uptr new_mem = (uptr)MmapOrDie(
       ClassMmapSize(size_class), __FUNCTION__);
   // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
@@ -174,9 +174,10 @@ void ClearShadowMemoryForContext(void *context) {
   uptr sp = (uptr)ucp->uc_stack.ss_sp;
   uptr size = ucp->uc_stack.ss_size;
   // Align to page size.
-  uptr bottom = sp & ~(kPageSize - 1);
+  uptr PageSize = GetPageSizeCached();
+  uptr bottom = sp & ~(PageSize - 1);
   size += sp - bottom;
-  size = RoundUpTo(size, kPageSize);
+  size = RoundUpTo(size, PageSize);
   PoisonShadow(bottom, size, 0);
 }
 #else
@@ -182,11 +182,11 @@ void ClearShadowMemoryForContext(void *context) {
 static void *island_allocator_pos = 0;
 
 #if SANITIZER_WORDSIZE == 32
-# define kIslandEnd (0xffdf0000 - kPageSize)
-# define kIslandBeg (kIslandEnd - 256 * kPageSize)
+# define kIslandEnd (0xffdf0000 - GetPageSizeCached())
+# define kIslandBeg (kIslandEnd - 256 * GetPageSizeCached())
 #else
-# define kIslandEnd (0x7fffffdf0000 - kPageSize)
-# define kIslandBeg (kIslandEnd - 256 * kPageSize)
+# define kIslandEnd (0x7fffffdf0000 - GetPageSizeCached())
+# define kIslandBeg (kIslandEnd - 256 * GetPageSizeCached())
 #endif
 
 extern "C"
@@ -210,7 +210,7 @@ mach_error_t __interception_allocate_island(void **ptr,
     internal_memset(island_allocator_pos, 0xCC, kIslandEnd - kIslandBeg);
   };
   *ptr = island_allocator_pos;
-  island_allocator_pos = (char*)island_allocator_pos + kPageSize;
+  island_allocator_pos = (char*)island_allocator_pos + GetPageSizeCached();
   if (flags()->verbosity) {
     Report("Branch island allocated at %p\n", *ptr);
   }
@@ -163,7 +163,7 @@ void *mz_valloc(malloc_zone_t *zone, size_t size) {
     return malloc_zone_valloc(system_malloc_zone, size);
   }
   GET_STACK_TRACE_HERE_FOR_MALLOC;
-  return asan_memalign(kPageSize, size, &stack);
+  return asan_memalign(GetPageSizeCached(), size, &stack);
 }
 
 #define GET_ZONE_FOR_PTR(ptr) \
@@ -66,7 +66,12 @@ extern __attribute__((visibility("default"))) uptr __asan_mapping_offset;
 #define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg)
 #define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd)
 
-#define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 : 16 * kPageSize)
+// With the zero shadow base we can not actually map pages starting from 0.
+// This constant is somewhat arbitrary.
+#define kZeroBaseShadowStart (1 << 18)
+
+#define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 \
+                                     : kZeroBaseShadowStart)
 #define kShadowGapEnd (kHighShadowBeg - 1)
 
 #define kGlobalAndStackRedzone \
@@ -163,8 +163,8 @@ void ShowStatsAndAbort() {
 // ---------------------- mmap -------------------- {{{1
 // Reserve memory range [beg, end].
 static void ReserveShadowMemoryRange(uptr beg, uptr end) {
-  CHECK((beg % kPageSize) == 0);
-  CHECK(((end + 1) % kPageSize) == 0);
+  CHECK((beg % GetPageSizeCached()) == 0);
+  CHECK(((end + 1) % GetPageSizeCached()) == 0);
   uptr size = end - beg + 1;
   void *res = MmapFixedNoReserve(beg, size);
   if (res != (void*)beg) {
@@ -269,8 +269,9 @@ void NOINLINE __asan_handle_no_return() {
   int local_stack;
   AsanThread *curr_thread = asanThreadRegistry().GetCurrent();
   CHECK(curr_thread);
+  uptr PageSize = GetPageSizeCached();
   uptr top = curr_thread->stack_top();
-  uptr bottom = ((uptr)&local_stack - kPageSize) & ~(kPageSize-1);
+  uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1);
   PoisonShadow(bottom, top - bottom, 0);
 }
 
@@ -347,12 +348,13 @@ void __asan_init() {
   }
 
   uptr shadow_start = kLowShadowBeg;
-  if (kLowShadowBeg > 0) shadow_start -= kMmapGranularity;
+  if (kLowShadowBeg > 0) shadow_start -= GetMmapGranularity();
   uptr shadow_end = kHighShadowEnd;
   if (MemoryRangeIsAvailable(shadow_start, shadow_end)) {
     if (kLowShadowBeg != kLowShadowEnd) {
       // mmap the low shadow plus at least one page.
-      ReserveShadowMemoryRange(kLowShadowBeg - kMmapGranularity, kLowShadowEnd);
+      ReserveShadowMemoryRange(kLowShadowBeg - GetMmapGranularity(),
+                               kLowShadowEnd);
     }
     // mmap the high shadow.
     ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd);
@@ -41,7 +41,7 @@ void AsanStats::Print() {
   Printf("Stats: %zuM really freed by %zu calls\n",
          really_freed>>20, real_frees);
   Printf("Stats: %zuM (%zu full pages) mmaped in %zu calls\n",
-         mmaped>>20, mmaped / kPageSize, mmaps);
+         mmaped>>20, mmaped / GetPageSizeCached(), mmaps);
 
   PrintMallocStatsArray(" mmaps by size class: ", mmaped_by_size);
   PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size);
@@ -26,15 +26,16 @@ AsanThread::AsanThread(LinkerInitialized x)
 
 AsanThread *AsanThread::Create(u32 parent_tid, thread_callback_t start_routine,
                                void *arg, StackTrace *stack) {
-  uptr size = RoundUpTo(sizeof(AsanThread), kPageSize);
+  uptr PageSize = GetPageSizeCached();
+  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
   AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__);
   thread->start_routine_ = start_routine;
   thread->arg_ = arg;
 
-  const uptr kSummaryAllocSize = kPageSize;
+  const uptr kSummaryAllocSize = PageSize;
   CHECK_LE(sizeof(AsanThreadSummary), kSummaryAllocSize);
   AsanThreadSummary *summary =
-      (AsanThreadSummary*)MmapOrDie(kPageSize, "AsanThreadSummary");
+      (AsanThreadSummary*)MmapOrDie(PageSize, "AsanThreadSummary");
   summary->Init(parent_tid, stack);
   summary->set_thread(thread);
   thread->set_summary(summary);
@@ -64,7 +65,7 @@ void AsanThread::Destroy() {
   // and we don't want it to have any poisoned stack.
   ClearShadowForThreadStack();
   fake_stack().Cleanup();
-  uptr size = RoundUpTo(sizeof(AsanThread), kPageSize);
+  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
   UnmapOrDie(this, size);
 }
 
@@ -61,7 +61,7 @@ void *LowLevelAllocator::Allocate(uptr size) {
   // Align allocation size.
   size = RoundUpTo(size, 8);
   if (allocated_end_ - allocated_current_ < (sptr)size) {
-    uptr size_to_allocate = Max(size, kPageSize);
+    uptr size_to_allocate = Max(size, GetPageSizeCached());
     allocated_current_ =
         (char*)MmapOrDie(size_to_allocate, __FUNCTION__);
     allocated_end_ = allocated_current_ + size_to_allocate;
@@ -215,7 +215,6 @@ class SizeClassAllocator64 {
   }
 
   static uptr AllocBeg() { return kSpaceBeg; }
   static uptr AllocEnd() { return kSpaceBeg + kSpaceSize + AdditionalSize(); }
-  static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
 
   static const uptr kNumClasses = 256; // Power of two <= 256
@@ -241,7 +240,7 @@ class SizeClassAllocator64 {
 
   static uptr AdditionalSize() {
     uptr res = sizeof(RegionInfo) * kNumClasses;
-    CHECK_EQ(res % kPageSize, 0);
+    CHECK_EQ(res % GetPageSizeCached(), 0);
     return res;
   }
 
@@ -364,17 +363,18 @@ class LargeMmapAllocator {
  public:
   void Init() {
     internal_memset(this, 0, sizeof(*this));
+    page_size_ = GetPageSizeCached();
   }
   void *Allocate(uptr size, uptr alignment) {
     CHECK(IsPowerOfTwo(alignment));
     uptr map_size = RoundUpMapSize(size);
-    if (alignment > kPageSize)
+    if (alignment > page_size_)
       map_size += alignment;
     if (map_size < size) return 0; // Overflow.
     uptr map_beg = reinterpret_cast<uptr>(
         MmapOrDie(map_size, "LargeMmapAllocator"));
     uptr map_end = map_beg + map_size;
-    uptr res = map_beg + kPageSize;
+    uptr res = map_beg + page_size_;
     if (res & (alignment - 1)) // Align.
       res += alignment - (res & (alignment - 1));
     CHECK_EQ(0, res & (alignment - 1));
@@ -421,7 +421,7 @@ class LargeMmapAllocator {
 
   bool PointerIsMine(void *p) {
     // Fast check.
-    if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
+    if ((reinterpret_cast<uptr>(p) & (page_size_ - 1))) return false;
     SpinMutexLock l(&mutex_);
     for (Header *l = list_; l; l = l->next) {
       if (GetUser(l) == p) return true;
@@ -430,10 +430,10 @@ class LargeMmapAllocator {
   }
 
   uptr GetActuallyAllocatedSize(void *p) {
-    return RoundUpMapSize(GetHeader(p)->size) - kPageSize;
+    return RoundUpMapSize(GetHeader(p)->size) - page_size_;
   }
 
-  // At least kPageSize/2 metadata bytes is available.
+  // At least page_size_/2 metadata bytes is available.
   void *GetMetaData(void *p) {
     return GetHeader(p) + 1;
   }
@@ -457,17 +457,20 @@ class LargeMmapAllocator {
     Header *prev;
   };
 
-  Header *GetHeader(uptr p) { return reinterpret_cast<Header*>(p - kPageSize); }
+  Header *GetHeader(uptr p) {
+    return reinterpret_cast<Header*>(p - page_size_);
+  }
   Header *GetHeader(void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }
 
   void *GetUser(Header *h) {
-    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
+    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
   }
 
   uptr RoundUpMapSize(uptr size) {
-    return RoundUpTo(size, kPageSize) + kPageSize;
+    return RoundUpTo(size, page_size_) + page_size_;
   }
 
+  uptr page_size_;
   Header *list_;
   SpinMutex mutex_;
 };
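The two LargeMmapAllocator hunks above replace the compile-time kPageSize with a page_size_ field cached in Init(), and keep the allocation header one page in front of the user block. A minimal standalone sketch of that layout, under the same idea (hypothetical class and member names, plain POSIX mmap standing in for the sanitizer's MmapOrDie):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdint>

    // Hypothetical simplified allocator: a header page in front of the user
    // data, page size cached once in Init() instead of a global constant.
    class PageHeaderAllocator {
     public:
      void Init() { page_size_ = static_cast<uintptr_t>(sysconf(_SC_PAGESIZE)); }
      void *Allocate(uintptr_t size) {
        uintptr_t map_size = RoundUpMapSize(size);
        void *map = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (map == MAP_FAILED) return nullptr;
        Header *h = static_cast<Header *>(map);
        h->map_size = map_size;
        // User data begins one page past the header, as in the hunk above.
        return static_cast<char *>(map) + page_size_;
      }
      void Deallocate(void *p) {
        Header *h = reinterpret_cast<Header *>(
            static_cast<char *>(p) - page_size_);
        munmap(h, h->map_size);
      }

     private:
      struct Header { uintptr_t map_size; };
      // Round the request up to whole pages, plus one page for the header.
      uintptr_t RoundUpMapSize(uintptr_t size) {
        return ((size + page_size_ - 1) & ~(page_size_ - 1)) + page_size_;
      }
      uintptr_t page_size_;
    };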
@@ -14,6 +14,13 @@
 
 namespace __sanitizer {
 
+uptr GetPageSizeCached() {
+  static uptr PageSize;
+  if (!PageSize)
+    PageSize = GetPageSize();
+  return PageSize;
+}
+
 // By default, dump to stderr. If report_fd is kInvalidFd, try to obtain file
 // descriptor by opening file in report_path.
 static fd_t report_fd = kStderrFd;
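GetPageSizeCached() above asks the OS once (through GetPageSize()) and then returns the stored value, which is why the other hunks can afford to call it on hot paths. A small standalone sketch of the same lazy-caching pattern, using sysconf directly outside the sanitizer runtime (the benign first-use race is the same trade-off the runtime accepts):

    #include <unistd.h>
    #include <cstdio>

    // Query the kernel once, then serve the cached value on every later call.
    static unsigned long PageSizeCached() {
      static unsigned long page_size;   // zero-initialized: "not queried yet"
      if (!page_size)
        page_size = static_cast<unsigned long>(sysconf(_SC_PAGESIZE));
      return page_size;
    }

    int main() {
      std::printf("page size: %lu\n", PageSizeCached());  // first call queries
      std::printf("page size: %lu\n", PageSizeCached());  // later calls are cached
      return 0;
    }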
@@ -75,7 +82,8 @@ void RawWrite(const char *buffer) {
 
 uptr ReadFileToBuffer(const char *file_name, char **buff,
                       uptr *buff_size, uptr max_len) {
-  const uptr kMinFileLen = kPageSize;
+  uptr PageSize = GetPageSizeCached();
+  uptr kMinFileLen = PageSize;
   uptr read_len = 0;
   *buff = 0;
   *buff_size = 0;
@@ -89,8 +97,8 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
     // Read up to one page at a time.
     read_len = 0;
     bool reached_eof = false;
-    while (read_len + kPageSize <= size) {
-      uptr just_read = internal_read(fd, *buff + read_len, kPageSize);
+    while (read_len + PageSize <= size) {
+      uptr just_read = internal_read(fd, *buff + read_len, PageSize);
       if (just_read == 0) {
         reached_eof = true;
         break;
@@ -21,25 +21,16 @@ namespace __sanitizer {
 // Constants.
 const uptr kWordSize = SANITIZER_WORDSIZE / 8;
 const uptr kWordSizeInBits = 8 * kWordSize;
+
 #if defined(__powerpc__) || defined(__powerpc64__)
-// Current PPC64 kernels use 64K pages sizes, but they can be
-// configured with 4K or even other sizes.
-// We may want to use getpagesize() or sysconf(_SC_PAGESIZE) here rather than
-// hardcoding the values, but today these values need to be compile-time
-// constants.
-const uptr kPageSize = 1UL << 16;
 const uptr kCacheLineSize = 128;
-const uptr kMmapGranularity = kPageSize;
-#elif !defined(_WIN32)
-const uptr kPageSize = 1UL << 12;
-const uptr kCacheLineSize = 64;
-const uptr kMmapGranularity = kPageSize;
 #else
-const uptr kPageSize = 1UL << 12;
 const uptr kCacheLineSize = 64;
-const uptr kMmapGranularity = 1UL << 16;
 #endif
 
+uptr GetPageSize();
+uptr GetPageSizeCached();
+uptr GetMmapGranularity();
 // Threads
 int GetPid();
 uptr GetTid();
@@ -30,6 +30,13 @@
 namespace __sanitizer {
 
 // ------------- sanitizer_common.h
+uptr GetPageSize() {
+  return sysconf(_SC_PAGESIZE);
+}
+
+uptr GetMmapGranularity() {
+  return GetPageSize();
+}
 
 int GetPid() {
   return getpid();
@@ -40,7 +47,7 @@ uptr GetThreadSelf() {
 }
 
 void *MmapOrDie(uptr size, const char *mem_type) {
-  size = RoundUpTo(size, kPageSize);
+  size = RoundUpTo(size, GetPageSizeCached());
   void *res = internal_mmap(0, size,
                             PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANON, -1, 0);
@@ -72,8 +79,9 @@ void UnmapOrDie(void *addr, uptr size) {
 }
 
 void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
-  void *p = internal_mmap((void*)(fixed_addr & ~(kPageSize - 1)),
-                          RoundUpTo(size, kPageSize),
+  uptr PageSize = GetPageSizeCached();
+  void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
+                          RoundUpTo(size, PageSize),
                           PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
                           -1, 0);
@@ -96,7 +104,7 @@ void *MapFileToMemory(const char *file_name, uptr *buff_size) {
   uptr fsize = internal_filesize(fd);
   CHECK_NE(fsize, (uptr)-1);
   CHECK_GT(fsize, 0);
-  *buff_size = RoundUpTo(fsize, kPageSize);
+  *buff_size = RoundUpTo(fsize, GetPageSizeCached());
   void *map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
   return (map == MAP_FAILED) ? 0 : map;
 }
@@ -63,7 +63,7 @@ void StackTrace::PrintStack(const uptr *addr, uptr size,
                             bool symbolize, const char *strip_file_prefix,
                             SymbolizeCallback symbolize_callback ) {
   MemoryMappingLayout proc_maps;
-  InternalScopedBuffer<char> buff(kPageSize * 2);
+  InternalScopedBuffer<char> buff(GetPageSizeCached() * 2);
   InternalScopedBuffer<AddressInfo> addr_frames(64);
   uptr frame_num = 0;
   for (uptr i = 0; i < size && addr[i]; i++) {
@@ -21,6 +21,14 @@
 namespace __sanitizer {
 
 // --------------------- sanitizer_common.h
+uptr GetPageSize() {
+  return 1U << 14; // FIXME: is this configurable?
+}
+
+uptr GetMmapGranularity() {
+  return 1U << 16; // FIXME: is this configurable?
+}
+
 bool FileExists(const char *filename) {
   UNIMPLEMENTED();
 }
@@ -564,13 +564,13 @@ TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
 
 TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
   SCOPED_TSAN_INTERCEPTOR(valloc, sz);
-  return user_alloc(thr, pc, sz, kPageSize);
+  return user_alloc(thr, pc, sz, GetPageSizeCached());
 }
 
 TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
   SCOPED_TSAN_INTERCEPTOR(pvalloc, sz);
-  sz = RoundUp(sz, kPageSize);
-  return user_alloc(thr, pc, sz, kPageSize);
+  sz = RoundUp(sz, GetPageSizeCached());
+  return user_alloc(thr, pc, sz, GetPageSizeCached());
 }
 
 TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
@@ -42,6 +42,9 @@ void __tsan_vptr_update(void **vptr_p, void *new_val);
 void __tsan_func_entry(void *call_pc);
 void __tsan_func_exit();
 
+void __tsan_read_range(void *addr, unsigned long size); // NOLINT
+void __tsan_write_range(void *addr, unsigned long size); // NOLINT
+
 #ifdef __cplusplus
 } // extern "C"
 #endif
@@ -9,6 +9,14 @@
 //
 //===----------------------------------------------------------------------===//
 
+// ThreadSanitizer atomic operations are based on C++11/C1x standards.
+// For background see C++11 standard. A slightly older, publically
+// available draft of the standard (not entirely up-to-date, but close enough
+// for casual browsing) is available here:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
+// The following page contains more background information:
+// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
+
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "tsan_interface_atomic.h"
 #include "tsan_flags.h"
@@ -37,6 +45,7 @@ typedef __tsan_atomic8 a8;
 typedef __tsan_atomic16 a16;
 typedef __tsan_atomic32 a32;
 typedef __tsan_atomic64 a64;
+typedef __tsan_atomic128 a128;
 const morder mo_relaxed = __tsan_memory_order_relaxed;
 const morder mo_consume = __tsan_memory_order_consume;
 const morder mo_acquire = __tsan_memory_order_acquire;
@@ -50,7 +59,8 @@ static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
   StatInc(thr, size == 1 ? StatAtomic1
              : size == 2 ? StatAtomic2
              : size == 4 ? StatAtomic4
-             :             StatAtomic8);
+             : size == 8 ? StatAtomic8
+             :             StatAtomic16);
   StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
              : mo == mo_consume ? StatAtomicConsume
              : mo == mo_acquire ? StatAtomicAcquire
@@ -77,6 +87,10 @@ static bool IsAcquireOrder(morder mo) {
       || mo == mo_acq_rel || mo == mo_seq_cst;
 }
 
+static bool IsAcqRelOrder(morder mo) {
+  return mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
 static morder ConvertOrder(morder mo) {
   if (mo > (morder)100500) {
     mo = morder(mo - 100500);
@@ -98,6 +112,34 @@ static morder ConvertOrder(morder mo) {
   return mo;
 }
 
+template<typename T> T func_xchg(T v, T op) {
+  return op;
+}
+
+template<typename T> T func_add(T v, T op) {
+  return v + op;
+}
+
+template<typename T> T func_sub(T v, T op) {
+  return v - op;
+}
+
+template<typename T> T func_and(T v, T op) {
+  return v & op;
+}
+
+template<typename T> T func_or(T v, T op) {
+  return v | op;
+}
+
+template<typename T> T func_xor(T v, T op) {
+  return v ^ op;
+}
+
+template<typename T> T func_nand(T v, T op) {
+  return ~v & op;
+}
+
 #define SCOPED_ATOMIC(func, ...) \
   mo = ConvertOrder(mo); \
   mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
@@ -113,9 +155,15 @@ template<typename T>
 static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
     morder mo) {
   CHECK(IsLoadOrder(mo));
+  // This fast-path is critical for performance.
+  // Assume the access is atomic.
+  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a))
+    return *a;
+  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, false);
+  thr->clock.set(thr->tid, thr->fast_state.epoch());
+  thr->clock.acquire(&s->clock);
   T v = *a;
-  if (IsAcquireOrder(mo))
-    Acquire(thr, pc, (uptr)a);
+  s->mtx.ReadUnlock();
   return v;
 }
 
@@ -123,100 +171,112 @@ template<typename T>
 static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
     morder mo) {
   CHECK(IsStoreOrder(mo));
-  if (IsReleaseOrder(mo))
-    ReleaseStore(thr, pc, (uptr)a);
+  // This fast-path is critical for performance.
+  // Assume the access is atomic.
+  // Strictly saying even relaxed store cuts off release sequence,
+  // so must reset the clock.
+  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
+    *a = v;
+    return;
+  }
+  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
+  thr->clock.set(thr->tid, thr->fast_state.epoch());
+  thr->clock.ReleaseStore(&s->clock);
   *a = v;
+  s->mtx.Unlock();
 }
 
+template<typename T, T (*F)(T v, T op)>
+static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
+  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
+  thr->clock.set(thr->tid, thr->fast_state.epoch());
+  if (IsAcqRelOrder(mo))
+    thr->clock.acq_rel(&s->clock);
+  else if (IsReleaseOrder(mo))
+    thr->clock.release(&s->clock);
+  else if (IsAcquireOrder(mo))
+    thr->clock.acquire(&s->clock);
+  T c = *a;
+  *a = F(c, v);
+  s->mtx.Unlock();
+  return c;
+}
+
 template<typename T>
 static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
     morder mo) {
-  if (IsReleaseOrder(mo))
-    Release(thr, pc, (uptr)a);
-  v = __sync_lock_test_and_set(a, v);
-  if (IsAcquireOrder(mo))
-    Acquire(thr, pc, (uptr)a);
-  return v;
+  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
 }
 
 template<typename T>
 static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
     morder mo) {
-  if (IsReleaseOrder(mo))
-    Release(thr, pc, (uptr)a);
-  v = __sync_fetch_and_add(a, v);
-  if (IsAcquireOrder(mo))
-    Acquire(thr, pc, (uptr)a);
-  return v;
+  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
 }
 
 template<typename T>
 static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
     morder mo) {
-  if (IsReleaseOrder(mo))
-    Release(thr, pc, (uptr)a);
-  v = __sync_fetch_and_sub(a, v);
-  if (IsAcquireOrder(mo))
-    Acquire(thr, pc, (uptr)a);
-  return v;
+  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
 }
 
 template<typename T>
 static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
     morder mo) {
-  if (IsReleaseOrder(mo))
-    Release(thr, pc, (uptr)a);
-  v = __sync_fetch_and_and(a, v);
-  if (IsAcquireOrder(mo))
-    Acquire(thr, pc, (uptr)a);
-  return v;
+  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
 }
 
 template<typename T>
 static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
     morder mo) {
-  if (IsReleaseOrder(mo))
-    Release(thr, pc, (uptr)a);
-  v = __sync_fetch_and_or(a, v);
-  if (IsAcquireOrder(mo))
-    Acquire(thr, pc, (uptr)a);
-  return v;
+  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
 }
 
 template<typename T>
 static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
     morder mo) {
-  if (IsReleaseOrder(mo))
-    Release(thr, pc, (uptr)a);
-  v = __sync_fetch_and_xor(a, v);
-  if (IsAcquireOrder(mo))
-    Acquire(thr, pc, (uptr)a);
-  return v;
+  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
 }
 
+template<typename T>
+static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
+    morder mo) {
+  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
+}
+
 template<typename T>
 static bool AtomicCAS(ThreadState *thr, uptr pc,
-    volatile T *a, T *c, T v, morder mo) {
-  if (IsReleaseOrder(mo))
-    Release(thr, pc, (uptr)a);
-  T cc = *c;
-  T pr = __sync_val_compare_and_swap(a, cc, v);
-  if (IsAcquireOrder(mo))
-    Acquire(thr, pc, (uptr)a);
-  if (pr == cc)
-    return true;
-  *c = pr;
-  return false;
+    volatile T *a, T *c, T v, morder mo, morder fmo) {
+  (void)fmo; // Unused because llvm does not pass it yet.
+  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
+  thr->clock.set(thr->tid, thr->fast_state.epoch());
+  if (IsAcqRelOrder(mo))
+    thr->clock.acq_rel(&s->clock);
+  else if (IsReleaseOrder(mo))
+    thr->clock.release(&s->clock);
+  else if (IsAcquireOrder(mo))
+    thr->clock.acquire(&s->clock);
+  T cur = *a;
+  bool res = false;
+  if (cur == *c) {
+    *a = v;
+    res = true;
+  } else {
+    *c = cur;
+  }
+  s->mtx.Unlock();
+  return res;
 }
 
 template<typename T>
 static T AtomicCAS(ThreadState *thr, uptr pc,
-    volatile T *a, T c, T v, morder mo) {
-  AtomicCAS(thr, pc, a, &c, v, mo);
+    volatile T *a, T c, T v, morder mo, morder fmo) {
+  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
   return c;
 }
 
 static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
   // FIXME(dvyukov): not implemented.
   __sync_synchronize();
 }
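The hunk above replaces the per-operation __sync builtins with a single AtomicRMW template parameterized by the combining function (func_add, func_xchg, and so on), so each fetch-style operation becomes a one-line forwarder and the vector-clock handling lives in one place. A minimal sketch of that dispatch pattern outside the TSan runtime (a plain mutex stands in for the SyncVar lock and clock updates; all names here are illustrative):

    #include <mutex>
    #include <cstdio>

    // One tiny function per operation; the combining step is a template parameter.
    template<typename T> T func_xchg(T v, T op) { (void)v; return op; }
    template<typename T> T func_add(T v, T op)  { return v + op; }

    // Generic read-modify-write: lock, read old value, combine, write, return old.
    template<typename T, T (*F)(T, T)>
    T AtomicRMW(std::mutex &mu, volatile T *a, T v) {
      std::lock_guard<std::mutex> lock(mu);  // stands in for SyncVar mutex/clocks
      T old = *a;
      *a = F(old, v);
      return old;
    }

    // Each public operation is then a thin forwarder, as in the hunk above.
    template<typename T>
    T AtomicFetchAdd(std::mutex &mu, volatile T *a, T v) {
      return AtomicRMW<T, func_add>(mu, a, v);
    }

    int main() {
      std::mutex mu;
      volatile int x = 40;
      int prev = AtomicFetchAdd(mu, &x, 2);  // prev == 40, x == 42
      std::printf("prev=%d now=%d\n", prev, x);
      return 0;
    }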
@@ -236,6 +296,12 @@ a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
   SCOPED_ATOMIC(Load, a, mo);
 }
 
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
+  SCOPED_ATOMIC(Load, a, mo);
+}
+#endif
+
 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(Store, a, v, mo);
 }
@@ -252,6 +318,12 @@ void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(Store, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
+void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(Store, a, v, mo);
+}
+#endif
+
 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
@@ -268,6 +340,12 @@ a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+#endif
+
 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
@@ -284,6 +362,12 @@ a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+#endif
+
 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
@@ -300,6 +384,12 @@ a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+#endif
+
 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
@@ -316,6 +406,12 @@ a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+#endif
+
 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
@@ -332,6 +428,12 @@ a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+#endif
+
 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
@@ -348,65 +450,114 @@ a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
 
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+#endif
+
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
+  SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
+  SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
+  SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
+  SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
+  SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+#endif
+
 int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+#if __TSAN_HAS_INT128
+int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+#endif
+
 int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+#if __TSAN_HAS_INT128
+int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+#endif
+
 a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic64_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+#endif
+
 void __tsan_atomic_thread_fence(morder mo) {
   char* a;
   SCOPED_ATOMIC(Fence, mo);
@@ -15,10 +15,19 @@
 extern "C" {
 #endif
 
-typedef char  __tsan_atomic8;
-typedef short __tsan_atomic16;  // NOLINT
-typedef int   __tsan_atomic32;
-typedef long  __tsan_atomic64;  // NOLINT
+typedef char     __tsan_atomic8;
+typedef short    __tsan_atomic16;  // NOLINT
+typedef int      __tsan_atomic32;
+typedef long     __tsan_atomic64;  // NOLINT
+
+#if defined(__SIZEOF_INT128__) \
+    || (__clang_major__ * 100 + __clang_minor__ >= 302)
+typedef __int128 __tsan_atomic128;
+#define __TSAN_HAS_INT128 1
+#else
+typedef char     __tsan_atomic128;
+#define __TSAN_HAS_INT128 0
+#endif
 
 // Part of ABI, do not change.
 // http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
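The header hunk above makes the 128-bit atomic type conditional: __tsan_atomic128 is the compiler's __int128 when __SIZEOF_INT128__ (or a recent clang) is available, and a char placeholder otherwise, with __TSAN_HAS_INT128 guarding every 128-bit entry point. A small sketch of the same compile-time gate (GCC/Clang-style compilers assumed; the names below are illustrative):

    #include <cstdio>

    #if defined(__SIZEOF_INT128__)
    typedef __int128 wide_int;      // real 128-bit integer
    # define HAS_WIDE_INT 1
    #else
    typedef char wide_int;          // placeholder so declarations still compile
    # define HAS_WIDE_INT 0
    #endif

    int main() {
    #if HAS_WIDE_INT
      wide_int v = (wide_int)1 << 100;          // only meaningful with __int128
      std::printf("bit 100 set: %d\n", (int)(v >> 100));
    #else
      std::printf("no 128-bit integer type on this compiler\n");
    #endif
      return 0;
    }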
@@ -39,6 +48,8 @@ __tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
     __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
     __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+    __tsan_memory_order mo);
 
 void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
     __tsan_memory_order mo);
@@ -48,6 +59,8 @@ void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
     __tsan_memory_order mo);
 void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
     __tsan_memory_order mo);
+void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+    __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -57,6 +70,8 @@ __tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -66,6 +81,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -75,6 +92,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -84,6 +103,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -93,6 +114,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
 __tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
@@ -102,37 +125,67 @@ __tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
 __tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
+
 int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
+    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
+    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
-    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
+    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);
+    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 
 int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
+    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
+    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
-    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
+    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);
+    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 
 __tsan_atomic8 __tsan_atomic8_compare_exchange_val(
     volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
 __tsan_atomic16 __tsan_atomic16_compare_exchange_val(
     volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
 __tsan_atomic32 __tsan_atomic32_compare_exchange_val(
     volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
 __tsan_atomic64 __tsan_atomic64_compare_exchange_val(
     volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+    volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
 
 void __tsan_atomic_thread_fence(__tsan_memory_order mo);
 void __tsan_atomic_signal_fence(__tsan_memory_order mo);
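Every compare-exchange declaration above gains a second ordering argument, fail_mo, mirroring C++11 compare_exchange, which takes separate success and failure orderings (the .cc hunk notes the failure order is currently ignored because llvm does not pass it yet). A sketch of the C++11-level call these entry points model:

    #include <atomic>
    #include <cstdio>

    int main() {
      std::atomic<int> a(41);
      int expected = 41;
      // Success and failure orders are passed separately, just as the
      // __tsan_*_compare_exchange_* entry points now receive mo and fail_mo.
      bool ok = a.compare_exchange_strong(expected, 42,
                                          std::memory_order_acq_rel,
                                          std::memory_order_acquire);
      std::printf("ok=%d value=%d expected=%d\n", (int)ok, a.load(), expected);
      return 0;
    }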
@@ -61,3 +61,11 @@ void __tsan_func_entry(void *pc) {
 void __tsan_func_exit() {
   FuncExit(cur_thread());
 }
+
+void __tsan_read_range(void *addr, uptr size) {
+  MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
+}
+
+void __tsan_write_range(void *addr, uptr size) {
+  MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true);
+}
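__tsan_read_range and __tsan_write_range above report an access to a whole byte range with one call instead of one event per element. A hypothetical use from code linked against the TSan runtime (the wrapper function and its calling pattern below are illustrative, not part of this commit):

    #include <cstring>

    extern "C" {
    // Declarations as added in the interface header hunk earlier in this diff.
    void __tsan_read_range(void *addr, unsigned long size);   // NOLINT
    void __tsan_write_range(void *addr, unsigned long size);  // NOLINT
    }

    // What instrumentation or a manual annotation might emit around a bulk
    // copy: one range event per buffer rather than one per byte.
    void annotated_copy(void *dst, const void *src, unsigned long n) {
      __tsan_read_range(const_cast<void *>(src), n);
      __tsan_write_range(dst, n);
      std::memcpy(dst, src, n);
    }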
@@ -50,7 +50,7 @@ static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
 
 static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg);
 static const uptr kLinuxShadowEnd =
-  MemToShadow(kLinuxAppMemEnd) | (kPageSize - 1);
+  MemToShadow(kLinuxAppMemEnd) | 0xff;
 
 static inline bool IsAppMem(uptr mem) {
   return mem >= kLinuxAppMemBeg && mem <= kLinuxAppMemEnd;
@@ -521,6 +521,7 @@ void AfterSleep(ThreadState *thr, uptr pc);
 #define HACKY_CALL(f) \
   __asm__ __volatile__("sub $1024, %%rsp;" \
                        "/*.cfi_adjust_cfa_offset 1024;*/" \
+                       ".hidden " #f "_thunk;" \
                        "call " #f "_thunk;" \
                        "add $1024, %%rsp;" \
                        "/*.cfi_adjust_cfa_offset -1024;*/" \
@@ -75,6 +75,11 @@ void StatOutput(u64 *stat) {
   name[StatAtomicStore] = " store ";
   name[StatAtomicExchange] = " exchange ";
   name[StatAtomicFetchAdd] = " fetch_add ";
+  name[StatAtomicFetchSub] = " fetch_sub ";
+  name[StatAtomicFetchAnd] = " fetch_and ";
+  name[StatAtomicFetchOr] = " fetch_or ";
+  name[StatAtomicFetchXor] = " fetch_xor ";
+  name[StatAtomicFetchNand] = " fetch_nand ";
   name[StatAtomicCAS] = " compare_exchange ";
   name[StatAtomicFence] = " fence ";
   name[StatAtomicRelaxed] = " Including relaxed ";
@@ -87,6 +92,7 @@ void StatOutput(u64 *stat) {
   name[StatAtomic2] = " size 2 ";
   name[StatAtomic4] = " size 4 ";
   name[StatAtomic8] = " size 8 ";
+  name[StatAtomic16] = " size 16 ";
 
   name[StatInterceptor] = "Interceptors ";
   name[StatInt_longjmp] = " longjmp ";
@@ -75,6 +75,7 @@ enum StatType {
   StatAtomicFetchAnd,
   StatAtomicFetchOr,
   StatAtomicFetchXor,
+  StatAtomicFetchNand,
   StatAtomicCAS,
   StatAtomicFence,
   StatAtomicRelaxed,
@@ -87,6 +88,7 @@ enum StatType {
   StatAtomic2,
   StatAtomic4,
   StatAtomic8,
+  StatAtomic16,
 
   // Interceptors.
   StatInterceptor,