From 58c62fd9768594ec8dd57e8320ba2396bf8b87e5 Mon Sep 17 00:00:00 2001
From: Fangrui Song <maskray@google.com>
Date: Mon, 29 Mar 2021 22:14:29 -0700
Subject: [PATCH] [sanitizer] Improve accuracy of GetTls on x86/s390

The previous code may underestimate the static TLS surplus, which can
cause false positives in LeakSanitizer if a dynamically loaded module
uses the surplus and an allocation is referenced only by a thread's
TLS.
---
 .../sanitizer_linux_libcdep.cpp | 30 +++++++++++++++++++++---------
 1 file changed, 21 insertions(+), 9 deletions(-)

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
index 18441e4ab1a0e4..bd8f5d330b30cf 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -304,7 +304,7 @@ static int CollectStaticTlsRanges(struct dl_phdr_info *info, size_t size,
   return 0;
 }
 
-static void GetStaticTlsRange(uptr *addr, uptr *size) {
+static void GetStaticTlsRange(uptr *addr, uptr *size, uptr *align) {
   InternalMmapVector<TlsBlock> ranges;
   dl_iterate_phdr(CollectStaticTlsRanges, &ranges);
   uptr len = ranges.size();
@@ -318,17 +318,19 @@ static void GetStaticTlsRange(uptr *addr, uptr *size) {
     // This may happen with musl if no module uses PT_TLS.
     *addr = 0;
     *size = 0;
+    *align = 1;
     return;
   }
   // Find the maximum consecutive ranges. We consider two modules consecutive if
   // the gap is smaller than the alignment. The dynamic loader places static TLS
   // blocks this way not to waste space.
   uptr l = one;
+  *align = ranges[l].align;
   while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l - 1].align)
-    --l;
+    *align = Max(*align, ranges[--l].align);
   uptr r = one + 1;
   while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r - 1].align)
-    ++r;
+    *align = Max(*align, ranges[r++].align);
   *addr = ranges[l].begin;
   *size = ranges[r - 1].end - ranges[l].begin;
 }
@@ -406,21 +408,31 @@ static void GetTls(uptr *addr, uptr *size) {
     *size = 0;
   }
 #elif SANITIZER_LINUX
-  GetStaticTlsRange(addr, size);
+  uptr align;
+  GetStaticTlsRange(addr, size, &align);
 #if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
+  if (SANITIZER_GLIBC) {
+#if defined(__s390__)
+    align = Max(align, 16);
+#else
+    align = Max(align, 64);
+#endif
+  }
+  const uptr tp = RoundUpTo(*addr + *size, align);
+
   // lsan requires the range to additionally cover the static TLS surplus
   // (elf/dl-tls.c defines 1664). Otherwise there may be false positives for
   // allocations only referenced by tls in dynamically loaded modules.
-  if (SANITIZER_GLIBC) {
-    *addr -= 1664;
-    *size += 1664;
-  }
+  if (SANITIZER_GLIBC)
+    *size += 1664;
+
   // Extend the range to include the thread control block. On glibc, lsan needs
   // the range to include pthread::{specific_1stblock,specific} so that
   // allocations only referenced by pthread_setspecific can be scanned. This may
   // underestimate by at most TLS_TCB_ALIGN-1 bytes but it should be fine
   // because the number of bytes after pthread::specific is larger.
-  *size += ThreadDescriptorSize();
+  *addr = tp - RoundUpTo(*size, align);
+  *size = tp - *addr + ThreadDescriptorSize();
 #else
   if (SANITIZER_GLIBC)
     *size += 1664;
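
Note (reviewer illustration, not part of the patch): the new *align output
is the maximum alignment over the run of consecutive static TLS blocks
around the block of interest, where a gap smaller than the predecessor's
alignment counts as consecutive. A minimal standalone sketch of that merge
loop follows; TlsBlock, the block layouts, and the choice of `one` are all
hypothetical stand-ins for the runtime's data:

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>
  #include <vector>

  using uptr = uintptr_t;

  // Stand-in for the runtime's per-module record: [begin, end) plus align.
  struct TlsBlock {
    uptr begin, end, align;
  };

  int main() {
    // Hypothetical blocks, sorted by address. The 0x20-byte gap after the
    // first block is below its 64-byte alignment, so all three merge.
    std::vector<TlsBlock> ranges = {
        {0x1000, 0x10c0, 64}, {0x10e0, 0x1110, 16}, {0x1110, 0x1150, 32}};
    uptr len = ranges.size(), one = 1;  // start from the middle block

    // Walk left and right while neighbors are "consecutive", keeping the
    // largest alignment seen, exactly as GetStaticTlsRange now does.
    uptr l = one, align = ranges[l].align;
    while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l - 1].align)
      align = std::max(align, ranges[--l].align);
    uptr r = one + 1;
    while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r - 1].align)
      align = std::max(align, ranges[r++].align);

    // Prints: region [0x1000, 0x1150), align 64
    printf("region [%#lx, %#lx), align %lu\n", (unsigned long)ranges[l].begin,
           (unsigned long)ranges[r - 1].end, (unsigned long)align);
  }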
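
Likewise illustrative, not part of the patch: the new x86 arithmetic in
GetTls rounds up past the end of the static TLS region to locate the thread
pointer (x86 and s390 are TLS variant II targets, so static TLS sits below
TP), widens the size by glibc's 1664-byte surplus, then re-anchors the range
to end at TP and extends it over the thread descriptor. All numbers below,
including the stand-in for ThreadDescriptorSize(), are invented:

  #include <cstdint>
  #include <cstdio>

  using uptr = uintptr_t;

  // Same helper the runtime provides as RoundUpTo (align: power of two).
  static uptr RoundUpTo(uptr x, uptr align) {
    return (x + align - 1) & ~(align - 1);
  }

  int main() {
    // Hypothetical static TLS region reported by GetStaticTlsRange.
    uptr addr = 0x7f0000001240, size = 0x900, align = 64;

    // TP is the first align-rounded address past the static TLS blocks.
    const uptr tp = RoundUpTo(addr + size, align);

    // Cover the static TLS surplus (elf/dl-tls.c reserves 1664 bytes) so
    // allocations referenced only from dlopen'ed modules' TLS are scanned.
    size += 1664;

    // Re-anchor so the range ends at TP, then run past TP over the thread
    // control block; 1776 stands in for ThreadDescriptorSize(), whose real
    // value depends on the glibc version.
    const uptr thread_descriptor_size = 1776;
    addr = tp - RoundUpTo(size, align);
    size = tp - addr + thread_descriptor_size;

    printf("scan [%#lx, %#lx)\n", (unsigned long)addr,
           (unsigned long)(addr + size));
  }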