//===-- asan_fake_stack.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_poisoning.h"
#include "asan_thread.h"

namespace __asan {

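// kMagic{2,4,8} replicate the one-byte after-return magic into 2-, 4- and
// 8-byte patterns so that SetShadow below can poison shadow memory with whole
// u64 stores instead of a byte-at-a-time loop.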
static const u64 kMagic1 = kAsanStackAfterReturnMagic;
static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;

static const u64 kAllocaRedzoneSize = 32UL;
static const u64 kAllocaRedzoneMask = 31UL;

// For small size classes, inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
  u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
  if (ASAN_SHADOW_SCALE == 3 && class_id <= 6) {
    // This code expects ASAN_SHADOW_SCALE=3.
    for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
      shadow[i] = magic;
      // Make sure this does not become memset.
      SanitizerBreakOptimization(nullptr);
    }
  } else {
    // The size class is too big; it's cheaper to poison only size bytes.
    PoisonShadow(ptr, size, static_cast<u8>(magic));
  }
}

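// Clamp stack_size_log to the supported range and map the whole FakeStack
// (all size classes plus their flag arrays) with a single mmap call.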
FakeStack *FakeStack::Create(uptr stack_size_log) {
  static uptr kMinStackSizeLog = 16;
  static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
  if (stack_size_log < kMinStackSizeLog)
    stack_size_log = kMinStackSizeLog;
  if (stack_size_log > kMaxStackSizeLog)
    stack_size_log = kMaxStackSizeLog;
  uptr size = RequiredSize(stack_size_log);
  FakeStack *res = reinterpret_cast<FakeStack *>(
      flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
                             : MmapOrDie(size, "FakeStack"));
  res->stack_size_log_ = stack_size_log;
  u8 *p = reinterpret_cast<u8 *>(res);
  VReport(1,
          "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
          "mmapped %zdK, noreserve=%d \n",
          GetCurrentTidOrInvalid(), (void *)p,
          (void *)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log,
          size >> 10, flags()->uar_noreserve);
  return res;
}

void FakeStack::Destroy(int tid) {
  PoisonAll(0);
  if (Verbosity() >= 2) {
    InternalScopedString str;
    for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
      str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
                 NumberOfFrames(stack_size_log(), class_id));
    Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
  }
  uptr size = RequiredSize(stack_size_log_);
  FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
  UnmapOrDie(this, size);
}

void FakeStack::PoisonAll(u8 magic) {
  PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
               magic);
}

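// Grab a free fake frame of the given size class: scan the flags array
// starting at hint_position_[class_id] and take the first slot that is not
// already in use, running GC first if a no-return event was seen.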
#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED
#endif
FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
                               uptr real_stack) {
  CHECK_LT(class_id, kNumberOfSizeClasses);
  if (needs_gc_)
    GC(real_stack);
  uptr &hint_position = hint_position_[class_id];
  const int num_iter = NumberOfFrames(stack_size_log, class_id);
  u8 *flags = GetFlags(stack_size_log, class_id);
  for (int i = 0; i < num_iter; i++) {
    uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
    // This part is tricky. On one hand, checking and setting flags[pos]
    // should be atomic to ensure async-signal safety. But on the other hand,
    // if the signal arrives between checking and setting flags[pos], the
    // signal handler's fake stack will start from a different hint_position
    // and so will not touch this particular byte. So, it is safe to do this
    // with regular non-atomic load and store (at least I was not able to make
    // this code crash).
    if (flags[pos]) continue;
    flags[pos] = 1;
    FakeFrame *res = reinterpret_cast<FakeFrame *>(
        GetFrame(stack_size_log, class_id, pos));
    res->real_stack = real_stack;
    *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
    return res;
  }
  return nullptr;  // We are out of fake stack.
}

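// If ptr lies inside this FakeStack, return the address of the enclosing fake
// frame and fill *frame_beg (just past the FakeFrame header) and *frame_end;
// return 0 otherwise.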
uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
  uptr stack_size_log = this->stack_size_log();
  uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
  uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
  if (ptr < beg || ptr >= end) return 0;
  uptr class_id = (ptr - beg) >> stack_size_log;
  uptr base = beg + (class_id << stack_size_log);
  CHECK_LE(base, ptr);
  CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
  uptr res = base + pos * BytesInSizeClass(class_id);
  *frame_end = res + BytesInSizeClass(class_id);
  *frame_beg = res + sizeof(FakeFrame);
  return res;
}

void FakeStack::HandleNoReturn() {
  needs_gc_ = true;
}

// When a throw, longjmp, or similar no-return event happens we don't call
// OnFree() and as a result may leak one or more fake frames; the good news is
// that we are notified about all such events by HandleNoReturn().
// If we recently had such a no-return event we need to collect garbage frames.
// We do it based on their 'real_stack' values: everything lower than the
// current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      if (ff->real_stack < real_stack) {
        flags[i] = 0;
      }
    }
  }
  needs_gc_ = false;
}

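// Invoke callback(beg, end, arg) for every currently allocated fake frame in
// every size class.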
void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      uptr begin = reinterpret_cast<uptr>(ff);
      callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
    }
  }
}

#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
static THREADLOCAL FakeStack *fake_stack_tls;

FakeStack *GetTLSFakeStack() {
  return fake_stack_tls;
}
void SetTLSFakeStack(FakeStack *fs) {
  fake_stack_tls = fs;
}
#else
FakeStack *GetTLSFakeStack() { return 0; }
void SetTLSFakeStack(FakeStack *fs) { }
#endif  // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA

static FakeStack *GetFakeStack() {
  AsanThread *t = GetCurrentThread();
  if (!t) return nullptr;
  return t->get_or_create_fake_stack();
}

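// Fast path: use the thread-local cache when present; otherwise fall back to
// the per-thread lookup, unless use-after-return detection is disabled at
// runtime.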
static FakeStack *GetFakeStackFast() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  if (!__asan_option_detect_stack_use_after_return)
    return nullptr;
  return GetFakeStack();
}

static FakeStack *GetFakeStackFastAlways() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  return GetFakeStack();
}

static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
  FakeStack *fs = GetFakeStackFast();
  if (!fs) return 0;
  uptr local_stack;
  uptr real_stack = reinterpret_cast<uptr>(&local_stack);
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
  if (!ff) return 0;  // Out of fake stack.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

static ALWAYS_INLINE uptr OnMallocAlways(uptr class_id, uptr size) {
  FakeStack *fs = GetFakeStackFastAlways();
  if (!fs)
    return 0;
  uptr local_stack;
  uptr real_stack = reinterpret_cast<uptr>(&local_stack);
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
  if (!ff)
    return 0;  // Out of fake stack.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

static ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
  FakeStack::Deallocate(ptr, class_id);
  SetShadow(ptr, size, class_id, kMagic8);
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;
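// Define __asan_stack_malloc_<N>, __asan_stack_malloc_always_<N> and
// __asan_stack_free_<N> for each size class. A rough sketch of how
// instrumented code uses these entry points (exact codegen varies by
// compiler version; kFrameSize is a placeholder for the frame size):
//
//   uptr fake = __asan_stack_malloc_3(kFrameSize);  // try to take a fake frame
//   uptr frame = fake ? fake : reinterpret_cast<uptr>(alloca(kFrameSize));
//   ...                                             // function body
//   if (fake) __asan_stack_free_3(fake, kFrameSize);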
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id)                       \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                               \
      __asan_stack_malloc_##class_id(uptr size) {                             \
    return OnMalloc(class_id, size);                                          \
  }                                                                           \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                               \
      __asan_stack_malloc_always_##class_id(uptr size) {                      \
    return OnMallocAlways(class_id, size);                                    \
  }                                                                           \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
      uptr ptr, uptr size) {                                                  \
    OnFree(ptr, class_id, size);                                              \
  }

DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)

extern "C" {
// TODO: remove this method and fix tests that use it by setting
// -asan-use-after-return=never, after modal UAR flag lands
// (https://github.com/google/sanitizers/issues/1394)
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }

SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
                                   void **end) {
  FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
  if (!fs) return nullptr;
  uptr frame_beg, frame_end;
  FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
      reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
  if (!frame) return nullptr;
  if (frame->magic != kCurrentStackFrameMagic)
    return nullptr;
  if (beg) *beg = reinterpret_cast<void*>(frame_beg);
  if (end) *end = reinterpret_cast<void*>(frame_end);
  return reinterpret_cast<void*>(frame->real_stack);
}

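// Poison the 32-byte left redzone below an alloca, the partial shadow granule
// at the end of the allocation, and the right redzone rounded up to the next
// 32-byte boundary.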
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_alloca_poison(uptr addr, uptr size) {
  uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
  uptr PartialRzAddr = addr + size;
  uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
  uptr PartialRzAligned = PartialRzAddr & ~(ASAN_SHADOW_GRANULARITY - 1);
  FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
  FastPoisonShadowPartialRightRedzone(
      PartialRzAligned, PartialRzAddr % ASAN_SHADOW_GRANULARITY,
      RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
  FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
}

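// Unpoison the shadow for every alloca between top and bottom (bottom is the
// higher address because the stack grows down).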
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
  if ((!top) || (top > bottom)) return;
  REAL(memset)
  (reinterpret_cast<void *>(MemToShadow(top)), 0,
   (bottom - top) / ASAN_SHADOW_GRANULARITY);
}
}  // extern "C"