gcc-13.2.0/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
//===-- sanitizer_stacktrace.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_STACKTRACE_H
#define SANITIZER_STACKTRACE_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"

namespace __sanitizer {

struct BufferedStackTrace;

static const u32 kStackTraceMax = 255;

#if SANITIZER_LINUX && defined(__mips__)
# define SANITIZER_CAN_FAST_UNWIND 0
#elif SANITIZER_WINDOWS
# define SANITIZER_CAN_FAST_UNWIND 0
#else
# define SANITIZER_CAN_FAST_UNWIND 1
#endif

// Fast unwind is the only option on Mac for now; we will need to
// revisit this macro when slow unwind works on Mac, see
// https://github.com/google/sanitizers/issues/137
#if SANITIZER_APPLE
#  define SANITIZER_CAN_SLOW_UNWIND 0
#else
# define SANITIZER_CAN_SLOW_UNWIND 1
#endif

struct StackTrace {
  const uptr *trace;
  u32 size;
  u32 tag;

  static const int TAG_UNKNOWN = 0;
  static const int TAG_ALLOC = 1;
  static const int TAG_DEALLOC = 2;
  static const int TAG_CUSTOM = 100; // Tool specific tags start here.

  StackTrace() : trace(nullptr), size(0), tag(0) {}
  StackTrace(const uptr *trace, u32 size) : trace(trace), size(size), tag(0) {}
  StackTrace(const uptr *trace, u32 size, u32 tag)
      : trace(trace), size(size), tag(tag) {}

  // Prints a symbolized stacktrace, followed by an empty line.
  void Print() const;

  // Prints a symbolized stacktrace to the output string, followed by an empty
  // line.
  void PrintTo(InternalScopedString *output) const;

  // Prints a symbolized stacktrace to the output buffer, followed by an empty
  // line. Returns the number of characters that would have been written to the
  // buffer (not including the trailing '\0'). Thus, the string is truncated
  // iff the return value is not less than "out_buf_size".
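  // Example (illustrative sketch, assuming "stack" is some StackTrace):
  //   char buf[2048];
  //   uptr needed = stack.PrintTo(buf, sizeof(buf));
  //   if (needed >= sizeof(buf)) {
  //     // Output was truncated; needed + 1 bytes would hold the full text.
  //   }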
  uptr PrintTo(char *out_buf, uptr out_buf_size) const;

  static bool WillUseFastUnwind(bool request_fast_unwind) {
    if (!SANITIZER_CAN_FAST_UNWIND)
      return false;
    if (!SANITIZER_CAN_SLOW_UNWIND)
      return true;
    return request_fast_unwind;
  }
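  // For example, given the macros above: where fast unwind is unavailable
  // (Windows, Linux/MIPS) this always returns false; on Apple platforms,
  // where slow unwind is unavailable, it always returns true; otherwise the
  // caller's request decides.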

  static uptr GetCurrentPc();
  static inline uptr GetPreviousInstructionPc(uptr pc);
  static uptr GetNextInstructionPc(uptr pc);
};

// Performance-critical, must be in the header.
ALWAYS_INLINE
uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
#if defined(__arm__)
  // T32 (Thumb) branch instructions might be 16 or 32 bit long,
  // so we return (pc-2) in that case in order to be safe.
  // For A32 mode we return (pc-4) because all instructions are 32 bit long.
  return (pc - 3) & (~1);
#elif defined(__sparc__) || defined(__mips__)
  return pc - 8;
#elif SANITIZER_RISCV64
  // RV-64 has variable instruction length...
  // The C extension gives us 2-byte instructions,
  // base RV-64 has 4-byte instructions,
  // and the RISC-V architecture allows instructions up to 8 bytes.
  // It seems difficult to figure out the exact instruction length -
  // pc - 2 seems like a safe option for the purposes of stack tracing.
  return pc - 2;
#elif SANITIZER_S390 || SANITIZER_I386 || SANITIZER_X32 || SANITIZER_X64
  return pc - 1;
#else
  return pc - 4;
#endif
}
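// Example (illustrative sketch): when symbolizing a captured trace, every
// frame except the topmost one typically holds a return address, so it is
// first mapped back into the call instruction:
//   for (u32 i = 0; i < stack.size; i++) {
//     uptr pc = stack.trace[i];
//     if (i != 0) pc = StackTrace::GetPreviousInstructionPc(pc);
//     // ... symbolize pc ...
//   }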

// StackTrace that owns the buffer used to store the addresses.
struct BufferedStackTrace : public StackTrace {
  uptr trace_buffer[kStackTraceMax];
  uptr top_frame_bp;  // Optional bp of a top frame.

  BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}

  void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);

  // Get the stack trace with the given pc and bp.
  // The pc will be in the position 0 of the resulting stack trace.
  // The bp may refer to the current frame or to the caller's frame.
  void Unwind(uptr pc, uptr bp, void *context, bool request_fast,
              u32 max_depth = kStackTraceMax) {
    top_frame_bp = (max_depth > 0) ? bp : 0;
    // Small max_depth optimization
    if (max_depth <= 1) {
      if (max_depth == 1)
        trace_buffer[0] = pc;
      size = max_depth;
      return;
    }
    UnwindImpl(pc, bp, context, request_fast, max_depth);
  }
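  // Example (illustrative sketch): capturing the caller's stack from inside a
  // runtime function, using the GET_CALLER_PC_BP macro defined at the end of
  // this header:
  //   BufferedStackTrace stack;
  //   GET_CALLER_PC_BP;
  //   stack.Unwind(pc, bp, /*context=*/nullptr, /*request_fast=*/true);
  //   stack.Print();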

  void Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
              uptr stack_bottom, bool request_fast_unwind);

  void Reset() {
    *static_cast<StackTrace *>(this) = StackTrace(trace_buffer, 0);
    top_frame_bp = 0;
  }

 private:
  // Every runtime defines its own implementation of this method
  void UnwindImpl(uptr pc, uptr bp, void *context, bool request_fast,
                  u32 max_depth);

  // UnwindFast/Slow have platform-specific implementations
  void UnwindFast(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
                  u32 max_depth);
  void UnwindSlow(uptr pc, u32 max_depth);
  void UnwindSlow(uptr pc, void *context, u32 max_depth);

  void PopStackFrames(uptr count);
  uptr LocatePcInTrace(uptr pc);

  BufferedStackTrace(const BufferedStackTrace &) = delete;
  void operator=(const BufferedStackTrace &) = delete;

  friend class FastUnwindTest;
};

#if defined(__s390x__)
static const uptr kFrameSize = 160;
#elif defined(__s390__)
static const uptr kFrameSize = 96;
#else
static const uptr kFrameSize = 2 * sizeof(uhwptr);
#endif

// Check if given pointer points into allocated stack area.
static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
  return frame > stack_bottom && frame < stack_top - kFrameSize;
}
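// Example (illustrative sketch): a frame-pointer-based unwinder can use
// IsValidFrame as its loop condition while chasing saved frame pointers,
// stopping once a candidate bp leaves the thread's stack (stack_top and
// stack_bottom are assumed to come from the thread's stack bounds):
//   uptr bp = initial_bp;
//   while (IsValidFrame(bp, stack_top, stack_bottom) &&
//          IsAligned(bp, sizeof(uhwptr))) {
//     uhwptr *frame = (uhwptr *)bp;
//     // On common frame-pointer ABIs frame[0] holds the caller's bp and
//     // frame[1] the return address; the exact layout varies by target.
//     bp = (uptr)frame[0];
//   }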

}  // namespace __sanitizer

// Use this macro if you want to print a stack trace with the caller
// of the current function in the top frame.
#define GET_CALLER_PC_BP \
  uptr bp = GET_CURRENT_FRAME();              \
  uptr pc = GET_CALLER_PC();

#define GET_CALLER_PC_BP_SP \
  GET_CALLER_PC_BP;                           \
  uptr local_stack;                           \
  uptr sp = (uptr)&local_stack

// Use this macro if you want to print a stack trace with the current
// function in the top frame.
#define GET_CURRENT_PC_BP \
  uptr bp = GET_CURRENT_FRAME();              \
  uptr pc = StackTrace::GetCurrentPc()

#define GET_CURRENT_PC_BP_SP \
  GET_CURRENT_PC_BP;                          \
  uptr local_stack;                           \
  uptr sp = (uptr)&local_stack
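// Example (illustrative sketch): the CALLER and CURRENT macro families differ
// only in which frame ends up on top of the report. A hypothetical internal
// check that wants to show itself on top might use:
//   void CheckShadowConsistency() {   // hypothetical runtime function
//     GET_CURRENT_PC_BP;              // this function appears in frame #0
//     BufferedStackTrace stack;
//     stack.Unwind(pc, bp, nullptr, /*request_fast=*/true);
//     stack.Print();
//   }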

// GET_CURRENT_PC() is equivalent to StackTrace::GetCurrentPc().
// The optimized x86 version is faster than GetCurrentPc because
// it does not involve a function call; instead it reads the RIP register.
// Reads of RIP by an instruction return RIP pointing to the next
// instruction, which is exactly what we want here, thus 0 offset.
// It needs to be a macro because otherwise we will get the name
// of this function on the top of most stacks. Attribute artificial
// does not do what it claims to do, unfortunately. And attribute
// __nodebug__ is clang-only. If we had an attribute that
// removed this function from debug info, we could simply make
// StackTrace::GetCurrentPc() faster.
#if defined(__x86_64__)
#  define GET_CURRENT_PC()                \
    (__extension__({                      \
      uptr pc;                            \
      asm("lea 0(%%rip), %0" : "=r"(pc)); \
      pc;                                 \
    }))
#else
#  define GET_CURRENT_PC() StackTrace::GetCurrentPc()
#endif
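
// Example (illustrative sketch): GET_CURRENT_PC() is intended for spots that
// need the pc of the current location without StackTrace::GetCurrentPc()
// itself showing up in symbolized frames:
//   uptr pc = GET_CURRENT_PC();
//   uptr bp = GET_CURRENT_FRAME();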

#endif  // SANITIZER_STACKTRACE_H