gcc-13.2.0/libgcc/config/rs6000/aix-unwind.h
/* DWARF2 EH unwinding support for AIX.
   Copyright (C) 2011-2023 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Useful register numbers.  */

#define R_LR             65
#define R_CR2            70
#define R_XER            76
#define R_FIRST_ALTIVEC  77
#define R_VRSAVE        109
#define R_VSCR          110

/* If the current unwind info (FS) does not contain explicit info
   saving R2, then we have to do a minor amount of code reading to
   figure out if it was saved.  The big problem here is that the
   code that does the save/restore is generated by the linker, so
   we have no good way to determine at compile time what to do.  */

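/* For reference, a typical AIX cross-module call site ends up looking
   roughly like

	bl	.foo		# call resolved through linker glue
	ld	r2,40(r1)	# 64-bit TOC restore, insn 0xE8410028

   or, in 32-bit mode,

	bl	.foo
	lwz	r2,20(r1)	# 32-bit TOC restore, insn 0x80410014

   so the macros below peek at the instruction at the return address and,
   when they recognize one of these restores, declare R2 saved in the
   standard TOC save slot of the frame.  */
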
#ifdef __64BIT__
#define MD_FROB_UPDATE_CONTEXT(CTX, FS)					\
  do {									\
    if ((FS)->regs.how[2] == REG_UNSAVED)				\
      {									\
	unsigned int *insn						\
	  = (unsigned int *)						\
	    _Unwind_GetGR ((CTX), R_LR);				\
	if (*insn == 0xE8410028)	/* ld r2,40(r1) */		\
	  _Unwind_SetGRPtr ((CTX), 2, (CTX)->cfa + 40);			\
      }									\
  } while (0)
#else
#define MD_FROB_UPDATE_CONTEXT(CTX, FS)					\
  do {									\
    if ((FS)->regs.how[2] == REG_UNSAVED)				\
      {									\
	unsigned int *insn						\
	  = (unsigned int *)						\
	    _Unwind_GetGR ((CTX), R_LR);				\
	if (*insn == 0x80410014)	/* lwz r2,20(r1) */		\
	  _Unwind_SetGRPtr ((CTX), 2, (CTX)->cfa + 20);			\
      }									\
  } while (0)
#endif

/* Now on to MD_FALLBACK_FRAME_STATE_FOR.
   32bit AIX 5.2, 5.3, 6.1, 7.X and
   64bit AIX 6.1, 7.X only at this stage.  */

#include <stdlib.h>
#include <stddef.h>
#include <signal.h>
#include <sys/machine.h>

#ifdef __64BIT__

typedef struct __context64 mstate_t;

#else

typedef struct mstsave mstate_t;

#endif

#define MD_FALLBACK_FRAME_STATE_FOR ppc_aix_fallback_frame_state
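
/* The generic unwinder invokes this fallback when it finds no DWARF unwind
   info for the return address, which is what happens when unwinding through
   the kernel's signal delivery code.  */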

/* If we are compiling on AIX < 5.3, the VMX related data structures are not
   defined and we take measures to obtain proper runtime behavior if the
   compiled code happens to run on a later version with VMX enabled.  */

#ifndef MSR_VMX
#define MSR_VMX 0x2000000
#endif

typedef unsigned int uint;
typedef struct { uint v[4]; } vreg_t;
typedef struct {
  vreg_t regs[32];
  uint   pad1 [3];
  uint   vscr;
  uint   vrsave;
  uint   pad2 [3];
} vstate_t;

#define EXT_CONTEXT_MARK 0x45435458	/* "ECTX" in ASCII.  */
#define EXT_CONTEXT_SIZE 4096
#define BUMPER_SIZE (EXT_CONTEXT_SIZE - sizeof(vstate_t) - (5 * sizeof(int)))

typedef struct {
  uint     pad1 [4];
  vstate_t vstate;
  char     bumper [BUMPER_SIZE];
  int      mark;
} extended_context_t;

typedef struct {
  char bumper [offsetof (ucontext_t, uc_stack) + sizeof (stack_t)];
  extended_context_t * ectx;
  int mark;
} vmx_ucontext_t;
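
/* When the kernel has attached VMX state to a signal context, a pointer to
   the extended context and a matching EXT_CONTEXT_MARK are expected right
   past the uc_stack field of the ucontext; both marks are checked below
   before the pointer is trusted.  */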

/* Determine whether CONTEXT designates a signal handler, and return the
   associated ucontext_t address if so.  Return NULL otherwise.  */

static ucontext_t *
ucontext_for (struct _Unwind_Context *context)
{
  const unsigned int * ra = context->ra;

  /* AIX 5.2, 5.3, 6.1 and 7.X, threaded or not, share common patterns
     and feature variants depending on the configured kernel (unix_mp
     or unix_64).  */

#ifdef __64BIT__
  if (*(ra - 5) == 0x4c00012c     /* isync             */
      && *(ra - 4) == 0xe8ec0000  /* ld      r7,0(r12) */
      && *(ra - 3) == 0xe84c0008  /* ld      r2,8(r12) */
      && *(ra - 2) == 0x7ce903a6  /* mtctr   r7        */
      && *(ra - 1) == 0x4e800421  /* bctrl             */
      && *(ra - 0) == 0x7de27b78) /* mr      r2,r15   <-- context->ra */
    {
      /* unix_64 */
      if (*(ra - 6) == 0x7d000164)  /* mtmsrd  r8 */
	{
	  /* AIX 6.1, 7.1 and 7.2 */
	  return (ucontext_t *)(context->cfa + 0x70);
	}
    }
#else
  if (*(ra - 5) == 0x4c00012c     /* isync             */
      && *(ra - 4) == 0x80ec0000  /* lwz     r7,0(r12) */
      && *(ra - 3) == 0x804c0004  /* lwz     r2,4(r12) */
      && *(ra - 2) == 0x7ce903a6  /* mtctr   r7        */
      && *(ra - 1) == 0x4e800421  /* bctrl             */
      && *(ra - 0) == 0x7dc37378) /* mr      r3,r14   <-- context->ra */
    {
      /* unix_64 */
      if (*(ra - 6) == 0x7d000164)  /* mtmsrd  r8 */
	{
	  switch (*(ra + 18))
	    {
	      /* AIX 5.2 */
	    case 0x835a0520:  /* lwz r26,1312(r26) */
	      return (ucontext_t *)(context->cfa + 0x70);

	      /* AIX 5.3 */
	    case 0x835a0570:  /* lwz r26,1392(r26) */
	      return (ucontext_t *)(context->cfa + 0x40);

	      /* AIX 6.1 and 7.1 */
	    case 0x2c1a0000:  /* cmpwi   r26,0 */
	      return (ucontext_t *)(context->cfa + 0x40);

	      /* AIX 7.2 */
	    case 0x3800000a:  /* li   r0,A */
	      return (ucontext_t *)(context->cfa + 0x40);

	    default:
	      return 0;
	    }
	}

      /* unix_mp */
      if (*(ra - 6) == 0x7d000124)  /* mtmsr  r8 */
	{
	  typedef struct {
	    char pad[56];
	    ucontext_t ucontext;
	    siginfo_t siginfo;
	  } aix52_stack_t;

	  aix52_stack_t * frame = (aix52_stack_t *) context->cfa;
	  return &frame->ucontext;
	}
    }
#endif
  return 0;
}
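
/* The CFA offsets used above locate the ucontext_t within the signal
   delivery frame laid out by the kernel; that layout varies with the AIX
   release and kernel flavor, hence the per-pattern return values.  */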

/* The fallback proper.  */

#ifdef __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__
#define RETURN_COLUMN __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__
#else
#define RETURN_COLUMN ARG_POINTER_REGNUM
#endif

#define REGISTER_CFA_OFFSET_FOR(FS, REGNO, ADDR, CFA)			\
  do {									\
    (FS)->regs.how[REGNO] = REG_SAVED_OFFSET;				\
    (FS)->regs.reg[REGNO].loc.offset = (long) (ADDR) - (CFA);		\
  } while (0)
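
/* For example, REGISTER_CFA_OFFSET_FOR (fs, R_LR, &mctx->lr, new_cfa) below
   records that LR is to be read at CFA + ((long) &mctx->lr - new_cfa), i.e.
   straight out of the machine state the kernel saved.  */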

static _Unwind_Reason_Code
ppc_aix_fallback_frame_state (struct _Unwind_Context *context,
			      _Unwind_FrameState *fs)
{
  ucontext_t * uctx = ucontext_for (context);
  mstate_t * mctx;

  long new_cfa;
  int i;

  if (uctx == NULL)
    return _URC_END_OF_STACK;

  mctx = &uctx->uc_mcontext.jmp_context;

  /* The "kernel" frame cfa is the stack pointer at the signal occurrence
     point.  */
  new_cfa = mctx->gpr[__LIBGCC_STACK_POINTER_REGNUM__];

  fs->regs.cfa_how = CFA_REG_OFFSET;
  fs->regs.cfa_reg = __LIBGCC_STACK_POINTER_REGNUM__;
  fs->regs.cfa_offset = new_cfa - (long) context->cfa;

  /* And we state how to find the various registers it has saved with
     relative offset rules from there.  */

  for (i = 0; i < 32; i++)
    if (i != __LIBGCC_STACK_POINTER_REGNUM__)
      REGISTER_CFA_OFFSET_FOR (fs, i, &mctx->gpr[i], new_cfa);

  REGISTER_CFA_OFFSET_FOR (fs, R_CR2, &mctx->cr, new_cfa);
  REGISTER_CFA_OFFSET_FOR (fs, R_XER, &mctx->xer, new_cfa);
  REGISTER_CFA_OFFSET_FOR (fs, R_LR, &mctx->lr, new_cfa);

  fs->retaddr_column = RETURN_COLUMN;
  REGISTER_CFA_OFFSET_FOR (fs, RETURN_COLUMN, &mctx->iar, new_cfa);
  fs->signal_frame = 1;

  /* Honor FP Ever Used ...  */
  if (mctx->fpeu)
    {
      for (i = 0; i < 32; i++)
	REGISTER_CFA_OFFSET_FOR (fs, i+32, &mctx->fpr[i], new_cfa);
    }

  /* Honor VMX context, if any.  We expect the msr bit never to be set in
     environments where there is no VMX support, e.g. on AIX < 5.3.  */
  if (mctx->msr & MSR_VMX)
    {
      vmx_ucontext_t * uc = (vmx_ucontext_t *) uctx;

      if (uc->mark == EXT_CONTEXT_MARK && uc->ectx->mark == EXT_CONTEXT_MARK)
	{
	  vstate_t * vstate = &uc->ectx->vstate;

	  for (i = 0; i < 32; i++)
	    REGISTER_CFA_OFFSET_FOR
	      (fs, i+R_FIRST_ALTIVEC, &vstate->regs[i], new_cfa);

	  REGISTER_CFA_OFFSET_FOR (fs, R_VSCR, &vstate->vscr, new_cfa);
	  REGISTER_CFA_OFFSET_FOR (fs, R_VRSAVE, &vstate->vrsave, new_cfa);
	}
    }

  return _URC_NO_REASON;
}