glibc-2.38/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
/* Initialize CPU feature data.  AArch64 version.
   This file is part of the GNU C Library.
   Copyright (C) 2017-2023 Free Software Foundation, Inc.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <cpu-features.h>
#include <sys/auxv.h>
#include <elf/dl-hwcaps.h>
#include <sys/prctl.h>

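/* Field layout of DCZID_EL0 as read below: bit 4 (DZP) is set when the
   DC ZVA instruction is prohibited, and bits [3:0] (BS) give log2 of the
   ZVA block size in 4-byte words.  */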
#define DCZID_DZP_MASK (1 << 4)
#define DCZID_BS_MASK (0xf)

/* The maximal set of permitted tags that the MTE random tag generation
   instruction may use.  We exclude tag 0 because a) we want to reserve
   that for the libc heap structures and b) because it makes it easier
   to see when pointers have been correctly tagged.  */
#define MTE_ALLOWED_TAGS (0xfffe << PR_MTE_TAG_SHIFT)
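/* With PR_MTE_TAG_SHIFT == 3 this places the 16-bit include mask in bits
   [18:3] of the PR_SET_TAGGED_ADDR_CTRL argument; 0xfffe permits the IRG
   instruction to generate tags 1-15 but never tag 0.  */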

struct cpu_list
{
  const char *name;
  uint64_t midr;
};

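/* MIDR_EL1 encodes, from high to low bits: Implementer [31:24],
   Variant [23:20], Architecture [19:16], PartNum [15:4] and
   Revision [3:0]; the values below identify the cores by those fields.  */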
static struct cpu_list cpu_list[] = {
      {"falkor",	 0x510FC000},
      {"thunderxt88",	 0x430F0A10},
      {"thunderx2t99",   0x431F0AF0},
      {"thunderx2t99p1", 0x420F5160},
      {"phecda",	 0x680F0000},
      {"ares",		 0x411FD0C0},
      {"emag",		 0x503F0001},
      {"kunpeng920",	 0x481FD010},
      {"a64fx",		 0x460F0010},
      {"generic",	 0x0}
};

static uint64_t
get_midr_from_mcpu (const char *mcpu)
{
  for (int i = 0; i < sizeof (cpu_list) / sizeof (struct cpu_list); i++)
    if (strcmp (mcpu, cpu_list[i].name) == 0)
      return cpu_list[i].midr;

  return UINT64_MAX;
}

static inline void
init_cpu_features (struct cpu_features *cpu_features)
{
  register uint64_t midr = UINT64_MAX;

  /* Get the tunable override.  */
  const char *mcpu = TUNABLE_GET (glibc, cpu, name, const char *, NULL);
  if (mcpu != NULL)
    midr = get_midr_from_mcpu (mcpu);

  /* If there was no useful tunable override, query the MIDR if the kernel
     allows it.  */
  if (midr == UINT64_MAX)
    {
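      /* HWCAP_CPUID is set when the kernel traps and emulates EL0 reads of
         ID registers such as MIDR_EL1, so the MRS below cannot fault.  */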
      if (GLRO (dl_hwcap) & HWCAP_CPUID)
	asm volatile ("mrs %0, midr_el1" : "=r"(midr));
      else
	midr = 0;
    }

  cpu_features->midr_el1 = midr;

  /* Check if ZVA is enabled.  */
  unsigned dczid;
  asm volatile ("mrs %0, dczid_el0" : "=r"(dczid));

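  /* If DZP is set, DC ZVA is prohibited and zva_size is left unset;
     otherwise the block size is 4 << BS bytes (BS is log2 of the size
     in 4-byte words).  */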
  if ((dczid & DCZID_DZP_MASK) == 0)
    cpu_features->zva_size = 4 << (dczid & DCZID_BS_MASK);

  /* Check if BTI is supported.  */
  cpu_features->bti = GLRO (dl_hwcap2) & HWCAP2_BTI;

  /* Set up memory tagging support if the HW and kernel support it, and if
     the user has requested it.  */
  cpu_features->mte_state = 0;

#ifdef USE_MTAG
  int mte_state = TUNABLE_GET (glibc, mem, tagging, unsigned, 0);
  cpu_features->mte_state = (GLRO (dl_hwcap2) & HWCAP2_MTE) ? mte_state : 0;
  /* If we lack the MTE feature, disable the tunable, since it will
     otherwise cause instructions that won't run on this CPU to be used.  */
  TUNABLE_SET (glibc, mem, tagging, cpu_features->mte_state);

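  /* mte_state is treated as a bit-field below: bit 2 requests both
     synchronous and asynchronous tag-check faulting so the kernel can pick
     its preferred mode, bit 1 requests synchronous faulting, and any other
     non-zero value falls back to asynchronous faulting.  */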
  if (cpu_features->mte_state & 4)
    /* Enable choosing system-preferred faulting mode.  */
    __prctl (PR_SET_TAGGED_ADDR_CTRL,
	     (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC
	      | MTE_ALLOWED_TAGS),
	     0, 0, 0);
  else if (cpu_features->mte_state & 2)
    __prctl (PR_SET_TAGGED_ADDR_CTRL,
	     (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | MTE_ALLOWED_TAGS),
	     0, 0, 0);
  else if (cpu_features->mte_state)
    __prctl (PR_SET_TAGGED_ADDR_CTRL,
	     (PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_ASYNC | MTE_ALLOWED_TAGS),
	     0, 0, 0);
#endif

  /* Check if SVE is supported.  */
  cpu_features->sve = GLRO (dl_hwcap) & HWCAP_SVE;
}