sed-4.9/gnulib-tests/vma-iter.c
       1  /* Iteration over virtual memory areas.
       2     Copyright (C) 2011-2022 Free Software Foundation, Inc.
       3     Written by Bruno Haible <bruno@clisp.org>, 2011-2017.
       4  
       5     This program is free software: you can redistribute it and/or modify
       6     it under the terms of the GNU General Public License as published by
       7     the Free Software Foundation, either version 3 of the License, or
       8     (at your option) any later version.
       9  
      10     This program is distributed in the hope that it will be useful,
      11     but WITHOUT ANY WARRANTY; without even the implied warranty of
      12     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      13     GNU General Public License for more details.
      14  
      15     You should have received a copy of the GNU General Public License
      16     along with this program.  If not, see <https://www.gnu.org/licenses/>.  */
      17  
      18  #include <config.h>
      19  
      20  /* On Solaris in 32-bit mode, when gnulib module 'largefile' is in use,
      21     prevent a compilation error
      22       "Cannot use procfs in the large file compilation environment"
      23     On Android, when targeting Android 4.4 or older with a GCC toolchain,
      24     prevent a compilation error
      25       "error: call to 'mmap' declared with attribute error: mmap is not
      26        available with _FILE_OFFSET_BITS=64 when using GCC until android-21.
      27        Either raise your minSdkVersion, disable _FILE_OFFSET_BITS=64, or
      28        switch to Clang."
      29     The files that we access in this compilation unit are smaller than
      30     2 GB.  */
      31  #if defined __sun || defined __ANDROID__
      32  # undef _FILE_OFFSET_BITS
      33  #endif
      34  
      35  /* Specification.  */
      36  #include "vma-iter.h"
      37  
      38  #include <errno.h> /* errno */
      39  #include <stdlib.h> /* size_t */
      40  #include <fcntl.h> /* open, O_RDONLY */
      41  #include <unistd.h> /* getpagesize, lseek, read, close, getpid */
      42  
      43  #if defined __linux__ || defined __ANDROID__
      44  # include <limits.h> /* PATH_MAX */
      45  #endif
      46  
      47  #if defined __linux__ || defined __ANDROID__ || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__ || defined __minix /* || defined __CYGWIN__ */
      48  # include <sys/types.h>
      49  # include <sys/mman.h> /* mmap, munmap */
      50  #endif
      51  #if defined __minix
      52  # include <string.h> /* memcpy */
      53  #endif
      54  
      55  #if defined __FreeBSD__ || defined __FreeBSD_kernel__ /* FreeBSD, GNU/kFreeBSD */
      56  # include <sys/types.h>
      57  # include <sys/mman.h> /* mmap, munmap */
      58  # include <sys/user.h> /* struct kinfo_vmentry */
      59  # include <sys/sysctl.h> /* sysctl */
      60  #endif
      61  #if defined __NetBSD__ || defined __OpenBSD__ /* NetBSD, OpenBSD */
      62  # include <sys/types.h>
      63  # include <sys/mman.h> /* mmap, munmap */
      64  # include <sys/sysctl.h> /* sysctl, struct kinfo_vmentry */
      65  #endif
      66  
      67  #if defined _AIX /* AIX */
      68  # include <string.h> /* memcpy */
      69  # include <sys/types.h>
      70  # include <sys/mman.h> /* mmap, munmap */
      71  # include <sys/procfs.h> /* prmap_t */
      72  #endif
      73  
      74  #if defined __sgi || defined __osf__ /* IRIX, OSF/1 */
      75  # include <string.h> /* memcpy */
      76  # include <sys/types.h>
      77  # include <sys/mman.h> /* mmap, munmap */
      78  # include <sys/procfs.h> /* PIOC*, prmap_t */
      79  #endif
      80  
      81  #if defined __sun /* Solaris */
      82  # include <string.h> /* memcpy */
      83  # include <sys/types.h>
      84  # include <sys/mman.h> /* mmap, munmap */
      85  /* Try to use the newer ("structured") /proc filesystem API, if supported.  */
      86  # define _STRUCTURED_PROC 1
      87  # include <sys/procfs.h> /* prmap_t, optionally PIOC* */
      88  #endif
      89  
      90  #if HAVE_PSTAT_GETPROCVM /* HP-UX */
      91  # include <sys/pstat.h> /* pstat_getprocvm */
      92  #endif
      93  
      94  #if defined __APPLE__ && defined __MACH__ /* Mac OS X */
      95  # include <mach/mach.h>
      96  #endif
      97  
      98  #if defined __GNU__ /* GNU/Hurd */
      99  # include <mach/mach.h>
     100  #endif
     101  
     102  #if defined _WIN32 || defined __CYGWIN__ /* Windows */
     103  # include <windows.h>
     104  #endif
     105  
     106  #if defined __BEOS__ || defined __HAIKU__ /* BeOS, Haiku */
     107  # include <OS.h>
     108  #endif
     109  
     110  #if HAVE_MQUERY /* OpenBSD */
     111  # include <sys/types.h>
     112  # include <sys/mman.h> /* mquery */
     113  #endif
     114  
     115  
     116  /* Support for reading text files in the /proc file system.  */
     117  
     118  #if defined __linux__ || defined __ANDROID__ || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__ || defined __minix /* || defined __CYGWIN__ */
     119  
     120  /* Buffered read-only streams.
     121     We cannot use <stdio.h> here, because fopen() calls malloc(), and a malloc()
     122     call may call mmap() and thus pre-allocate available memory.
     123     Also, we cannot use multiple read() calls, because if the buffer size is
     124     smaller than the file's contents:
     125       - On NetBSD, the second read() call would return 0, thus making the file
     126         appear truncated.
     127       - On DragonFly BSD, the first read() call would fail with errno = EFBIG.
     128       - On all platforms, if some other thread is doing memory allocations or
     129         deallocations between two read() calls, there is a high risk that the
     130         result of these two read() calls don't fit together, and as a
     131         consequence we will parse garbage and either omit some VMAs or return
     132         VMAs with nonsensical addresses.
     133     So use mmap(), and ignore the resulting VMA.  */
     134  
     135  # if defined __linux__ || defined __ANDROID__
     136    /* On Linux, if the file does not entirely fit into the buffer, the read()
     137       function stops before the line that would come out truncated.  The
     138       maximum size of such a line is 73 + PATH_MAX bytes.  To be sure that we
     139       have read everything, we must verify that at least that many bytes are
     140       left when read() returned.  */
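            /* For illustration only: a line of /proc/self/maps on 64-bit Linux
               looks roughly like
                 7f4b2a400000-7f4b2a421000 r-xp 00000000 08:01 1234567   /usr/lib/libfoo.so
               The fields before the pathname have (nearly) fixed width, which is
               presumably where the 73 + PATH_MAX bound comes from.  */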
     141  #  define MIN_LEFTOVER (73 + PATH_MAX)
     142  # else
     143  #  define MIN_LEFTOVER 1
     144  # endif
     145  
     146  # ifdef TEST
     147  /* During testing, we want to run into the hairy cases.  */
     148  #  define STACK_ALLOCATED_BUFFER_SIZE 32
     149  # else
     150  #  if MIN_LEFTOVER < 1024
     151  #   define STACK_ALLOCATED_BUFFER_SIZE 1024
     152  #  else
     153      /* There is no point in using a stack-allocated buffer if it is too small anyway.  */
     154  #   define STACK_ALLOCATED_BUFFER_SIZE 1
     155  #  endif
     156  # endif
     157  
     158  struct rofile
     159    {
     160      size_t position;     /* Current read position in the buffer.  */
     161      size_t filled;       /* Number of bytes in the buffer that are valid.  */
     162      int eof_seen;        /* Set to 1 once the end of the stream was reached.  */
     163      /* These fields deal with allocation of the buffer.  */
     164      char *buffer;
     165      char *auxmap;
     166      size_t auxmap_length;
     167      unsigned long auxmap_start;
     168      unsigned long auxmap_end;
     169      char stack_allocated_buffer[STACK_ALLOCATED_BUFFER_SIZE];
     170    };
     171  
     172  /* Open a read-only file stream.  */
     173  static int
     174  rof_open (struct rofile *rof, const char *filename)
     175  {
     176    int fd;
     177    unsigned long pagesize;
     178    size_t size;
     179  
     180    fd = open (filename, O_RDONLY | O_CLOEXEC);
     181    if (fd < 0)
     182      return -1;
     183    rof->position = 0;
     184    rof->eof_seen = 0;
     185    /* Try the static buffer first.  */
     186    pagesize = 0;
     187    rof->buffer = rof->stack_allocated_buffer;
     188    size = sizeof (rof->stack_allocated_buffer);
     189    rof->auxmap = NULL;
     190    rof->auxmap_start = 0;
     191    rof->auxmap_end = 0;
     192    for (;;)
     193      {
     194        /* Attempt to read the contents in a single system call.  */
     195        if (size > MIN_LEFTOVER)
     196          {
     197            int n = read (fd, rof->buffer, size);
     198            if (n < 0 && errno == EINTR)
     199              goto retry;
     200  # if defined __DragonFly__
     201            if (!(n < 0 && errno == EFBIG))
     202  # endif
     203              {
     204                if (n <= 0)
     205                  /* Empty file.  */
     206                  goto fail1;
     207                if (n + MIN_LEFTOVER <= size)
     208                  {
     209                    /* The buffer was sufficiently large.  */
     210                    rof->filled = n;
     211  # if defined __linux__ || defined __ANDROID__
     212                    /* On Linux, the read() call may stop even if the buffer was
     213                       large enough.  We need the equivalent of full_read().  */
     214                    for (;;)
     215                      {
     216                        n = read (fd, rof->buffer + rof->filled, size - rof->filled);
     217                        if (n < 0 && errno == EINTR)
     218                          goto retry;
     219                        if (n < 0)
     220                          /* Some error.  */
     221                          goto fail1;
     222                        if (n + MIN_LEFTOVER > size - rof->filled)
     223                          /* Allocate a larger buffer.  */
     224                          break;
     225                        if (n == 0)
     226                          {
     227                            /* Reached the end of file.  */
     228                            close (fd);
     229                            return 0;
     230                          }
     231                        rof->filled += n;
     232                      }
     233  # else
     234                    close (fd);
     235                    return 0;
     236  # endif
     237                  }
     238              }
     239          }
     240        /* Allocate a larger buffer.  */
     241        if (pagesize == 0)
     242          {
     243            pagesize = getpagesize ();
     244            size = pagesize;
     245            while (size <= MIN_LEFTOVER)
     246              size = 2 * size;
     247          }
     248        else
     249          {
     250            size = 2 * size;
     251            if (size == 0)
     252              /* Wraparound.  */
     253              goto fail1;
     254            if (rof->auxmap != NULL)
     255              munmap (rof->auxmap, rof->auxmap_length);
     256          }
     257        rof->auxmap = (void *) mmap ((void *) 0, size, PROT_READ | PROT_WRITE,
     258                                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
     259        if (rof->auxmap == (void *) -1)
     260          {
     261            close (fd);
     262            return -1;
     263          }
     264        rof->auxmap_length = size;
     265        rof->auxmap_start = (unsigned long) rof->auxmap;
     266        rof->auxmap_end = rof->auxmap_start + size;
     267        rof->buffer = (char *) rof->auxmap;
     268       retry:
     269        /* Restart.  */
     270        if (lseek (fd, 0, SEEK_SET) < 0)
     271          {
     272            close (fd);
     273            fd = open (filename, O_RDONLY | O_CLOEXEC);
     274            if (fd < 0)
     275              goto fail2;
     276          }
     277      }
     278   fail1:
     279    close (fd);
     280   fail2:
     281    if (rof->auxmap != NULL)
     282      munmap (rof->auxmap, rof->auxmap_length);
     283    return -1;
     284  }
     285  
     286  /* Return the next byte from a read-only file stream without consuming it,
     287     or -1 at EOF.  */
     288  static int
     289  rof_peekchar (struct rofile *rof)
     290  {
     291    if (rof->position == rof->filled)
     292      {
     293        rof->eof_seen = 1;
     294        return -1;
     295      }
     296    return (unsigned char) rof->buffer[rof->position];
     297  }
     298  
     299  /* Return the next byte from a read-only file stream, or -1 at EOF.  */
     300  static int
     301  rof_getchar (struct rofile *rof)
     302  {
     303    int c = rof_peekchar (rof);
     304    if (c >= 0)
     305      rof->position++;
     306    return c;
     307  }
     308  
     309  /* Parse an unsigned hexadecimal number from a read-only file stream.  */
     310  static int
     311  rof_scanf_lx (struct rofile *rof, unsigned long *valuep)
     312  {
     313    unsigned long value = 0;
     314    unsigned int numdigits = 0;
     315    for (;;)
     316      {
     317        int c = rof_peekchar (rof);
     318        if (c >= '0' && c <= '9')
     319          value = (value << 4) + (c - '0');
     320        else if (c >= 'A' && c <= 'F')
     321          value = (value << 4) + (c - 'A' + 10);
     322        else if (c >= 'a' && c <= 'f')
     323          value = (value << 4) + (c - 'a' + 10);
     324        else
     325          break;
     326        rof_getchar (rof);
     327        numdigits++;
     328      }
     329    if (numdigits == 0)
     330      return -1;
     331    *valuep = value;
     332    return 0;
     333  }
     334  
     335  /* Close a read-only file stream.  */
     336  static void
     337  rof_close (struct rofile *rof)
     338  {
     339    if (rof->auxmap != NULL)
     340      munmap (rof->auxmap, rof->auxmap_length);
     341  }
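
          /* Illustrative usage sketch of the rofile functions above, assuming a
             Linux-style maps file; this mirrors what vma_iterate_proc below does:

                 struct rofile rof;
                 if (rof_open (&rof, "/proc/self/maps") >= 0)
                   {
                     unsigned long start, end;
                     while (rof_scanf_lx (&rof, &start) >= 0
                            && rof_getchar (&rof) == '-'
                            && rof_scanf_lx (&rof, &end) >= 0)
                       {
                         int c;
                         do
                           c = rof_getchar (&rof);
                         while (c != -1 && c != '\n');
                       }
                     rof_close (&rof);
                   }
           */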
     342  
     343  #endif
     344  
     345  
     346  /* Support for reading the info from a text file in the /proc file system.  */
     347  
     348  #if defined __linux__ || defined __ANDROID__ || (defined __FreeBSD_kernel__ && !defined __FreeBSD__) /* || defined __CYGWIN__ */
     349  /* GNU/kFreeBSD mounts /proc as linprocfs, which looks like a Linux /proc
     350     file system.  */
     351  
     352  static int
     353  vma_iterate_proc (vma_iterate_callback_fn callback, void *data)
     354  {
     355    struct rofile rof;
     356  
     357    /* Open the current process' maps file.  It describes one VMA per line.  */
     358    if (rof_open (&rof, "/proc/self/maps") >= 0)
     359      {
     360        unsigned long auxmap_start = rof.auxmap_start;
     361        unsigned long auxmap_end = rof.auxmap_end;
     362  
     363        for (;;)
     364          {
     365            unsigned long start, end;
     366            unsigned int flags;
     367            int c;
     368  
     369            /* Parse one line.  First start and end.  */
     370            if (!(rof_scanf_lx (&rof, &start) >= 0
     371                  && rof_getchar (&rof) == '-'
     372                  && rof_scanf_lx (&rof, &end) >= 0))
     373              break;
     374            /* Then the flags.  */
     375            do
     376              c = rof_getchar (&rof);
     377            while (c == ' ');
     378            flags = 0;
     379            if (c == 'r')
     380              flags |= VMA_PROT_READ;
     381            c = rof_getchar (&rof);
     382            if (c == 'w')
     383              flags |= VMA_PROT_WRITE;
     384            c = rof_getchar (&rof);
     385            if (c == 'x')
     386              flags |= VMA_PROT_EXECUTE;
     387            while (c = rof_getchar (&rof), c != -1 && c != '\n')
     388              ;
     389  
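                    /* Illustrative example: if this VMA is [0x1000,0x9000) and the
                       auxiliary mapping occupies [0x4000,0x5000), the callback below
                       is invoked for [0x1000,0x4000) and [0x5000,0x9000).  The "- 1"
                       in the comparisons presumably keeps them correct in unsigned
                       arithmetic even for a region that extends to the very top of
                       the address space, where 'end' has wrapped around to 0.  */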
     390            if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
     391              {
     392                /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
     393                   = [start,auxmap_start-1] u [auxmap_end,end-1].  */
     394                if (start < auxmap_start)
     395                  if (callback (data, start, auxmap_start, flags))
     396                    break;
     397                if (auxmap_end - 1 < end - 1)
     398                  if (callback (data, auxmap_end, end, flags))
     399                    break;
     400              }
     401            else
     402              {
     403                if (callback (data, start, end, flags))
     404                  break;
     405              }
     406          }
     407        rof_close (&rof);
     408        return 0;
     409      }
     410  
     411    return -1;
     412  }
     413  
     414  #elif defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__
     415  
     416  static int
     417  vma_iterate_proc (vma_iterate_callback_fn callback, void *data)
     418  {
     419    struct rofile rof;
     420  
     421    /* Open the current process' maps file.  It describes one VMA per line.  */
     422    if (rof_open (&rof, "/proc/curproc/map") >= 0)
     423      {
     424        unsigned long auxmap_start = rof.auxmap_start;
     425        unsigned long auxmap_end = rof.auxmap_end;
     426  
     427        for (;;)
     428          {
     429            unsigned long start, end;
     430            unsigned int flags;
     431            int c;
     432  
     433            /* Parse one line.  First start.  */
     434            if (!(rof_getchar (&rof) == '0'
     435                  && rof_getchar (&rof) == 'x'
     436                  && rof_scanf_lx (&rof, &start) >= 0))
     437              break;
     438            while (c = rof_peekchar (&rof), c == ' ' || c == '\t')
     439              rof_getchar (&rof);
     440            /* Then end.  */
     441            if (!(rof_getchar (&rof) == '0'
     442                  && rof_getchar (&rof) == 'x'
     443                  && rof_scanf_lx (&rof, &end) >= 0))
     444              break;
     445  # if defined __FreeBSD__ || defined __DragonFly__
     446            /* Then the resident pages count.  */
     447            do
     448              c = rof_getchar (&rof);
     449            while (c == ' ');
     450            do
     451              c = rof_getchar (&rof);
     452            while (c != -1 && c != '\n' && c != ' ');
     453            /* Then the private resident pages count.  */
     454            do
     455              c = rof_getchar (&rof);
     456            while (c == ' ');
     457            do
     458              c = rof_getchar (&rof);
     459            while (c != -1 && c != '\n' && c != ' ');
     460            /* Then some kernel address.  */
     461            do
     462              c = rof_getchar (&rof);
     463            while (c == ' ');
     464            do
     465              c = rof_getchar (&rof);
     466            while (c != -1 && c != '\n' && c != ' ');
     467  # endif
     468            /* Then the flags.  */
     469            do
     470              c = rof_getchar (&rof);
     471            while (c == ' ');
     472            flags = 0;
     473            if (c == 'r')
     474              flags |= VMA_PROT_READ;
     475            c = rof_getchar (&rof);
     476            if (c == 'w')
     477              flags |= VMA_PROT_WRITE;
     478            c = rof_getchar (&rof);
     479            if (c == 'x')
     480              flags |= VMA_PROT_EXECUTE;
     481            while (c = rof_getchar (&rof), c != -1 && c != '\n')
     482              ;
     483  
     484            if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
     485              {
     486                /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
     487                   = [start,auxmap_start-1] u [auxmap_end,end-1].  */
     488                if (start < auxmap_start)
     489                  if (callback (data, start, auxmap_start, flags))
     490                    break;
     491                if (auxmap_end - 1 < end - 1)
     492                  if (callback (data, auxmap_end, end, flags))
     493                    break;
     494              }
     495            else
     496              {
     497                if (callback (data, start, end, flags))
     498                  break;
     499              }
     500          }
     501        rof_close (&rof);
     502        return 0;
     503      }
     504  
     505    return -1;
     506  }
     507  
     508  #elif defined __minix
     509  
     510  static int
     511  vma_iterate_proc (vma_iterate_callback_fn callback, void *data)
     512  {
     513    char fnamebuf[6+10+4+1];
     514    char *fname;
     515    struct rofile rof;
     516  
     517    /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()).  */
     518    fname = fnamebuf + sizeof (fnamebuf) - (4 + 1);
     519    memcpy (fname, "/map", 4 + 1);
     520    {
     521      unsigned int value = getpid ();
     522      do
     523        *--fname = (value % 10) + '0';
     524      while ((value = value / 10) > 0);
     525    }
     526    fname -= 6;
     527    memcpy (fname, "/proc/", 6);
     528  
     529    /* Open the current process' maps file.  It describes one VMA per line.  */
     530    if (rof_open (&rof, fname) >= 0)
     531      {
     532        unsigned long auxmap_start = rof.auxmap_start;
     533        unsigned long auxmap_end = rof.auxmap_end;
     534  
     535        for (;;)
     536          {
     537            unsigned long start, end;
     538            unsigned int flags;
     539            int c;
     540  
     541            /* Parse one line.  First start and end.  */
     542            if (!(rof_scanf_lx (&rof, &start) >= 0
     543                  && rof_getchar (&rof) == '-'
     544                  && rof_scanf_lx (&rof, &end) >= 0))
     545              break;
     546            /* Then the flags.  */
     547            do
     548              c = rof_getchar (&rof);
     549            while (c == ' ');
     550            flags = 0;
     551            if (c == 'r')
     552              flags |= VMA_PROT_READ;
     553            c = rof_getchar (&rof);
     554            if (c == 'w')
     555              flags |= VMA_PROT_WRITE;
     556            c = rof_getchar (&rof);
     557            if (c == 'x')
     558              flags |= VMA_PROT_EXECUTE;
     559            while (c = rof_getchar (&rof), c != -1 && c != '\n')
     560              ;
     561  
     562            if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
     563              {
     564                /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
     565                   = [start,auxmap_start-1] u [auxmap_end,end-1].  */
     566                if (start < auxmap_start)
     567                  if (callback (data, start, auxmap_start, flags))
     568                    break;
     569                if (auxmap_end - 1 < end - 1)
     570                  if (callback (data, auxmap_end, end, flags))
     571                    break;
     572              }
     573            else
     574              {
     575                if (callback (data, start, end, flags))
     576                  break;
     577              }
     578          }
     579        rof_close (&rof);
     580        return 0;
     581      }
     582  
     583    return -1;
     584  }
     585  
     586  #else
     587  
     588  static inline int
     589  vma_iterate_proc (vma_iterate_callback_fn callback, void *data)
     590  {
     591    return -1;
     592  }
     593  
     594  #endif
     595  
     596  
     597  /* Support for reading the info from the BSD sysctl() system call.  */
     598  
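          /* The implementations below all follow the same two-step pattern
             (shown here only as a sketch, not exact code): first ask sysctl()
             for the required length, then fetch the data into a buffer that is
             obtained with mmap() rather than malloc():

                 size_t len = 0;
                 if (sysctl (mib, miblen, NULL, &len, NULL, 0) < 0)
                   return -1;
                 len = ...enlarged, to allow for VMAs created in the meantime...;
                 buf = mmap of len rounded up to a multiple of the page size;
                 if (sysctl (mib, miblen, buf, &len, NULL, 0) < 0)
                   return -1;
                 ...len is now the number of bytes actually returned...  */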
     599  #if (defined __FreeBSD__ || defined __FreeBSD_kernel__) && defined KERN_PROC_VMMAP /* FreeBSD >= 7.1 */
     600  
     601  static int
     602  vma_iterate_bsd (vma_iterate_callback_fn callback, void *data)
     603  {
     604    /* Documentation: https://www.freebsd.org/cgi/man.cgi?sysctl(3)  */
     605    int info_path[] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid () };
     606    size_t len;
     607    size_t pagesize;
     608    size_t memneed;
     609    void *auxmap;
     610    unsigned long auxmap_start;
     611    unsigned long auxmap_end;
     612    char *mem;
     613    char *p;
     614    char *p_end;
     615  
     616    len = 0;
     617    if (sysctl (info_path, 4, NULL, &len, NULL, 0) < 0)
     618      return -1;
     619    /* Allow for small variations over time.  In a multithreaded program
     620       new VMAs can be allocated at any moment.  */
     621    len = 2 * len + 200;
     622    /* Allocate memneed bytes of memory.
     623       We cannot use alloca here, because not much stack space is guaranteed.
     624       We also cannot use malloc here, because a malloc() call may call mmap()
     625       and thus pre-allocate available memory.
     626       So use mmap(), and ignore the resulting VMA.  */
     627    pagesize = getpagesize ();
     628    memneed = len;
     629    memneed = ((memneed - 1) / pagesize + 1) * pagesize;
     630    auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
     631                            MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
     632    if (auxmap == (void *) -1)
     633      return -1;
     634    auxmap_start = (unsigned long) auxmap;
     635    auxmap_end = auxmap_start + memneed;
     636    mem = (char *) auxmap;
     637    if (sysctl (info_path, 4, mem, &len, NULL, 0) < 0)
     638      {
     639        munmap (auxmap, memneed);
     640        return -1;
     641      }
     642    p = mem;
     643    p_end = mem + len;
     644    while (p < p_end)
     645      {
     646        struct kinfo_vmentry *kve = (struct kinfo_vmentry *) p;
     647        unsigned long start = kve->kve_start;
     648        unsigned long end = kve->kve_end;
     649        unsigned int flags = 0;
     650        if (kve->kve_protection & KVME_PROT_READ)
     651          flags |= VMA_PROT_READ;
     652        if (kve->kve_protection & KVME_PROT_WRITE)
     653          flags |= VMA_PROT_WRITE;
     654        if (kve->kve_protection & KVME_PROT_EXEC)
     655          flags |= VMA_PROT_EXECUTE;
     656        if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
     657          {
     658            /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
     659               = [start,auxmap_start-1] u [auxmap_end,end-1].  */
     660            if (start < auxmap_start)
     661              if (callback (data, start, auxmap_start, flags))
     662                break;
     663            if (auxmap_end - 1 < end - 1)
     664              if (callback (data, auxmap_end, end, flags))
     665                break;
     666          }
     667        else
     668          {
     669            if (callback (data, start, end, flags))
     670              break;
     671          }
     672        p += kve->kve_structsize;
     673      }
     674    munmap (auxmap, memneed);
     675    return 0;
     676  }
     677  
     678  #elif defined __NetBSD__ && defined VM_PROC_MAP /* NetBSD >= 8.0 */
     679  
     680  static int
     681  vma_iterate_bsd (vma_iterate_callback_fn callback, void *data)
     682  {
     683    /* Documentation: https://man.netbsd.org/man/sysctl+7  */
     684    unsigned int entry_size =
     685      /* If we wanted to have the path of each entry, we would need
     686         sizeof (struct kinfo_vmentry).  But we need only the non-string
     687         parts of each entry.  */
     688      offsetof (struct kinfo_vmentry, kve_path);
     689    int info_path[] = { CTL_VM, VM_PROC, VM_PROC_MAP, getpid (), entry_size };
     690    size_t len;
     691    size_t pagesize;
     692    size_t memneed;
     693    void *auxmap;
     694    unsigned long auxmap_start;
     695    unsigned long auxmap_end;
     696    char *mem;
     697    char *p;
     698    char *p_end;
     699  
     700    len = 0;
     701    if (sysctl (info_path, 5, NULL, &len, NULL, 0) < 0)
     702      return -1;
     703    /* Allow for small variations over time.  In a multithreaded program
     704       new VMAs can be allocated at any moment.  */
     705    len = 2 * len + 10 * entry_size;
     706    /* But the system call rejects lengths > 1 MB.  */
     707    if (len > 0x100000)
     708      len = 0x100000;
     709    /* And the system call causes a kernel panic if the length is not a multiple
     710       of entry_size.  */
     711    len = (len / entry_size) * entry_size;
     712    /* Allocate memneed bytes of memory.
     713       We cannot use alloca here, because not much stack space is guaranteed.
     714       We also cannot use malloc here, because a malloc() call may call mmap()
     715       and thus pre-allocate available memory.
     716       So use mmap(), and ignore the resulting VMA.  */
     717    pagesize = getpagesize ();
     718    memneed = len;
     719    memneed = ((memneed - 1) / pagesize + 1) * pagesize;
     720    auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
     721                            MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
     722    if (auxmap == (void *) -1)
     723      return -1;
     724    auxmap_start = (unsigned long) auxmap;
     725    auxmap_end = auxmap_start + memneed;
     726    mem = (char *) auxmap;
     727    if (sysctl (info_path, 5, mem, &len, NULL, 0) < 0
     728        || len > 0x100000 - entry_size)
     729      {
     730        /* sysctl failed, or the list of VMAs is possibly truncated.  */
     731        munmap (auxmap, memneed);
     732        return -1;
     733      }
     734    p = mem;
     735    p_end = mem + len;
     736    while (p < p_end)
     737      {
     738        struct kinfo_vmentry *kve = (struct kinfo_vmentry *) p;
     739        unsigned long start = kve->kve_start;
     740        unsigned long end = kve->kve_end;
     741        unsigned int flags = 0;
     742        if (kve->kve_protection & KVME_PROT_READ)
     743          flags |= VMA_PROT_READ;
     744        if (kve->kve_protection & KVME_PROT_WRITE)
     745          flags |= VMA_PROT_WRITE;
     746        if (kve->kve_protection & KVME_PROT_EXEC)
     747          flags |= VMA_PROT_EXECUTE;
     748        if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
     749          {
     750            /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
     751               = [start,auxmap_start-1] u [auxmap_end,end-1].  */
     752            if (start < auxmap_start)
     753              if (callback (data, start, auxmap_start, flags))
     754                break;
     755            if (auxmap_end - 1 < end - 1)
     756              if (callback (data, auxmap_end, end, flags))
     757                break;
     758          }
     759        else
     760          {
     761            if (callback (data, start, end, flags))
     762              break;
     763          }
     764        p += entry_size;
     765      }
     766    munmap (auxmap, memneed);
     767    return 0;
     768  }
     769  
     770  #elif defined __OpenBSD__ && defined KERN_PROC_VMMAP /* OpenBSD >= 5.7 */
     771  
     772  static int
     773  vma_iterate_bsd (vma_iterate_callback_fn callback, void *data)
     774  {
     775    /* Documentation: https://man.openbsd.org/sysctl.2  */
     776    int info_path[] = { CTL_KERN, KERN_PROC_VMMAP, getpid () };
     777    size_t len;
     778    size_t pagesize;
     779    size_t memneed;
     780    void *auxmap;
     781    unsigned long auxmap_start;
     782    unsigned long auxmap_end;
     783    char *mem;
     784    char *p;
     785    char *p_end;
     786  
     787    len = 0;
     788    if (sysctl (info_path, 3, NULL, &len, NULL, 0) < 0)
     789      return -1;
     790    /* Allow for small variations over time.  In a multithreaded program
     791       new VMAs can be allocated at any moment.  */
     792    len = 2 * len + 10 * sizeof (struct kinfo_vmentry);
     793    /* But the system call rejects lengths > 64 KB.  */
     794    if (len > 0x10000)
     795      len = 0x10000;
     796    /* And the system call rejects lengths that are not a multiple of
     797       sizeof (struct kinfo_vmentry).  */
     798    len = (len / sizeof (struct kinfo_vmentry)) * sizeof (struct kinfo_vmentry);
     799    /* Allocate memneed bytes of memory.
     800       We cannot use alloca here, because not much stack space is guaranteed.
     801       We also cannot use malloc here, because a malloc() call may call mmap()
     802       and thus pre-allocate available memory.
     803       So use mmap(), and ignore the resulting VMA.  */
     804    pagesize = getpagesize ();
     805    memneed = len;
     806    memneed = ((memneed - 1) / pagesize + 1) * pagesize;
     807    auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
     808                            MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
     809    if (auxmap == (void *) -1)
     810      return -1;
     811    auxmap_start = (unsigned long) auxmap;
     812    auxmap_end = auxmap_start + memneed;
     813    mem = (char *) auxmap;
     814    if (sysctl (info_path, 3, mem, &len, NULL, 0) < 0
     815        || len > 0x10000 - sizeof (struct kinfo_vmentry))
     816      {
     817        /* sysctl failed, or the list of VMAs is possibly truncated.  */
     818        munmap (auxmap, memneed);
     819        return -1;
     820      }
     821    p = mem;
     822    p_end = mem + len;
     823    while (p < p_end)
     824      {
     825        struct kinfo_vmentry *kve = (struct kinfo_vmentry *) p;
     826        unsigned long start = kve->kve_start;
     827        unsigned long end = kve->kve_end;
     828        unsigned int flags = 0;
     829        if (kve->kve_protection & KVE_PROT_READ)
     830          flags |= VMA_PROT_READ;
     831        if (kve->kve_protection & KVE_PROT_WRITE)
     832          flags |= VMA_PROT_WRITE;
     833        if (kve->kve_protection & KVE_PROT_EXEC)
     834          flags |= VMA_PROT_EXECUTE;
     835        if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
     836          {
     837            /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
     838               = [start,auxmap_start-1] u [auxmap_end,end-1].  */
     839            if (start < auxmap_start)
     840              if (callback (data, start, auxmap_start, flags))
     841                break;
     842            if (auxmap_end - 1 < end - 1)
     843              if (callback (data, auxmap_end, end, flags))
     844                break;
     845          }
     846        else
     847          {
     848            if (start != end)
     849              if (callback (data, start, end, flags))
     850                break;
     851          }
     852        p += sizeof (struct kinfo_vmentry);
     853      }
     854    munmap (auxmap, memneed);
     855    return 0;
     856  }
     857  
     858  #else
     859  
     860  static inline int
     861  vma_iterate_bsd (vma_iterate_callback_fn callback, void *data)
     862  {
     863    return -1;
     864  }
     865  
     866  #endif
     867  
     868  
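          /* Illustrative usage sketch (see vma-iter.h for the exact prototype):
             count the readable VMAs of the current process.  The callback must
             return 0 to continue the iteration and non-zero to stop it.

                 static int
                 count_readable (void *data, uintptr_t start, uintptr_t end,
                                 unsigned int flags)
                 {
                   if (flags & VMA_PROT_READ)
                     (*(unsigned int *) data)++;
                   return 0;
                 }

                 unsigned int n = 0;
                 if (vma_iterate (count_readable, &n) == 0)
                   ...n now holds the number of readable VMAs...
           */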
     869  int
     870  vma_iterate (vma_iterate_callback_fn callback, void *data)
     871  {
     872  #if defined __linux__ || defined __ANDROID__ || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__ || defined __minix /* || defined __CYGWIN__ */
     873  
     874  # if defined __FreeBSD__
     875    /* On FreeBSD with procfs (but not GNU/kFreeBSD, which uses linprocfs), the
     876       function vma_iterate_proc does not return the virtual memory areas that
     877       were created by anonymous mmap.  See
     878       <https://svnweb.freebsd.org/base/head/sys/fs/procfs/procfs_map.c?view=markup>
     879       So use vma_iterate_proc only as a fallback.  */
     880    int retval = vma_iterate_bsd (callback, data);
     881    if (retval == 0)
     882        return 0;
     883  
     884    return vma_iterate_proc (callback, data);
     885  # else
     886    /* On the other platforms, try the /proc approach first, and the sysctl()
     887       as a fallback.  */
     888    int retval = vma_iterate_proc (callback, data);
     889    if (retval == 0)
     890        return 0;
     891  
     892    return vma_iterate_bsd (callback, data);
     893  # endif
     894  
     895  #elif defined _AIX /* AIX */
     896  
     897    /* On AIX, there is a /proc/$pid/map file that contains records of type
     898       prmap_t, defined in <sys/procfs.h>.  In older versions of AIX, it lists
     899       only the virtual memory areas that are connected to a file, not the
     900       anonymous ones.  But at least since AIX 7.1, it is fully usable.  */
     901  
     902    size_t pagesize;
     903    char fnamebuf[6+10+4+1];
     904    char *fname;
     905    int fd;
     906    size_t memneed;
     907  
     908    pagesize = getpagesize ();
     909  
     910    /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()).  */
     911    fname = fnamebuf + sizeof (fnamebuf) - (4+1);
     912    memcpy (fname, "/map", 4+1);
     913    {
     914      unsigned int value = getpid ();
     915      do
     916        *--fname = (value % 10) + '0';
     917      while ((value = value / 10) > 0);
     918    }
     919    fname -= 6;
     920    memcpy (fname, "/proc/", 6);
     921  
     922    fd = open (fname, O_RDONLY | O_CLOEXEC);
     923    if (fd < 0)
     924      return -1;
     925  
     926    /* The contents of /proc/<pid>/map consist of a number of prmap_t entries,
     927       then an entirely null prmap_t entry, then a heap of NUL terminated
     928       strings.
     929       Documentation: https://www.ibm.com/docs/en/aix/7.1?topic=files-proc-file
     930       We read the entire contents, but look only at the prmap_t entries and
     931       ignore the tail part.  */
     932  
     933    for (memneed = 2 * pagesize; ; memneed = 2 * memneed)
     934      {
     935        /* Allocate memneed bytes of memory.
     936           We cannot use alloca here, because not much stack space is guaranteed.
     937           We also cannot use malloc here, because a malloc() call may call mmap()
     938           and thus pre-allocate available memory.
     939           So use mmap(), and ignore the resulting VMA if it occurs among the
     940           resulting VMAs.  (Normally it doesn't, because it was allocated after
     941           the open() call.)  */
     942        void *auxmap;
     943        unsigned long auxmap_start;
     944        unsigned long auxmap_end;
     945        ssize_t nbytes;
     946  
     947        auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
     948                                MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
     949        if (auxmap == (void *) -1)
     950          {
     951            close (fd);
     952            return -1;
     953          }
     954        auxmap_start = (unsigned long) auxmap;
     955        auxmap_end = auxmap_start + memneed;
     956  
     957        /* Read the contents of /proc/<pid>/map in a single system call.
     958           This guarantees a consistent result (no duplicated or omitted
     959           entries).  */
     960       retry:
     961        do
     962          nbytes = read (fd, auxmap, memneed);
     963        while (nbytes < 0 && errno == EINTR);
     964        if (nbytes <= 0)
     965          {
     966            munmap (auxmap, memneed);
     967            close (fd);
     968            return -1;
     969          }
     970        if (nbytes == memneed)
     971          {
     972            /* Need more memory.  */
     973            munmap (auxmap, memneed);
     974            if (lseek (fd, 0, SEEK_SET) < 0)
     975              {
     976                close (fd);
     977                return -1;
     978              }
     979          }
     980        else
     981          {
     982            if (read (fd, (char *) auxmap + nbytes, 1) > 0)
     983              {
     984                /* Oops, we had a short read.  Retry.  */
     985                if (lseek (fd, 0, SEEK_SET) < 0)
     986                  {
     987                    munmap (auxmap, memneed);
     988                    close (fd);
     989                    return -1;
     990                  }
     991                goto retry;
     992              }
     993  
     994            /* We now have the entire contents of /proc/<pid>/map in memory.  */
     995            prmap_t* maps = (prmap_t *) auxmap;
     996  
     997            /* The entries are not sorted by address.  Therefore
     998               1. Extract the relevant information into an array.
     999               2. Sort the array in ascending order.
    1000               3. Invoke the callback.  */
    1001            typedef struct
    1002              {
    1003                uintptr_t start;
    1004                uintptr_t end;
    1005                unsigned int flags;
    1006              }
    1007            vma_t;
    1008            /* Since 2 * sizeof (vma_t) <= sizeof (prmap_t), we can reuse the
    1009               same memory.  */
    1010            vma_t *vmas = (vma_t *) auxmap;
    1011  
    1012            vma_t *vp = vmas;
    1013            {
    1014              prmap_t* mp;
    1015              for (mp = maps;;)
    1016                {
    1017                  unsigned long start, end;
    1018  
    1019                  start = (unsigned long) mp->pr_vaddr;
    1020                  end = start + mp->pr_size;
    1021                  if (start == 0 && end == 0 && mp->pr_mflags == 0)
    1022                    break;
    1023                  /* Discard empty VMAs and kernel VMAs.  */
    1024                  if (start < end && (mp->pr_mflags & MA_KERNTEXT) == 0)
    1025                    {
    1026                      unsigned int flags;
    1027                      flags = 0;
    1028                      if (mp->pr_mflags & MA_READ)
    1029                        flags |= VMA_PROT_READ;
    1030                      if (mp->pr_mflags & MA_WRITE)
    1031                        flags |= VMA_PROT_WRITE;
    1032                      if (mp->pr_mflags & MA_EXEC)
    1033                        flags |= VMA_PROT_EXECUTE;
    1034  
    1035                      if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
    1036                        {
    1037                          /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
    1038                             = [start,auxmap_start-1] u [auxmap_end,end-1].  */
    1039                          if (start < auxmap_start)
    1040                            {
    1041                              vp->start = start;
    1042                              vp->end = auxmap_start;
    1043                              vp->flags = flags;
    1044                              vp++;
    1045                            }
    1046                          if (auxmap_end - 1 < end - 1)
    1047                            {
    1048                              vp->start = auxmap_end;
    1049                              vp->end = end;
    1050                              vp->flags = flags;
    1051                              vp++;
    1052                            }
    1053                        }
    1054                      else
    1055                        {
    1056                          vp->start = start;
    1057                          vp->end = end;
    1058                          vp->flags = flags;
    1059                          vp++;
    1060                        }
    1061                    }
    1062                  mp++;
    1063                }
    1064            }
    1065  
    1066            size_t nvmas = vp - vmas;
    1067            /* Sort the array in ascending order.
    1068               Better not call qsort(), since it may call malloc().
    1069               Insertion-sort is OK in this case, despite its worst-case running
    1070               time of O(N²), since the number of VMAs will rarely be larger than
    1071               1000.  */
    1072            {
    1073              size_t i;
    1074              for (i = 1; i < nvmas; i++)
    1075                {
    1076                  /* Invariant: Here vmas[0..i-1] is sorted.  */
    1077                  size_t j;
    1078                  for (j = i; j > 0 && vmas[j - 1].start > vmas[j].start; j--)
    1079                    {
    1080                      vma_t tmp = vmas[j - 1];
    1081                      vmas[j - 1] = vmas[j];
    1082                      vmas[j] = tmp;
    1083                    }
    1084                  /* Invariant: Here vmas[0..i] is sorted.  */
    1085                }
    1086            }
    1087  
    1088            /* Invoke the callback.  */
    1089            {
    1090              size_t i;
    1091              for (i = 0; i < nvmas; i++)
    1092                {
    1093                  vma_t *vpi = &vmas[i];
    1094                  if (callback (data, vpi->start, vpi->end, vpi->flags))
    1095                    break;
    1096                }
    1097            }
    1098  
    1099            munmap (auxmap, memneed);
    1100            break;
    1101          }
    1102      }
    1103  
    1104    close (fd);
    1105    return 0;
    1106  
    1107  #elif defined __sgi || defined __osf__ /* IRIX, OSF/1 */
    1108  
    1109    size_t pagesize;
    1110    char fnamebuf[6+10+1];
    1111    char *fname;
    1112    int fd;
    1113    int nmaps;
    1114    size_t memneed;
    1115  # if HAVE_MAP_ANONYMOUS
    1116  #  define zero_fd -1
    1117  #  define map_flags MAP_ANONYMOUS
    1118  # else
    1119    int zero_fd;
    1120  #  define map_flags 0
    1121  # endif
    1122    void *auxmap;
    1123    unsigned long auxmap_start;
    1124    unsigned long auxmap_end;
    1125    prmap_t* maps;
    1126    prmap_t* mp;
    1127  
    1128    pagesize = getpagesize ();
    1129  
    1130    /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
    1131    fname = fnamebuf + sizeof (fnamebuf) - 1;
    1132    *fname = '\0';
    1133    {
    1134      unsigned int value = getpid ();
    1135      do
    1136        *--fname = (value % 10) + '0';
    1137      while ((value = value / 10) > 0);
    1138    }
    1139    fname -= 6;
    1140    memcpy (fname, "/proc/", 6);
    1141  
    1142    fd = open (fname, O_RDONLY | O_CLOEXEC);
    1143    if (fd < 0)
    1144      return -1;
    1145  
    1146    if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    1147      goto fail2;
    1148  
    1149    memneed = (nmaps + 10) * sizeof (prmap_t);
    1150    /* Allocate memneed bytes of memory.
    1151       We cannot use alloca here, because not much stack space is guaranteed.
    1152       We also cannot use malloc here, because a malloc() call may call mmap()
    1153       and thus pre-allocate available memory.
    1154       So use mmap(), and ignore the resulting VMA.  */
    1155    memneed = ((memneed - 1) / pagesize + 1) * pagesize;
    1156  # if !HAVE_MAP_ANONYMOUS
    1157    zero_fd = open ("/dev/zero", O_RDONLY | O_CLOEXEC, 0644);
    1158    if (zero_fd < 0)
    1159      goto fail2;
    1160  # endif
    1161    auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
    1162                            map_flags | MAP_PRIVATE, zero_fd, 0);
    1163  # if !HAVE_MAP_ANONYMOUS
    1164    close (zero_fd);
    1165  # endif
    1166    if (auxmap == (void *) -1)
    1167      goto fail2;
    1168    auxmap_start = (unsigned long) auxmap;
    1169    auxmap_end = auxmap_start + memneed;
    1170    maps = (prmap_t *) auxmap;
    1171  
    1172    if (ioctl (fd, PIOCMAP, maps) < 0)
    1173      goto fail1;
    1174  
    1175    for (mp = maps;;)
    1176      {
    1177        unsigned long start, end;
    1178        unsigned int flags;
    1179  
    1180        start = (unsigned long) mp->pr_vaddr;
    1181        end = start + mp->pr_size;
    1182        if (start == 0 && end == 0)
    1183          break;
    1184        flags = 0;
    1185        if (mp->pr_mflags & MA_READ)
    1186          flags |= VMA_PROT_READ;
    1187        if (mp->pr_mflags & MA_WRITE)
    1188          flags |= VMA_PROT_WRITE;
    1189        if (mp->pr_mflags & MA_EXEC)
    1190          flags |= VMA_PROT_EXECUTE;
    1191        mp++;
    1192        if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
    1193          {
    1194            /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
    1195               = [start,auxmap_start-1] u [auxmap_end,end-1].  */
    1196            if (start < auxmap_start)
    1197              if (callback (data, start, auxmap_start, flags))
    1198                break;
    1199            if (auxmap_end - 1 < end - 1)
    1200              if (callback (data, auxmap_end, end, flags))
    1201                break;
    1202          }
    1203        else
    1204          {
    1205            if (callback (data, start, end, flags))
    1206              break;
    1207          }
    1208      }
    1209    munmap (auxmap, memneed);
    1210    close (fd);
    1211    return 0;
    1212  
    1213   fail1:
    1214    munmap (auxmap, memneed);
    1215   fail2:
    1216    close (fd);
    1217    return -1;
    1218  
    1219  #elif defined __sun /* Solaris */
    1220  
    1221    /* Note: Solaris <sys/procfs.h> defines a different type prmap_t with
    1222       _STRUCTURED_PROC than without! Here's a table of sizeof(prmap_t):
    1223                                    32-bit   64-bit
    1224           _STRUCTURED_PROC = 0       32       56
    1225           _STRUCTURED_PROC = 1       96      104
    1226       Therefore, if the include files provide the newer API, prmap_t has
    1227       the bigger size, and thus you MUST use the newer API.  And if the
    1228       include files provide the older API, prmap_t has the smaller size,
    1229       and thus you MUST use the older API.  */
    1230  
    1231  # if defined PIOCNMAP && defined PIOCMAP
    1232    /* We must use the older /proc interface.  */
    1233  
    1234    size_t pagesize;
    1235    char fnamebuf[6+10+1];
    1236    char *fname;
    1237    int fd;
    1238    int nmaps;
    1239    size_t memneed;
    1240  #  if HAVE_MAP_ANONYMOUS
    1241  #   define zero_fd -1
    1242  #   define map_flags MAP_ANONYMOUS
    1243  #  else /* Solaris <= 7 */
    1244    int zero_fd;
    1245  #   define map_flags 0
    1246  #  endif
    1247    void *auxmap;
    1248    unsigned long auxmap_start;
    1249    unsigned long auxmap_end;
    1250    prmap_t* maps;
    1251    prmap_t* mp;
    1252  
    1253    pagesize = getpagesize ();
    1254  
    1255    /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
    1256    fname = fnamebuf + sizeof (fnamebuf) - 1;
    1257    *fname = '\0';
    1258    {
    1259      unsigned int value = getpid ();
    1260      do
    1261        *--fname = (value % 10) + '0';
    1262      while ((value = value / 10) > 0);
    1263    }
    1264    fname -= 6;
    1265    memcpy (fname, "/proc/", 6);
    1266  
    1267    fd = open (fname, O_RDONLY | O_CLOEXEC);
    1268    if (fd < 0)
    1269      return -1;
    1270  
    1271    if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
    1272      goto fail2;
    1273  
    1274    memneed = (nmaps + 10) * sizeof (prmap_t);
    1275    /* Allocate memneed bytes of memory.
    1276       We cannot use alloca here, because not much stack space is guaranteed.
    1277       We also cannot use malloc here, because a malloc() call may call mmap()
    1278       and thus pre-allocate available memory.
    1279       So use mmap(), and ignore the resulting VMA.  */
    1280    memneed = ((memneed - 1) / pagesize + 1) * pagesize;
    1281  #  if !HAVE_MAP_ANONYMOUS
    1282    zero_fd = open ("/dev/zero", O_RDONLY | O_CLOEXEC, 0644);
    1283    if (zero_fd < 0)
    1284      goto fail2;
    1285  #  endif
    1286    auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
    1287                            map_flags | MAP_PRIVATE, zero_fd, 0);
    1288  #  if !HAVE_MAP_ANONYMOUS
    1289    close (zero_fd);
    1290  #  endif
    1291    if (auxmap == (void *) -1)
    1292      goto fail2;
    1293    auxmap_start = (unsigned long) auxmap;
    1294    auxmap_end = auxmap_start + memneed;
    1295    maps = (prmap_t *) auxmap;
    1296  
    1297    if (ioctl (fd, PIOCMAP, maps) < 0)
    1298      goto fail1;
    1299  
    1300    for (mp = maps;;)
    1301      {
    1302        unsigned long start, end;
    1303        unsigned int flags;
    1304  
    1305        start = (unsigned long) mp->pr_vaddr;
    1306        end = start + mp->pr_size;
    1307        if (start == 0 && end == 0)
    1308          break;
    1309        flags = 0;
    1310        if (mp->pr_mflags & MA_READ)
    1311          flags |= VMA_PROT_READ;
    1312        if (mp->pr_mflags & MA_WRITE)
    1313          flags |= VMA_PROT_WRITE;
    1314        if (mp->pr_mflags & MA_EXEC)
    1315          flags |= VMA_PROT_EXECUTE;
    1316        mp++;
    1317        if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
    1318          {
    1319            /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
    1320               = [start,auxmap_start-1] u [auxmap_end,end-1].  */
    1321            if (start < auxmap_start)
    1322              if (callback (data, start, auxmap_start, flags))
    1323                break;
    1324            if (auxmap_end - 1 < end - 1)
    1325              if (callback (data, auxmap_end, end, flags))
    1326                break;
    1327          }
    1328        else
    1329          {
    1330            if (callback (data, start, end, flags))
    1331              break;
    1332          }
    1333      }
    1334    munmap (auxmap, memneed);
    1335    close (fd);
    1336    return 0;
    1337  
    1338   fail1:
    1339    munmap (auxmap, memneed);
    1340   fail2:
    1341    close (fd);
    1342    return -1;
    1343  
    1344  # else
    1345    /* We must use the newer /proc interface.
    1346       Documentation:
    1347       https://docs.oracle.com/cd/E23824_01/html/821-1473/proc-4.html
    1348     The contents of /proc/<pid>/map consist of records of type
    1349       prmap_t.  These are different in 32-bit and 64-bit processes,
    1350       but here we are fortunately accessing only the current process.  */
    1351  
    1352    size_t pagesize;
    1353    char fnamebuf[6+10+4+1];
    1354    char *fname;
    1355    int fd;
    1356    int nmaps;
    1357    size_t memneed;
    1358  #  if HAVE_MAP_ANONYMOUS
    1359  #   define zero_fd -1
    1360  #   define map_flags MAP_ANONYMOUS
    1361  #  else /* Solaris <= 7 */
    1362    int zero_fd;
    1363  #   define map_flags 0
    1364  #  endif
    1365    void *auxmap;
    1366    unsigned long auxmap_start;
    1367    unsigned long auxmap_end;
    1368    prmap_t* maps;
    1369    prmap_t* maps_end;
    1370    prmap_t* mp;
    1371  
    1372    pagesize = getpagesize ();
    1373  
    1374    /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()).  */
    1375    fname = fnamebuf + sizeof (fnamebuf) - 1 - 4;
    1376    memcpy (fname, "/map", 4 + 1);
    1377    {
    1378      unsigned int value = getpid ();
    1379      do
    1380        *--fname = (value % 10) + '0';
    1381      while ((value = value / 10) > 0);
    1382    }
    1383    fname -= 6;
    1384    memcpy (fname, "/proc/", 6);
    1385  
    1386    fd = open (fname, O_RDONLY | O_CLOEXEC);
    1387    if (fd < 0)
    1388      return -1;
    1389  
    1390    {
    1391      struct stat statbuf;
    1392      if (fstat (fd, &statbuf) < 0)
    1393        goto fail2;
    1394      nmaps = statbuf.st_size / sizeof (prmap_t);
    1395    }
    1396  
    1397    memneed = (nmaps + 10) * sizeof (prmap_t);
    1398    /* Allocate memneed bytes of memory.
    1399       We cannot use alloca here, because not much stack space is guaranteed.
    1400       We also cannot use malloc here, because a malloc() call may call mmap()
    1401       and thus pre-allocate available memory.
    1402       So use mmap(), and ignore the resulting VMA.  */
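            /* Round memneed up to a multiple of pagesize; mmap allocates whole
               pages anyway.  */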
    1403    memneed = ((memneed - 1) / pagesize + 1) * pagesize;
    1404  #  if !HAVE_MAP_ANONYMOUS
    1405    zero_fd = open ("/dev/zero", O_RDONLY | O_CLOEXEC, 0644);
    1406    if (zero_fd < 0)
    1407      goto fail2;
    1408  #  endif
    1409    auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
    1410                            map_flags | MAP_PRIVATE, zero_fd, 0);
    1411  #  if !HAVE_MAP_ANONYMOUS
    1412    close (zero_fd);
    1413  #  endif
    1414    if (auxmap == (void *) -1)
    1415      goto fail2;
    1416    auxmap_start = (unsigned long) auxmap;
    1417    auxmap_end = auxmap_start + memneed;
    1418    maps = (prmap_t *) auxmap;
    1419  
    1420    /* Read up to memneed bytes from fd into maps.  */
    1421    {
    1422      size_t remaining = memneed;
    1423      size_t total_read = 0;
    1424      char *ptr = (char *) maps;
    1425  
    1426      do
    1427        {
    1428          size_t nread = read (fd, ptr, remaining);
    1429          if (nread == (size_t)-1)
    1430            {
    1431              if (errno == EINTR)
    1432                continue;
    1433              goto fail1;
    1434            }
    1435          if (nread == 0)
    1436            /* EOF */
    1437            break;
    1438          total_read += nread;
    1439          ptr += nread;
    1440          remaining -= nread;
    1441        }
    1442      while (remaining > 0);
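              /* Count only complete prmap_t records; a partial trailing record,
                 if any, is discarded by the integer division.  */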
    1443  
    1444      nmaps = (memneed - remaining) / sizeof (prmap_t);
    1445      maps_end = maps + nmaps;
    1446    }
    1447  
    1448    for (mp = maps; mp < maps_end; mp++)
    1449      {
    1450        unsigned long start, end;
    1451        unsigned int flags;
    1452  
    1453        start = (unsigned long) mp->pr_vaddr;
    1454        end = start + mp->pr_size;
    1455        flags = 0;
    1456        if (mp->pr_mflags & MA_READ)
    1457          flags |= VMA_PROT_READ;
    1458        if (mp->pr_mflags & MA_WRITE)
    1459          flags |= VMA_PROT_WRITE;
    1460        if (mp->pr_mflags & MA_EXEC)
    1461          flags |= VMA_PROT_EXECUTE;
    1462        if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
    1463          {
    1464            /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
    1465               = [start,auxmap_start-1] u [auxmap_end,end-1].  */
    1466            if (start < auxmap_start)
    1467              if (callback (data, start, auxmap_start, flags))
    1468                break;
    1469            if (auxmap_end - 1 < end - 1)
    1470              if (callback (data, auxmap_end, end, flags))
    1471                break;
    1472          }
    1473        else
    1474          {
    1475            if (callback (data, start, end, flags))
    1476              break;
    1477          }
    1478      }
    1479    munmap (auxmap, memneed);
    1480    close (fd);
    1481    return 0;
    1482  
    1483   fail1:
    1484    munmap (auxmap, memneed);
    1485   fail2:
    1486    close (fd);
    1487    return -1;
    1488  
    1489  # endif
    1490  
    1491  #elif HAVE_PSTAT_GETPROCVM /* HP-UX */
    1492  
    1493    unsigned long pagesize = getpagesize ();
    1494    int i;
    1495  
    1496    for (i = 0; ; i++)
    1497      {
    1498        struct pst_vm_status info;
    1499        int ret = pstat_getprocvm (&info, sizeof (info), 0, i);
    1500        if (ret < 0)
    1501          return -1;
    1502        if (ret == 0)
    1503          break;
    1504        {
    1505          unsigned long start = info.pst_vaddr;
    1506          unsigned long end = start + info.pst_length * pagesize;
    1507          unsigned int flags = 0;
    1508          if (info.pst_permission & PS_PROT_READ)
    1509            flags |= VMA_PROT_READ;
    1510          if (info.pst_permission & PS_PROT_WRITE)
    1511            flags |= VMA_PROT_WRITE;
    1512          if (info.pst_permission & PS_PROT_EXECUTE)
    1513            flags |= VMA_PROT_EXECUTE;
    1514  
    1515          if (callback (data, start, end, flags))
    1516            break;
    1517        }
    1518      }
            return 0;
    1519  
    1520  #elif defined __APPLE__ && defined __MACH__ /* Mac OS X */
    1521  
    1522    task_t task = mach_task_self ();
    1523    vm_address_t address;
    1524    vm_size_t size;
    1525  
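            /* Walk the address space upwards, starting at VM_MIN_ADDRESS; each
               vm_region call returns the first region at or above address and
               stores its start and size into address and size.  */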
    1526    for (address = VM_MIN_ADDRESS;; address += size)
    1527      {
    1528        int more;
    1529        mach_port_t object_name;
    1530        unsigned int flags;
    1531        /* In Mac OS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t are
    1532           32 bits wide in 32-bit processes and 64 bits wide in 64-bit processes,
    1533           whereas mach_vm_address_t and mach_vm_size_t are always 64 bits wide.
    1534           Mac OS X 10.5 has three vm_region like methods:
    1535             - vm_region. It has arguments that depend on whether the current
    1536               process is 32-bit or 64-bit. When linking dynamically, this
    1537               function exists only in 32-bit processes. Therefore we use it only
    1538               in 32-bit processes.
    1539             - vm_region_64. It has arguments that depend on whether the current
    1540               process is 32-bit or 64-bit. It interprets a flavor
    1541               VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
    1542               dangerous since 'struct vm_region_basic_info_64' is larger than
    1543               'struct vm_region_basic_info'; therefore let's write
    1544               VM_REGION_BASIC_INFO_64 explicitly.
    1545             - mach_vm_region. It has arguments that are 64-bit always. This
    1546               function is useful when you want to access the VM of a process
    1547               other than the current process.
    1548           In 64-bit processes, we could use vm_region_64 or mach_vm_region.
    1549           I choose vm_region_64 because it uses the same types as vm_region,
    1550           resulting in less conditional code.  */
    1551  # if defined __aarch64__ || defined __ppc64__ || defined __x86_64__
    1552        struct vm_region_basic_info_64 info;
    1553        mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
    1554  
    1555        more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
    1556                              (vm_region_info_t)&info, &info_count, &object_name)
    1557                == KERN_SUCCESS);
    1558  # else
    1559        struct vm_region_basic_info info;
    1560        mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;
    1561  
    1562        more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
    1563                           (vm_region_info_t)&info, &info_count, &object_name)
    1564                == KERN_SUCCESS);
    1565  # endif
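                /* vm_region returned a send right for the memory object in
                   object_name; release it so that port references do not
                   accumulate across iterations.  */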
    1566        if (object_name != MACH_PORT_NULL)
    1567          mach_port_deallocate (mach_task_self (), object_name);
    1568        if (!more)
    1569          break;
    1570        flags = 0;
    1571        if (info.protection & VM_PROT_READ)
    1572          flags |= VMA_PROT_READ;
    1573        if (info.protection & VM_PROT_WRITE)
    1574          flags |= VMA_PROT_WRITE;
    1575        if (info.protection & VM_PROT_EXECUTE)
    1576          flags |= VMA_PROT_EXECUTE;
    1577        if (callback (data, address, address + size, flags))
    1578          break;
    1579      }
    1580    return 0;
    1581  
    1582  #elif defined __GNU__ /* GNU/Hurd */
    1583  
    1584    /* The Hurd has a /proc/self/maps that looks like the Linux one, but it
    1585       lacks the VMAs created through anonymous mmap.  Therefore use the Mach
    1586       API.
    1587       Documentation:
    1588       https://www.gnu.org/software/hurd/gnumach-doc/Memory-Attributes.html */
    1589  
    1590    task_t task = mach_task_self ();
    1591    vm_address_t address;
    1592    vm_size_t size;
    1593  
    1594    for (address = 0;; address += size)
    1595      {
    1596        vm_prot_t protection;
    1597        vm_prot_t max_protection;
    1598        vm_inherit_t inheritance;
    1599        boolean_t shared;
    1600        memory_object_name_t object_name;
    1601        vm_offset_t offset;
    1602        unsigned int flags;
    1603  
    1604        if (!(vm_region (task, &address, &size, &protection, &max_protection,
    1605                           &inheritance, &shared, &object_name, &offset)
    1606              == KERN_SUCCESS))
    1607          break;
    1608        mach_port_deallocate (task, object_name);
    1609        flags = 0;
    1610        if (protection & VM_PROT_READ)
    1611          flags |= VMA_PROT_READ;
    1612        if (protection & VM_PROT_WRITE)
    1613          flags |= VMA_PROT_WRITE;
    1614        if (protection & VM_PROT_EXECUTE)
    1615          flags |= VMA_PROT_EXECUTE;
    1616        if (callback (data, address, address + size, flags))
    1617          break;
    1618      }
    1619    return 0;
    1620  
    1621  #elif defined _WIN32 || defined __CYGWIN__
    1622    /* Windows platform.  Use the native Windows API.  */
    1623  
    1624    MEMORY_BASIC_INFORMATION info;
    1625    uintptr_t address = 0;
    1626  
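            /* Walk the address space region by region.  VirtualQuery is
               expected to stop returning sizeof (info) once address lies past
               the end of the user address space, which terminates the loop.  */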
    1627    while (VirtualQuery ((void*)address, &info, sizeof(info)) == sizeof(info))
    1628      {
    1629        if (info.State != MEM_FREE)
    1630          /* Ignore areas where info.State has the value MEM_RESERVE or,
    1631             equivalently, info.Protect has the undocumented value 0.
    1632             This is needed, so that on Cygwin, areas used by malloc() are
    1633             distinguished from areas reserved for future malloc().  */
    1634          if (info.State != MEM_RESERVE)
    1635            {
    1636              uintptr_t start, end;
    1637              unsigned int flags;
    1638  
    1639              start = (uintptr_t)info.BaseAddress;
    1640              end = start + info.RegionSize;
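                      /* Ignore the PAGE_GUARD and PAGE_NOCACHE modifier bits,
                         so that the switch below sees only the base protection
                         value.  */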
    1641              switch (info.Protect & ~(PAGE_GUARD|PAGE_NOCACHE))
    1642                {
    1643                case PAGE_READONLY:
    1644                  flags = VMA_PROT_READ;
    1645                  break;
    1646                case PAGE_READWRITE:
    1647                case PAGE_WRITECOPY:
    1648                  flags = VMA_PROT_READ | VMA_PROT_WRITE;
    1649                  break;
    1650                case PAGE_EXECUTE:
    1651                  flags = VMA_PROT_EXECUTE;
    1652                  break;
    1653                case PAGE_EXECUTE_READ:
    1654                  flags = VMA_PROT_READ | VMA_PROT_EXECUTE;
    1655                  break;
    1656                case PAGE_EXECUTE_READWRITE:
    1657                case PAGE_EXECUTE_WRITECOPY:
    1658                  flags = VMA_PROT_READ | VMA_PROT_WRITE | VMA_PROT_EXECUTE;
    1659                  break;
    1660                case PAGE_NOACCESS:
    1661                default:
    1662                  flags = 0;
    1663                  break;
    1664                }
    1665  
    1666              if (callback (data, start, end, flags))
    1667                break;
    1668            }
    1669        address = (uintptr_t)info.BaseAddress + info.RegionSize;
    1670      }
    1671    return 0;
    1672  
    1673  #elif defined __BEOS__ || defined __HAIKU__
    1674    /* Use the BeOS specific API.  */
    1675  
    1676    area_info info;
    1677    ssize_t cookie;
    1678  
    1679    cookie = 0;
    1680    while (get_next_area_info (0, &cookie, &info) == B_OK)
    1681      {
    1682        unsigned long start, end;
    1683        unsigned int flags;
    1684  
    1685        start = (unsigned long) info.address;
    1686        end = start + info.size;
    1687        flags = 0;
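                /* Readable areas are reported as executable as well,
                   presumably because this interface provides no separate,
                   reliable execute bit here.  */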
    1688        if (info.protection & B_READ_AREA)
    1689          flags |= VMA_PROT_READ | VMA_PROT_EXECUTE;
    1690        if (info.protection & B_WRITE_AREA)
    1691          flags |= VMA_PROT_WRITE;
    1692  
    1693        if (callback (data, start, end, flags))
    1694          break;
    1695      }
    1696    return 0;
    1697  
    1698  #elif HAVE_MQUERY /* OpenBSD */
    1699  
    1700  # if defined __OpenBSD__
    1701    /* Try sysctl() first.  It is more efficient than the mquery() loop below
    1702       and also provides the flags.  */
    1703    {
    1704      int retval = vma_iterate_bsd (callback, data);
    1705      if (retval == 0)
    1706        return 0;
    1707    }
    1708  # endif
    1709  
    1710    {
    1711      uintptr_t pagesize;
    1712      uintptr_t address;
    1713      int /*bool*/ address_known_mapped;
    1714  
    1715      pagesize = getpagesize ();
    1716      /* Avoid calling mquery with a NULL first argument, because this argument
    1717         value has a specific meaning.  We know the NULL page is unmapped.  */
    1718      address = pagesize;
    1719      address_known_mapped = 0;
    1720      for (;;)
    1721        {
    1722          /* Test whether the page at address is mapped.  */
    1723          if (address_known_mapped
    1724              || mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0)
    1725                 == (void *) -1)
    1726            {
    1727              /* The page at address is mapped.
    1728                 This is the start of an interval.  */
    1729              uintptr_t start = address;
    1730              uintptr_t end;
    1731  
    1732              /* Find the end of the interval.  */
    1733              end = (uintptr_t) mquery ((void *) address, pagesize, 0, 0, -1, 0);
    1734              if (end == (uintptr_t) (void *) -1)
    1735                end = 0; /* wrap around */
    1736              address = end;
    1737  
    1738              /* It's too complicated to find out about the flags.
    1739                 Just pass 0.  */
    1740              if (callback (data, start, end, 0))
    1741                break;
    1742  
    1743              if (address < pagesize) /* wrap around? */
    1744                break;
    1745            }
    1746          /* Here we know that the page at address is unmapped.  */
    1747          {
    1748            uintptr_t query_size = pagesize;
    1749  
    1750            address += pagesize;
    1751  
    1752            /* Query larger and larger blocks, to get through the unmapped address
    1753               range with few mquery() calls.  */
    1754            for (;;)
    1755              {
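                        /* Double query_size, unless doubling would overflow.  */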
    1756                if (2 * query_size > query_size)
    1757                  query_size = 2 * query_size;
    1758                if (address + query_size - 1 < query_size) /* wrap around? */
    1759                  {
    1760                    address_known_mapped = 0;
    1761                    break;
    1762                  }
    1763                if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
    1764                    == (void *) -1)
    1765                  {
    1766                    /* Not all the interval [address .. address + query_size - 1]
    1767                       is unmapped.  */
    1768                    address_known_mapped = (query_size == pagesize);
    1769                    break;
    1770                  }
    1771                /* The interval [address .. address + query_size - 1] is
    1772                   unmapped.  */
    1773                address += query_size;
    1774              }
    1775            /* Reduce the query size again, to determine the precise size of the
    1776               unmapped interval that starts at address.  */
    1777            while (query_size > pagesize)
    1778              {
    1779                query_size = query_size / 2;
    1780                if (address + query_size - 1 >= query_size)
    1781                  {
    1782                    if (mquery ((void *) address, query_size, 0, MAP_FIXED, -1, 0)
    1783                        != (void *) -1)
    1784                      {
    1785                        /* The interval [address .. address + query_size - 1] is
    1786                           unmapped.  */
    1787                        address += query_size;
    1788                        address_known_mapped = 0;
    1789                      }
    1790                    else
    1791                      address_known_mapped = (query_size == pagesize);
    1792                  }
    1793              }
    1794            /* Here again query_size = pagesize, and
    1795               either address + pagesize - 1 < pagesize, or
    1796               mquery ((void *) address, pagesize, 0, MAP_FIXED, -1, 0) fails.
    1797               So, the unmapped area ends at address.  */
    1798          }
    1799          if (address + pagesize - 1 < pagesize) /* wrap around? */
    1800            break;
    1801        }
    1802      return 0;
    1803    }
    1804  
    1805  #else
    1806  
    1807    /* Not implemented.  */
    1808    return -1;
    1809  
    1810  #endif
    1811  }
    1812  
    1813  
    1814  #ifdef TEST
    1815  
    1816  #include <stdio.h>
    1817  
    1818  /* Output the VMAs of the current process in a format similar to the Linux
    1819     /proc/$pid/maps file.  */
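          /* A typical line of output might look like
               08048000-08050000 r-x
             (the exact addresses and flags differ from run to run).  */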
    1820  
    1821  static int
    1822  vma_iterate_callback (void *data, uintptr_t start, uintptr_t end,
    1823                        unsigned int flags)
    1824  {
    1825    printf ("%08lx-%08lx %c%c%c\n",
    1826            (unsigned long) start, (unsigned long) end,
    1827            flags & VMA_PROT_READ ? 'r' : '-',
    1828            flags & VMA_PROT_WRITE ? 'w' : '-',
    1829            flags & VMA_PROT_EXECUTE ? 'x' : '-');
    1830    return 0;
    1831  }
    1832  
    1833  int
    1834  main ()
    1835  {
    1836    vma_iterate (vma_iterate_callback, NULL);
    1837  
    1838    /* Let the user interactively look at the /proc file system.  */
    1839    sleep (10);
    1840  
    1841    return 0;
    1842  }
    1843  
    1844  /*
    1845   * Local Variables:
    1846   * compile-command: "gcc -ggdb -DTEST -Wall -I.. vma-iter.c"
    1847   * End:
    1848   */
    1849  
    1850  #endif /* TEST */