1 /* Determine the virtual memory area of a given address.
2 Copyright (C) 2002-2021 Bruno Haible <bruno@clisp.org>
3 Copyright (C) 2003-2006 Paolo Bonzini <bonzini@gnu.org>
4
5 This program is free software: you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 3 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <https://www.gnu.org/licenses/>. */
17
18 #include <config.h>
19
20 /* Specification. */
21 #include "stackvma.h"
22
23 #include <stdio.h>
24 #include <stdlib.h>
25
26 /* =========================== stackvma-simple.c =========================== */
27
28 #if defined __linux__ || defined __ANDROID__ \
29 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
30 || defined __NetBSD__ \
31 || (defined __APPLE__ && defined __MACH__) \
32 || defined __sgi || defined __sun \
33 || defined __CYGWIN__ || defined __HAIKU__
34
35 /* This file contains the proximity test function for the simple cases, where
36 the OS has an API for enumerating the mapped ranges of virtual memory. */
37
38 # if STACK_DIRECTION < 0
39
/* Test whether ADDR lies in the half, adjacent to this VMA, of the gap
   between this VMA and the previous one.
   addr must be < vma->start.  */
42 static int
43 simple_is_near_this (uintptr_t addr, struct vma_struct *vma)
44 {
45 return (vma->start - addr <= (vma->start - vma->prev_end) / 2);
46 }
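
/* Worked example (grow-down case): with vma->prev_end = 0x100000 and
   vma->start = 0x140000, the gap is 0x40000 bytes wide and its half is
   0x20000.  For addr = 0x130000, vma->start - addr = 0x10000 <= 0x20000,
   so the address counts as near this VMA; for addr = 0x110000 the distance
   is 0x30000 > 0x20000, so it does not.  In other words, an address is
   "near" when it falls into the half of the gap adjacent to this VMA.  */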
47
48 # endif
49 # if STACK_DIRECTION > 0
50
/* Test whether ADDR lies in the half, adjacent to this VMA, of the gap
   between this VMA and the next one.
   addr must be > vma->end - 1.  */
53 static int
54 simple_is_near_this (uintptr_t addr, struct vma_struct *vma)
55 {
56 return (addr - vma->end < (vma->next_start - vma->end) / 2);
57 }
58
59 # endif
60
61 #endif
62
63 /* =========================== stackvma-rofile.c =========================== */
64 /* Buffered read-only streams. */
65
66 #if defined __linux__ || defined __ANDROID__ \
67 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
68 || defined __NetBSD__ \
69 || defined __CYGWIN__
70
71 # include <errno.h> /* errno, EINTR */
72 # include <fcntl.h> /* open, O_RDONLY */
73 # include <stddef.h> /* size_t */
74 # include <unistd.h> /* getpagesize, lseek, read, close */
75 # include <sys/types.h>
76 # include <sys/mman.h> /* mmap, munmap */
77
78 # if defined __linux__ || defined __ANDROID__
79 # include <limits.h> /* PATH_MAX */
80 # endif
81
82 /* Buffered read-only streams.
83 We cannot use <stdio.h> here, because fopen() calls malloc(), and a malloc()
84 call may have been interrupted.
85 Also, we cannot use multiple read() calls, because if the buffer size is
86 smaller than the file's contents:
87 - On NetBSD, the second read() call would return 0, thus making the file
88 appear truncated.
89 - On DragonFly BSD, the first read() call would fail with errno = EFBIG.
90 - On all platforms, if some other thread is doing memory allocations or
91 deallocations between two read() calls, there is a high risk that the
     results of these two read() calls don't fit together, and as a
     consequence we would parse garbage and either omit some VMAs or return
94 VMAs with nonsensical addresses.
95 So use mmap(), and ignore the resulting VMA.
   The stack-allocated buffer must not be too large, because this code may
   run in the context of an alternate stack of just SIGSTKSZ bytes.  */
98
99 # if defined __linux__ || defined __ANDROID__
100 /* On Linux, if the file does not entirely fit into the buffer, the read()
101 function stops before the line that would come out truncated. The
102 maximum size of such a line is 73 + PATH_MAX bytes. To be sure that we
     have read everything, we must verify that at least that many bytes of
     buffer space were still unused when read() returned.  */
105 # define MIN_LEFTOVER (73 + PATH_MAX)
106 # else
107 # define MIN_LEFTOVER 1
108 # endif
109
110 # if MIN_LEFTOVER < 1024
111 # define STACK_ALLOCATED_BUFFER_SIZE 1024
112 # else
113 /* There is no point in using a stack-allocated buffer if it is too small
114 anyway. */
115 # define STACK_ALLOCATED_BUFFER_SIZE 1
116 # endif
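
/* Worked example: on Linux, where PATH_MAX is usually 4096, MIN_LEFTOVER is
   73 + 4096 = 4169 >= 1024, so STACK_ALLOCATED_BUFFER_SIZE is 1 and
   rof_open below goes straight to an mmap()ed buffer.  On the other
   platforms MIN_LEFTOVER is 1, and the 1024-byte stack buffer is tried
   first.  */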
117
118 struct rofile
119 {
120 size_t position;
121 size_t filled;
122 int eof_seen;
123 /* These fields deal with allocation of the buffer. */
124 char *buffer;
125 char *auxmap;
126 size_t auxmap_length;
127 uintptr_t auxmap_start;
128 uintptr_t auxmap_end;
129 char stack_allocated_buffer[STACK_ALLOCATED_BUFFER_SIZE];
130 };
131
132 /* Open a read-only file stream. */
133 static int
134 rof_open (struct rofile *rof, const char *filename)
135 {
136 int fd;
137 uintptr_t pagesize;
138 size_t size;
139
140 fd = open (filename, O_RDONLY);
141 if (fd < 0)
142 return -1;
143 rof->position = 0;
144 rof->eof_seen = 0;
145 /* Try the static buffer first. */
146 pagesize = 0;
147 rof->buffer = rof->stack_allocated_buffer;
148 size = sizeof (rof->stack_allocated_buffer);
149 rof->auxmap = NULL;
150 rof->auxmap_start = 0;
151 rof->auxmap_end = 0;
152 for (;;)
153 {
154 /* Attempt to read the contents in a single system call. */
155 if (size > MIN_LEFTOVER)
156 {
157 int n = read (fd, rof->buffer, size);
158 if (n < 0 && errno == EINTR)
159 goto retry;
160 # if defined __DragonFly__
161 if (!(n < 0 && errno == EFBIG))
162 # endif
163 {
164 if (n <= 0)
                /* Empty file, or read() error.  */
166 goto fail1;
167 if (n + MIN_LEFTOVER <= size)
168 {
169 /* The buffer was sufficiently large. */
170 rof->filled = n;
171 # if defined __linux__ || defined __ANDROID__
172 /* On Linux, the read() call may stop even if the buffer was
173 large enough. We need the equivalent of full_read(). */
174 for (;;)
175 {
176 n = read (fd, rof->buffer + rof->filled, size - rof->filled);
177 if (n < 0 && errno == EINTR)
178 goto retry;
179 if (n < 0)
180 /* Some error. */
181 goto fail1;
182 if (n + MIN_LEFTOVER > size - rof->filled)
183 /* Allocate a larger buffer. */
184 break;
185 if (n == 0)
186 {
187 /* Reached the end of file. */
188 close (fd);
189 return 0;
190 }
191 rof->filled += n;
192 }
193 # else
194 close (fd);
195 return 0;
196 # endif
197 }
198 }
199 }
200 /* Allocate a larger buffer. */
201 if (pagesize == 0)
202 {
203 pagesize = getpagesize ();
204 size = pagesize;
205 while (size <= MIN_LEFTOVER)
206 size = 2 * size;
207 }
208 else
209 {
210 size = 2 * size;
211 if (size == 0)
212 /* Wraparound. */
213 goto fail1;
214 if (rof->auxmap != NULL)
215 munmap (rof->auxmap, rof->auxmap_length);
216 }
217 rof->auxmap = (void *) mmap ((void *) 0, size, PROT_READ | PROT_WRITE,
218 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
219 if (rof->auxmap == (void *) -1)
220 {
221 close (fd);
222 return -1;
223 }
224 rof->auxmap_length = size;
225 rof->auxmap_start = (uintptr_t) rof->auxmap;
226 rof->auxmap_end = rof->auxmap_start + size;
227 rof->buffer = (char *) rof->auxmap;
228 retry:
229 /* Restart. */
230 if (lseek (fd, 0, SEEK_SET) < 0)
231 {
232 close (fd);
233 fd = open (filename, O_RDONLY);
234 if (fd < 0)
235 goto fail2;
236 }
237 }
238 fail1:
239 close (fd);
240 fail2:
241 if (rof->auxmap != NULL)
242 munmap (rof->auxmap, rof->auxmap_length);
243 return -1;
244 }
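
/* Note on the control flow above: the outer for (;;) loop retries the whole
   read with progressively larger buffers; the 'retry' label at the bottom
   of the loop body rewinds the file (or reopens it) before the next pass,
   and the fail1/fail2 labels are reached only via goto from inside the
   loop.  The requirement of MIN_LEFTOVER unused bytes after the last
   read() is what guarantees that a kernel-truncated line cannot go
   unnoticed.  */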
245
246 /* Return the next byte from a read-only file stream without consuming it,
247 or -1 at EOF. */
248 static int
249 rof_peekchar (struct rofile *rof)
250 {
251 if (rof->position == rof->filled)
252 {
253 rof->eof_seen = 1;
254 return -1;
255 }
256 return (unsigned char) rof->buffer[rof->position];
257 }
258
259 /* Return the next byte from a read-only file stream, or -1 at EOF. */
260 static int
261 rof_getchar (struct rofile *rof)
262 {
263 int c = rof_peekchar (rof);
264 if (c >= 0)
265 rof->position++;
266 return c;
267 }
268
269 /* Parse an unsigned hexadecimal number from a read-only file stream. */
270 static int
271 rof_scanf_lx (struct rofile *rof, uintptr_t *valuep)
272 {
273 uintptr_t value = 0;
274 unsigned int numdigits = 0;
275 for (;;)
276 {
277 int c = rof_peekchar (rof);
278 if (c >= '0' && c <= '9')
279 value = (value << 4) + (c - '0');
280 else if (c >= 'A' && c <= 'F')
281 value = (value << 4) + (c - 'A' + 10);
282 else if (c >= 'a' && c <= 'f')
283 value = (value << 4) + (c - 'a' + 10);
284 else
285 break;
286 rof_getchar (rof);
287 numdigits++;
288 }
289 if (numdigits == 0)
290 return -1;
291 *valuep = value;
292 return 0;
293 }
294
295 /* Close a read-only file stream. */
296 static void
297 rof_close (struct rofile *rof)
298 {
299 if (rof->auxmap != NULL)
300 munmap (rof->auxmap, rof->auxmap_length);
301 }
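
/* Illustrative sketch (not compiled in): how the rofile functions above fit
   together.  The file name and the line-counting logic are made up for the
   example; the real consumer is vma_iterate_proc below.  */
#if 0
static int
count_lines_in_maps (void)
{
  struct rofile rof;
  int count = 0;
  int c;

  if (rof_open (&rof, "/proc/self/maps") < 0)
    return -1;
  while ((c = rof_getchar (&rof)) != -1)
    if (c == '\n')
      count++;
  rof_close (&rof);
  return count;
}
#endif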
302
303 #endif
304
305 /* ========================== stackvma-vma-iter.c ========================== */
306 /* Iterate through the virtual memory areas of the current process,
307 by reading from the /proc file system. */
308
/* This code is a simplified copy (no handling of protection flags) of the
310 code in gnulib's lib/vma-iter.c. */
311
312 #if defined __linux__ || defined __ANDROID__ \
313 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
314 || defined __NetBSD__ \
315 || defined __CYGWIN__
316
317 /* Forward declarations. */
318 struct callback_locals;
319 static int callback (struct callback_locals *locals, uintptr_t start, uintptr_t end);
320
321 # if defined __linux__ || defined __ANDROID__ || (defined __FreeBSD_kernel__ && !defined __FreeBSD__) || defined __CYGWIN__
322 /* GNU/kFreeBSD mounts /proc as linprocfs, which looks like a Linux /proc
323 file system. */
324
325 static int
326 vma_iterate_proc (struct callback_locals *locals)
327 {
328 struct rofile rof;
329
330 /* Open the current process' maps file. It describes one VMA per line. */
331 if (rof_open (&rof, "/proc/self/maps") >= 0)
332 {
333 uintptr_t auxmap_start = rof.auxmap_start;
334 uintptr_t auxmap_end = rof.auxmap_end;
335
336 for (;;)
337 {
338 uintptr_t start, end;
339 int c;
340
341 /* Parse one line. First start and end. */
342 if (!(rof_scanf_lx (&rof, &start) >= 0
343 && rof_getchar (&rof) == '-'
344 && rof_scanf_lx (&rof, &end) >= 0))
345 break;
346 while (c = rof_getchar (&rof), c != -1 && c != '\n')
347 ;
348
349 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
350 {
351 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
352 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
353 if (start < auxmap_start)
354 if (callback (locals, start, auxmap_start))
355 break;
356 if (auxmap_end - 1 < end - 1)
357 if (callback (locals, auxmap_end, end))
358 break;
359 }
360 else
361 {
362 if (callback (locals, start, end))
363 break;
364 }
365 }
366 rof_close (&rof);
367 return 0;
368 }
369
370 return -1;
371 }
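
/* For reference, a typical /proc/self/maps line looks like
     08048000-08056000 r-xp 00000000 03:0c 64593   /usr/sbin/gpm
   Only the two hexadecimal addresses at the start are parsed above; the
   rest of each line is skipped up to the newline.  */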
372
373 # elif defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__
374
375 static int
376 vma_iterate_proc (struct callback_locals *locals)
377 {
378 struct rofile rof;
379
380 /* Open the current process' maps file. It describes one VMA per line.
381 On FreeBSD:
382 Cf. <https://www.freebsd.org/cgi/cvsweb.cgi/src/sys/fs/procfs/procfs_map.c?annotate=HEAD>
383 On NetBSD, there are two such files:
384 - /proc/curproc/map in near-FreeBSD syntax,
385 - /proc/curproc/maps in Linux syntax.
386 Cf. <http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/miscfs/procfs/procfs_map.c?rev=HEAD> */
387 if (rof_open (&rof, "/proc/curproc/map") >= 0)
388 {
389 uintptr_t auxmap_start = rof.auxmap_start;
390 uintptr_t auxmap_end = rof.auxmap_end;
391
392 for (;;)
393 {
394 uintptr_t start, end;
395 int c;
396
397 /* Parse one line. First start. */
398 if (!(rof_getchar (&rof) == '0'
399 && rof_getchar (&rof) == 'x'
400 && rof_scanf_lx (&rof, &start) >= 0))
401 break;
402 while (c = rof_peekchar (&rof), c == ' ' || c == '\t')
403 rof_getchar (&rof);
404 /* Then end. */
405 if (!(rof_getchar (&rof) == '0'
406 && rof_getchar (&rof) == 'x'
407 && rof_scanf_lx (&rof, &end) >= 0))
408 break;
409 while (c = rof_getchar (&rof), c != -1 && c != '\n')
410 ;
411
412 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
413 {
414 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
415 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
416 if (start < auxmap_start)
417 if (callback (locals, start, auxmap_start))
418 break;
419 if (auxmap_end - 1 < end - 1)
420 if (callback (locals, auxmap_end, end))
421 break;
422 }
423 else
424 {
425 if (callback (locals, start, end))
426 break;
427 }
428 }
429 rof_close (&rof);
430 return 0;
431 }
432
433 return -1;
434 }
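
/* For reference, each line of this near-FreeBSD format begins with two
   0x-prefixed hexadecimal addresses, roughly
     0x8048000 0x8056000 ...
   (the remaining fields vary between FreeBSD and NetBSD versions).  Only
   these two addresses are parsed above; the rest of each line is skipped
   up to the newline.  */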
435
436 # endif
437
438 # if (defined __FreeBSD_kernel__ || defined __FreeBSD__) && defined KERN_PROC_VMMAP /* FreeBSD >= 7.1 */
439
440 # include <sys/user.h> /* struct kinfo_vmentry */
441 # include <sys/sysctl.h> /* sysctl */
442
443 static int
444 vma_iterate_bsd (struct callback_locals *locals)
445 {
446 /* Documentation: https://www.freebsd.org/cgi/man.cgi?sysctl(3) */
447 int info_path[] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid () };
448 size_t len;
449 size_t pagesize;
450 size_t memneed;
451 void *auxmap;
452 unsigned long auxmap_start;
453 unsigned long auxmap_end;
454 char *mem;
455 char *p;
456 char *p_end;
457
458 len = 0;
459 if (sysctl (info_path, 4, NULL, &len, NULL, 0) < 0)
460 return -1;
461 /* Allow for small variations over time. In a multithreaded program
462 new VMAs can be allocated at any moment. */
463 len = 2 * len + 200;
464 /* Allocate memneed bytes of memory.
465 We cannot use alloca here, because not much stack space is guaranteed.
466 We also cannot use malloc here, because a malloc() call may call mmap()
467 and thus pre-allocate available memory.
468 So use mmap(), and ignore the resulting VMA. */
469 pagesize = getpagesize ();
470 memneed = len;
471 memneed = ((memneed - 1) / pagesize + 1) * pagesize;
472 auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
473 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
474 if (auxmap == (void *) -1)
475 return -1;
476 auxmap_start = (unsigned long) auxmap;
477 auxmap_end = auxmap_start + memneed;
478 mem = (char *) auxmap;
479 if (sysctl (info_path, 4, mem, &len, NULL, 0) < 0)
480 {
481 munmap (auxmap, memneed);
482 return -1;
483 }
484 p = mem;
485 p_end = mem + len;
486 while (p < p_end)
487 {
488 struct kinfo_vmentry *kve = (struct kinfo_vmentry *) p;
489 unsigned long start = kve->kve_start;
490 unsigned long end = kve->kve_end;
491 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
492 {
493 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
494 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
495 if (start < auxmap_start)
496 if (callback (locals, start, auxmap_start))
497 break;
498 if (auxmap_end - 1 < end - 1)
499 if (callback (locals, auxmap_end, end))
500 break;
501 }
502 else
503 {
504 if (callback (locals, start, end))
505 break;
506 }
507 p += kve->kve_structsize;
508 }
509 munmap (auxmap, memneed);
510 return 0;
511 }
512
513 # else
514
515 # define vma_iterate_bsd(locals) (-1)
516
517 # endif
518
519
520 /* Iterate over the virtual memory areas of the current process.
521 If such iteration is supported, the callback is called once for every
522 virtual memory area, in ascending order, with the following arguments:
523 - LOCALS is the same argument as passed to vma_iterate.
524 - START is the address of the first byte in the area, page-aligned.
525 - END is the address of the last byte in the area plus 1, page-aligned.
526 Note that it may be 0 for the last area in the address space.
527 If the callback returns 0, the iteration continues. If it returns 1,
528 the iteration terminates prematurely.
529 This function may open file descriptors, but does not call malloc().
530 Return 0 if all went well, or -1 in case of error. */
531 static int
532 vma_iterate (struct callback_locals *locals)
533 {
534 # if defined __FreeBSD__
535 /* On FreeBSD with procfs (but not GNU/kFreeBSD, which uses linprocfs), the
536 function vma_iterate_proc does not return the virtual memory areas that
537 were created by anonymous mmap. See
538 <https://svnweb.freebsd.org/base/head/sys/fs/procfs/procfs_map.c?view=markup>
539 So use vma_iterate_proc only as a fallback. */
540 int retval = vma_iterate_bsd (locals);
541 if (retval == 0)
542 return 0;
543
544 return vma_iterate_proc (locals);
545 # else
  /* On the other platforms, try the /proc approach first, and sysctl()
     as a fallback.  */
548 int retval = vma_iterate_proc (locals);
549 if (retval == 0)
550 return 0;
551
552 return vma_iterate_bsd (locals);
553 # endif
554 }
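
/* Illustrative sketch (not compiled in): a minimal client of the
   vma_iterate contract described above.  In this file, struct
   callback_locals and callback are defined per platform further below;
   the definitions here are hypothetical stand-ins for illustration.  */
#if 0
struct callback_locals
{
  uintptr_t count;      /* number of VMAs seen so far */
};

static int
callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
{
  locals->count++;
  return 0;             /* 0 = continue the iteration, 1 = stop */
}

static uintptr_t
count_vmas (void)
{
  struct callback_locals locals = { 0 };
  if (vma_iterate (&locals) < 0)
    return 0;
  return locals.count;
}
#endif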
555
556 #endif
557
558 /* =========================== stackvma-mincore.c =========================== */
559
/* mincore() is a system call that allows inquiring about the status of a
   range of pages of virtual memory.  In particular, it allows determining
   whether a page is mapped at all (except on Mac OS X, where mincore
   returns 0 even for unmapped addresses).
   As of 2006, mincore() is supported by:          possible bits:
     - Linux,    since Linux 2.4 and glibc 2.2,    1
     - Solaris,  since Solaris 9,                  1
     - MacOS X,  since MacOS X 10.3 (at least),    1
     - FreeBSD,  since FreeBSD 6.0,                MINCORE_{INCORE,REFERENCED,MODIFIED}
     - NetBSD,   since NetBSD 3.0 (at least),      1
     - OpenBSD,  since OpenBSD 2.6 (at least),     1
     - AIX,      since AIX 5.3,                    1
   As of 2019, also on
     - Hurd.
   However, while the API makes it easy to determine the bounds of mapped
   virtual memory ranges, it does not make it easy to find the bounds of
   _unmapped_ virtual memory ranges.  We try to work around this, but it
   may still be slow.  */
578
579 #if defined __linux__ || defined __ANDROID__ \
580 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
581 || defined __NetBSD__ /* || defined __OpenBSD__ */ \
582 /* || (defined __APPLE__ && defined __MACH__) */ \
583 || defined _AIX || defined __sun
584
585 # include <unistd.h> /* getpagesize, mincore */
586 # include <sys/types.h>
587 # include <sys/mman.h> /* mincore */
588
589 /* The AIX declaration of mincore() uses 'caddr_t', whereas the other platforms
590 use 'void *'. */
591 # ifdef _AIX
592 typedef caddr_t MINCORE_ADDR_T;
593 # else
594 typedef void* MINCORE_ADDR_T;
595 # endif
596
597 /* The glibc and musl declaration of mincore() uses 'unsigned char *', whereas
598 the BSD declaration uses 'char *'. */
599 # if __GLIBC__ >= 2 || defined __linux__ || defined __ANDROID__
600 typedef unsigned char pageinfo_t;
601 # else
602 typedef char pageinfo_t;
603 # endif
604
605 /* Cache for getpagesize(). */
606 static uintptr_t pagesize;
607
608 /* Initialize pagesize. */
609 static void
610 init_pagesize (void)
611 {
612 pagesize = getpagesize ();
613 }
614
/* Test whether the page starting at ADDR is mapped.
   ADDR must be a multiple of pagesize.  */
617 static int
618 is_mapped (uintptr_t addr)
619 {
620 pageinfo_t vec[1];
621 return mincore ((MINCORE_ADDR_T) addr, pagesize, vec) >= 0;
622 }
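
/* Note: mincore() returns 0 when the entire range is mapped, and -1 with
   errno = ENOMEM when some page in it is unmapped; the >= 0 test above
   relies on exactly this.  (On Mac OS X, mincore() returns 0 even for
   unmapped addresses, which is why this code is disabled there.)  */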
623
/* Assuming that the page starting at ADDR is mapped, return the start of
   its virtual memory range.
   ADDR must be a multiple of pagesize.  */
627 static uintptr_t
628 mapped_range_start (uintptr_t addr)
629 {
630 /* Use a moderately sized VEC here, small enough that it fits on the stack
631 (without requiring malloc). */
632 pageinfo_t vec[1024];
633 uintptr_t stepsize = sizeof (vec);
634
635 for (;;)
636 {
637 uintptr_t max_remaining;
638
639 if (addr == 0)
640 return addr;
641
642 max_remaining = addr / pagesize;
643 if (stepsize > max_remaining)
644 stepsize = max_remaining;
645 if (mincore ((MINCORE_ADDR_T) (addr - stepsize * pagesize),
646 stepsize * pagesize, vec) < 0)
647 /* Time to search in smaller steps. */
648 break;
649 /* The entire range exists. Continue searching in large steps. */
650 addr -= stepsize * pagesize;
651 }
652 for (;;)
653 {
654 uintptr_t halfstepsize1;
655 uintptr_t halfstepsize2;
656
657 if (stepsize == 1)
658 return addr;
659
      /* Here we know that fewer than stepsize mapped pages immediately
         precede addr.  */
661 halfstepsize1 = (stepsize + 1) / 2;
662 halfstepsize2 = stepsize / 2;
663 /* halfstepsize1 + halfstepsize2 = stepsize. */
664
665 if (mincore ((MINCORE_ADDR_T) (addr - halfstepsize1 * pagesize),
666 halfstepsize1 * pagesize, vec) < 0)
667 stepsize = halfstepsize1;
668 else
669 {
670 addr -= halfstepsize1 * pagesize;
671 stepsize = halfstepsize2;
672 }
673 }
674 }
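
/* Worked example: with pagesize = 0x1000, a mapping that starts at 0x400000,
   and addr = 0x403000, the first loop's probe of up to 1024 pages below addr
   fails immediately (it reaches below 0x400000 into unmapped space), and
   the second loop then halves stepsize from 1024 down to 1, moving addr
   downwards only when an entire half-window is mapped, until addr converges
   on 0x400000.  The cost is O(log stepsize) mincore() calls rather than one
   call per page.  */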
675
/* Assuming that the page starting at ADDR is mapped, return the end of
   its virtual memory range + 1.
   ADDR must be a multiple of pagesize.  */
679 static uintptr_t
680 mapped_range_end (uintptr_t addr)
681 {
682 /* Use a moderately sized VEC here, small enough that it fits on the stack
683 (without requiring malloc). */
684 pageinfo_t vec[1024];
685 uintptr_t stepsize = sizeof (vec);
686
687 addr += pagesize;
688 for (;;)
689 {
690 uintptr_t max_remaining;
691
692 if (addr == 0) /* wrapped around? */
693 return addr;
694
695 max_remaining = (- addr) / pagesize;
696 if (stepsize > max_remaining)
697 stepsize = max_remaining;
698 if (mincore ((MINCORE_ADDR_T) addr, stepsize * pagesize, vec) < 0)
699 /* Time to search in smaller steps. */
700 break;
701 /* The entire range exists. Continue searching in large steps. */
702 addr += stepsize * pagesize;
703 }
704 for (;;)
705 {
706 uintptr_t halfstepsize1;
707 uintptr_t halfstepsize2;
708
709 if (stepsize == 1)
710 return addr;
711
712 /* Here we know that less than stepsize pages exist starting at addr. */
713 halfstepsize1 = (stepsize + 1) / 2;
714 halfstepsize2 = stepsize / 2;
715 /* halfstepsize1 + halfstepsize2 = stepsize. */
716
717 if (mincore ((MINCORE_ADDR_T) addr, halfstepsize1 * pagesize, vec) < 0)
718 stepsize = halfstepsize1;
719 else
720 {
721 addr += halfstepsize1 * pagesize;
722 stepsize = halfstepsize2;
723 }
724 }
725 }
726
727 /* Determine whether an address range [ADDR1..ADDR2] is completely unmapped.
728 ADDR1 must be <= ADDR2. */
729 static int
730 is_unmapped (uintptr_t addr1, uintptr_t addr2)
731 {
732 uintptr_t count;
733 uintptr_t stepsize;
734
735 /* Round addr1 down. */
736 addr1 = (addr1 / pagesize) * pagesize;
737 /* Round addr2 up and turn it into an exclusive bound. */
738 addr2 = ((addr2 / pagesize) + 1) * pagesize;
739
740 /* This is slow: mincore() does not provide a way to determine the bounds
741 of the gaps directly. So we have to use mincore() on individual pages
     over and over again.  Only after we've verified that all pages are
     unmapped do we know that the range is completely unmapped.
744 If we were to traverse the pages from bottom to top or from top to bottom,
745 it would be slow even in the average case. To speed up the search, we
746 exploit the fact that mapped memory ranges are larger than one page on
747 average, therefore we have good chances of hitting a mapped area if we
748 traverse only every second, or only fourth page, etc. This doesn't
749 decrease the worst-case runtime, only the average runtime. */
750 count = (addr2 - addr1) / pagesize;
751 /* We have to test is_mapped (addr1 + i * pagesize) for 0 <= i < count. */
752 for (stepsize = 1; stepsize < count; )
753 stepsize = 2 * stepsize;
754 for (;;)
755 {
756 uintptr_t addr_stepsize;
757 uintptr_t i;
758 uintptr_t addr;
759
760 stepsize = stepsize / 2;
761 if (stepsize == 0)
762 break;
763 addr_stepsize = stepsize * pagesize;
764 for (i = stepsize, addr = addr1 + addr_stepsize;
765 i < count;
766 i += 2 * stepsize, addr += 2 * addr_stepsize)
767 /* Here addr = addr1 + i * pagesize. */
768 if (is_mapped (addr))
769 return 0;
770 }
771 return 1;
772 }
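
/* Worked example: for count = 8 pages, the loop probes page indices 4,
   then 2 and 6, then 1, 3, 5 and 7 (index i meaning the page at
   addr1 + i * pagesize).  A mapped region several pages long is therefore
   typically detected within the first rounds, long before all pages have
   been probed individually.  */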
773
774 # if STACK_DIRECTION < 0
775
/* Test whether ADDR lies in the half, adjacent to this VMA, of the gap
   between this VMA and the previous one.
   addr must be < vma->start.  */
778 static int
779 mincore_is_near_this (uintptr_t addr, struct vma_struct *vma)
780 {
781 /* vma->start - addr <= (vma->start - vma->prev_end) / 2
782 is mathematically equivalent to
783 vma->prev_end <= 2 * addr - vma->start
784 <==> is_unmapped (2 * addr - vma->start, vma->start - 1).
785 But be careful about overflow: if 2 * addr - vma->start is negative,
786 we consider a tiny "guard page" mapping [0, 0] to be present around
787 NULL; it intersects the range (2 * addr - vma->start, vma->start - 1),
788 therefore return false. */
789 uintptr_t testaddr = addr - (vma->start - addr);
790 if (testaddr > addr) /* overflow? */
791 return 0;
792 /* Here testaddr <= addr < vma->start. */
793 return is_unmapped (testaddr, vma->start - 1);
794 }
795
796 # endif
797 # if STACK_DIRECTION > 0
798
/* Test whether ADDR lies in the half, adjacent to this VMA, of the gap
   between this VMA and the next one.
   addr must be > vma->end - 1.  */
801 static int
802 mincore_is_near_this (uintptr_t addr, struct vma_struct *vma)
803 {
804 /* addr - vma->end < (vma->next_start - vma->end) / 2
805 is mathematically equivalent to
806 vma->next_start > 2 * addr - vma->end
807 <==> is_unmapped (vma->end, 2 * addr - vma->end).
808 But be careful about overflow: if 2 * addr - vma->end is > ~0UL,
809 we consider a tiny "guard page" mapping [0, 0] to be present around
810 NULL; it intersects the range (vma->end, 2 * addr - vma->end),
811 therefore return false. */
812 uintptr_t testaddr = addr + (addr - vma->end);
813 if (testaddr < addr) /* overflow? */
814 return 0;
815 /* Here vma->end - 1 < addr <= testaddr. */
816 return is_unmapped (vma->end, testaddr);
817 }
818
819 # endif
820
821 static int
822 mincore_get_vma (uintptr_t address, struct vma_struct *vma)
823 {
824 if (pagesize == 0)
825 init_pagesize ();
826 address = (address / pagesize) * pagesize;
827 vma->start = mapped_range_start (address);
828 vma->end = mapped_range_end (address);
829 vma->is_near_this = mincore_is_near_this;
830 return 0;
831 }
832
833 #endif
834
835 /* ========================================================================== */
836
837 /* ---------------------------- stackvma-linux.c ---------------------------- */
838
839 #if defined __linux__ || defined __ANDROID__ /* Linux */
840
841 struct callback_locals
842 {
843 uintptr_t address;
844 struct vma_struct *vma;
845 # if STACK_DIRECTION < 0
846 uintptr_t prev;
847 # else
848 int stop_at_next_vma;
849 # endif
850 int retval;
851 };
852
853 static int
854 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
855 {
856 # if STACK_DIRECTION < 0
857 if (locals->address >= start && locals->address <= end - 1)
858 {
859 locals->vma->start = start;
860 locals->vma->end = end;
861 locals->vma->prev_end = locals->prev;
862 locals->retval = 0;
863 return 1;
864 }
865 locals->prev = end;
866 # else
867 if (locals->stop_at_next_vma)
868 {
869 locals->vma->next_start = start;
870 locals->stop_at_next_vma = 0;
871 return 1;
872 }
873 if (locals->address >= start && locals->address <= end - 1)
874 {
875 locals->vma->start = start;
876 locals->vma->end = end;
877 locals->retval = 0;
878 locals->stop_at_next_vma = 1;
879 return 0;
880 }
881 # endif
882 return 0;
883 }
884
885 int
886 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
887 {
888 struct callback_locals locals;
889 locals.address = address;
890 locals.vma = vma;
891 # if STACK_DIRECTION < 0
892 locals.prev = 0;
893 # else
894 locals.stop_at_next_vma = 0;
895 # endif
896 locals.retval = -1;
897
898 vma_iterate (&locals);
899 if (locals.retval == 0)
900 {
901 # if !(STACK_DIRECTION < 0)
902 if (locals.stop_at_next_vma)
903 vma->next_start = 0;
904 # endif
905 vma->is_near_this = simple_is_near_this;
906 return 0;
907 }
908
909 return mincore_get_vma (address, vma);
910 }
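
/* Illustrative sketch (not compiled in): the intended use of
   sigsegv_get_vma.  A known in-stack address (stack_addr, a hypothetical
   parameter; e.g. the address of a local variable recorded at startup)
   yields the stack's VMA, and a later faulting address counts as probable
   stack overflow when it lies next to that VMA, in the nearer half of the
   adjacent gap, which is exactly what is_near_this tests.  Shown for the
   grow-down case; for STACK_DIRECTION > 0 the test mirrors to the other
   side of the VMA.  */
#if 0
static int
fault_is_probable_stack_overflow (uintptr_t stack_addr, uintptr_t fault_address)
{
  struct vma_struct vma;
  if (sigsegv_get_vma (stack_addr, &vma) < 0)
    return 0;
  if (fault_address >= vma.start && fault_address <= vma.end - 1)
    return 1;                                   /* inside the stack VMA */
  return fault_address < vma.start
         && vma.is_near_this (fault_address, &vma);
}
#endif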
911
912 /* --------------------------- stackvma-freebsd.c --------------------------- */
913
914 #elif defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ /* GNU/kFreeBSD, FreeBSD */
915
916 struct callback_locals
917 {
918 uintptr_t address;
919 struct vma_struct *vma;
  /* The stack appears as multiple adjacent segments, therefore we
     merge adjacent segments.  */
922 uintptr_t curr_start, curr_end;
923 # if STACK_DIRECTION < 0
924 uintptr_t prev_end;
925 # else
926 int stop_at_next_vma;
927 # endif
928 int retval;
929 };
930
931 static int
932 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
933 {
934 if (start == locals->curr_end)
935 {
936 /* Merge adjacent segments. */
937 locals->curr_end = end;
938 return 0;
939 }
940 # if STACK_DIRECTION < 0
941 if (locals->curr_start < locals->curr_end
942 && locals->address >= locals->curr_start
943 && locals->address <= locals->curr_end - 1)
944 {
945 locals->vma->start = locals->curr_start;
946 locals->vma->end = locals->curr_end;
947 locals->vma->prev_end = locals->prev_end;
948 locals->retval = 0;
949 return 1;
950 }
951 locals->prev_end = locals->curr_end;
952 # else
953 if (locals->stop_at_next_vma)
954 {
955 locals->vma->next_start = locals->curr_start;
956 locals->stop_at_next_vma = 0;
957 return 1;
958 }
959 if (locals->curr_start < locals->curr_end
960 && locals->address >= locals->curr_start
961 && locals->address <= locals->curr_end - 1)
962 {
963 locals->vma->start = locals->curr_start;
964 locals->vma->end = locals->curr_end;
965 locals->retval = 0;
966 locals->stop_at_next_vma = 1;
967 return 0;
968 }
969 # endif
970 locals->curr_start = start; locals->curr_end = end;
971 return 0;
972 }
973
974 int
975 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
976 {
977 struct callback_locals locals;
978 locals.address = address;
979 locals.vma = vma;
980 locals.curr_start = 0;
981 locals.curr_end = 0;
982 # if STACK_DIRECTION < 0
983 locals.prev_end = 0;
984 # else
985 locals.stop_at_next_vma = 0;
986 # endif
987 locals.retval = -1;
988
989 vma_iterate (&locals);
990 if (locals.retval < 0)
991 {
992 if (locals.curr_start < locals.curr_end
993 && address >= locals.curr_start && address <= locals.curr_end - 1)
994 {
995 vma->start = locals.curr_start;
996 vma->end = locals.curr_end;
997 # if STACK_DIRECTION < 0
998 vma->prev_end = locals.prev_end;
999 # else
1000 vma->next_start = 0;
1001 # endif
1002 locals.retval = 0;
1003 }
1004 }
1005 if (locals.retval == 0)
1006 {
1007 # if !(STACK_DIRECTION < 0)
1008 if (locals.stop_at_next_vma)
1009 vma->next_start = 0;
1010 # endif
1011 vma->is_near_this = simple_is_near_this;
1012 return 0;
1013 }
1014
  /* FreeBSD 6.[01] doesn't allow distinguishing unmapped pages from
     mapped but swapped-out pages.  Test at run time whether this mincore()
     bug is present.  */
1017 if (!is_mapped (0))
1018 /* OK, mincore() appears to work as expected. */
1019 return mincore_get_vma (address, vma);
1020 return -1;
1021 }
1022
1023 /* --------------------------- stackvma-netbsd.c --------------------------- */
1024
1025 #elif defined __NetBSD__ /* NetBSD */
1026
1027 struct callback_locals
1028 {
1029 uintptr_t address;
1030 struct vma_struct *vma;
  /* The stack appears as multiple adjacent segments, therefore we
     merge adjacent segments.  */
1033 uintptr_t curr_start, curr_end;
1034 # if STACK_DIRECTION < 0
1035 uintptr_t prev_end;
1036 # else
1037 int stop_at_next_vma;
1038 # endif
1039 int retval;
1040 };
1041
1042 static int
1043 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1044 {
1045 if (start == locals->curr_end)
1046 {
1047 /* Merge adjacent segments. */
1048 locals->curr_end = end;
1049 return 0;
1050 }
1051 # if STACK_DIRECTION < 0
1052 if (locals->curr_start < locals->curr_end
1053 && locals->address >= locals->curr_start
1054 && locals->address <= locals->curr_end - 1)
1055 {
1056 locals->vma->start = locals->curr_start;
1057 locals->vma->end = locals->curr_end;
1058 locals->vma->prev_end = locals->prev_end;
1059 locals->retval = 0;
1060 return 1;
1061 }
1062 locals->prev_end = locals->curr_end;
1063 # else
1064 if (locals->stop_at_next_vma)
1065 {
1066 locals->vma->next_start = locals->curr_start;
1067 locals->stop_at_next_vma = 0;
1068 return 1;
1069 }
1070 if (locals->curr_start < locals->curr_end
1071 && locals->address >= locals->curr_start
1072 && locals->address <= locals->curr_end - 1)
1073 {
1074 locals->vma->start = locals->curr_start;
1075 locals->vma->end = locals->curr_end;
1076 locals->retval = 0;
1077 locals->stop_at_next_vma = 1;
1078 return 0;
1079 }
1080 # endif
1081 locals->curr_start = start; locals->curr_end = end;
1082 return 0;
1083 }
1084
1085 int
1086 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1087 {
1088 struct callback_locals locals;
1089 locals.address = address;
1090 locals.vma = vma;
1091 locals.curr_start = 0;
1092 locals.curr_end = 0;
1093 # if STACK_DIRECTION < 0
1094 locals.prev_end = 0;
1095 # else
1096 locals.stop_at_next_vma = 0;
1097 # endif
1098 locals.retval = -1;
1099
1100 vma_iterate (&locals);
1101 if (locals.retval < 0)
1102 {
1103 if (locals.curr_start < locals.curr_end
1104 && address >= locals.curr_start && address <= locals.curr_end - 1)
1105 {
1106 vma->start = locals.curr_start;
1107 vma->end = locals.curr_end;
1108 # if STACK_DIRECTION < 0
1109 vma->prev_end = locals.prev_end;
1110 # else
1111 vma->next_start = 0;
1112 # endif
1113 locals.retval = 0;
1114 }
1115 }
1116 if (locals.retval == 0)
1117 {
1118 # if !(STACK_DIRECTION < 0)
1119 if (locals.stop_at_next_vma)
1120 vma->next_start = 0;
1121 # endif
1122 vma->is_near_this = simple_is_near_this;
1123 return 0;
1124 }
1125
1126 return mincore_get_vma (address, vma);
1127 }
1128
1129 /* --------------------------- stackvma-mquery.c --------------------------- */
1130
/* mquery() is a system call that allows inquiring about the status of a
   range of pages of virtual memory.  In particular, it allows determining
   whether a page is mapped at all and where the next unmapped page
   after a given address is.
   As of 2021, mquery() is supported by:
     - OpenBSD, since OpenBSD 3.4.
   Note that mquery can give different results than mincore.  For example,
   on OpenBSD 4.4 / i386 the stack segment (which starts around 0xcdbfe000)
   ends at 0xcfbfdfff according to mincore, but at 0xffffffff according to
   mquery.  */
1141
1142 #elif defined __OpenBSD__ /* OpenBSD */
1143
# include <unistd.h>   /* getpagesize */
# include <sys/types.h>
# include <sys/mman.h> /* mquery */
1147
1148 /* Cache for getpagesize(). */
1149 static uintptr_t pagesize;
1150
1151 /* Initialize pagesize. */
1152 static void
1153 init_pagesize (void)
1154 {
1155 pagesize = getpagesize ();
1156 }
1157
/* Test whether the page starting at ADDR is mapped.
   ADDR must be a multiple of pagesize.  */
1160 static int
1161 is_mapped (uintptr_t addr)
1162 {
1163 /* Avoid calling mquery with a NULL first argument, because this argument
1164 value has a specific meaning. We know the NULL page is unmapped. */
1165 if (addr == 0)
1166 return 0;
1167 return mquery ((void *) addr, pagesize, 0, MAP_FIXED, -1, 0) == (void *) -1;
1168 }
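
/* Note: with MAP_FIXED, mquery() checks whether the requested range is
   available at exactly the given address: it returns that address if the
   range is free, and (void *) -1 with errno = ENOMEM if it collides with
   an existing mapping.  Hence a return of (void *) -1 above means that
   some page of the range is mapped.  */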
1169
/* Assuming that the page starting at ADDR is mapped, return the start of
   its virtual memory range.
   ADDR must be a multiple of pagesize.  */
1173 static uintptr_t
1174 mapped_range_start (uintptr_t addr)
1175 {
1176 uintptr_t stepsize;
1177 uintptr_t known_unmapped_page;
1178
1179 /* Look at smaller addresses, in larger and larger steps, to minimize the
1180 number of mquery() calls. */
1181 stepsize = pagesize;
1182 for (;;)
1183 {
1184 uintptr_t hole;
1185
1186 if (addr == 0)
1187 abort ();
1188
1189 if (addr <= stepsize)
1190 {
1191 known_unmapped_page = 0;
1192 break;
1193 }
1194
1195 hole = (uintptr_t) mquery ((void *) (addr - stepsize), pagesize,
1196 0, 0, -1, 0);
1197 if (!(hole == (uintptr_t) (void *) -1 || hole >= addr))
1198 {
1199 /* Some part of [addr - stepsize, addr - 1] is unmapped. */
1200 known_unmapped_page = hole;
1201 break;
1202 }
1203
1204 /* The entire range [addr - stepsize, addr - 1] is mapped. */
1205 addr -= stepsize;
1206
1207 if (2 * stepsize > stepsize && 2 * stepsize < addr)
1208 stepsize = 2 * stepsize;
1209 }
1210
1211 /* Now reduce the step size again.
1212 We know that the page at known_unmapped_page is unmapped and that
1213 0 < addr - known_unmapped_page <= stepsize. */
1214 while (stepsize > pagesize && stepsize / 2 >= addr - known_unmapped_page)
1215 stepsize = stepsize / 2;
1216 /* Still 0 < addr - known_unmapped_page <= stepsize. */
1217 while (stepsize > pagesize)
1218 {
1219 uintptr_t hole;
1220
1221 stepsize = stepsize / 2;
1222 hole = (uintptr_t) mquery ((void *) (addr - stepsize), pagesize,
1223 0, 0, -1, 0);
1224 if (!(hole == (uintptr_t) (void *) -1 || hole >= addr))
1225 /* Some part of [addr - stepsize, addr - 1] is unmapped. */
1226 known_unmapped_page = hole;
1227 else
1228 /* The entire range [addr - stepsize, addr - 1] is mapped. */
1229 addr -= stepsize;
1230 /* Still 0 < addr - known_unmapped_page <= stepsize. */
1231 }
1232
1233 return addr;
1234 }
1235
/* Assuming that the page starting at ADDR is mapped, return the end of
   its virtual memory range + 1.
   ADDR must be a multiple of pagesize.  */
1239 static uintptr_t
1240 mapped_range_end (uintptr_t addr)
1241 {
1242 uintptr_t end;
1243
1244 if (addr == 0)
1245 abort ();
1246
1247 end = (uintptr_t) mquery ((void *) addr, pagesize, 0, 0, -1, 0);
1248 if (end == (uintptr_t) (void *) -1)
1249 end = 0; /* wrap around */
1250 return end;
1251 }
1252
1253 /* Determine whether an address range [ADDR1..ADDR2] is completely unmapped.
1254 ADDR1 must be <= ADDR2. */
1255 static int
1256 is_unmapped (uintptr_t addr1, uintptr_t addr2)
1257 {
1258 /* Round addr1 down. */
1259 addr1 = (addr1 / pagesize) * pagesize;
1260 /* Round addr2 up and turn it into an exclusive bound. */
1261 addr2 = ((addr2 / pagesize) + 1) * pagesize;
1262
1263 /* Avoid calling mquery with a NULL first argument, because this argument
1264 value has a specific meaning. We know the NULL page is unmapped. */
1265 if (addr1 == 0)
1266 addr1 = pagesize;
1267
1268 if (addr1 < addr2)
1269 {
1270 if (mquery ((void *) addr1, addr2 - addr1, 0, MAP_FIXED, -1, 0)
1271 == (void *) -1)
1272 /* Not all the interval [addr1 .. addr2 - 1] is unmapped. */
1273 return 0;
1274 else
1275 /* The interval [addr1 .. addr2 - 1] is unmapped. */
1276 return 1;
1277 }
1278 return 1;
1279 }
1280
1281 # if STACK_DIRECTION < 0
1282
/* Test whether ADDR lies in the half, adjacent to this VMA, of the gap
   between this VMA and the previous one.
   addr must be < vma->start.  */
1285 static int
1286 mquery_is_near_this (uintptr_t addr, struct vma_struct *vma)
1287 {
1288 /* vma->start - addr <= (vma->start - vma->prev_end) / 2
1289 is mathematically equivalent to
1290 vma->prev_end <= 2 * addr - vma->start
1291 <==> is_unmapped (2 * addr - vma->start, vma->start - 1).
1292 But be careful about overflow: if 2 * addr - vma->start is negative,
1293 we consider a tiny "guard page" mapping [0, 0] to be present around
1294 NULL; it intersects the range (2 * addr - vma->start, vma->start - 1),
1295 therefore return false. */
1296 uintptr_t testaddr = addr - (vma->start - addr);
1297 if (testaddr > addr) /* overflow? */
1298 return 0;
1299 /* Here testaddr <= addr < vma->start. */
1300 return is_unmapped (testaddr, vma->start - 1);
1301 }
1302
1303 # endif
1304 # if STACK_DIRECTION > 0
1305
/* Test whether ADDR lies in the half, adjacent to this VMA, of the gap
   between this VMA and the next one.
   addr must be > vma->end - 1.  */
1308 static int
1309 mquery_is_near_this (uintptr_t addr, struct vma_struct *vma)
1310 {
1311 /* addr - vma->end < (vma->next_start - vma->end) / 2
1312 is mathematically equivalent to
1313 vma->next_start > 2 * addr - vma->end
1314 <==> is_unmapped (vma->end, 2 * addr - vma->end).
1315 But be careful about overflow: if 2 * addr - vma->end is > ~0UL,
1316 we consider a tiny "guard page" mapping [0, 0] to be present around
1317 NULL; it intersects the range (vma->end, 2 * addr - vma->end),
1318 therefore return false. */
1319 uintptr_t testaddr = addr + (addr - vma->end);
1320 if (testaddr < addr) /* overflow? */
1321 return 0;
1322 /* Here vma->end - 1 < addr <= testaddr. */
1323 return is_unmapped (vma->end, testaddr);
1324 }
1325
1326 # endif
1327
1328 int
1329 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1330 {
1331 if (pagesize == 0)
1332 init_pagesize ();
1333 address = (address / pagesize) * pagesize;
1334 vma->start = mapped_range_start (address);
1335 vma->end = mapped_range_end (address);
1336 vma->is_near_this = mquery_is_near_this;
1337 return 0;
1338 }
1339
1340 /* ---------------------------- stackvma-mach.c ---------------------------- */
1341
1342 #elif (defined __APPLE__ && defined __MACH__) /* macOS */
1343
1344 #include <libc.h>
1345 #include <nlist.h>
1346 #include <mach/mach.h>
1347 #include <mach/machine/vm_param.h>
1348
1349 int
1350 sigsegv_get_vma (uintptr_t req_address, struct vma_struct *vma)
1351 {
1352 uintptr_t prev_address = 0, prev_size = 0;
1353 uintptr_t join_address = 0, join_size = 0;
1354 int more = 1;
1355 vm_address_t address;
1356 vm_size_t size;
1357 task_t task = mach_task_self ();
1358
1359 for (address = VM_MIN_ADDRESS; more; address += size)
1360 {
1361 mach_port_t object_name;
1362 /* In MacOS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t have
1363 32 bits in 32-bit processes and 64 bits in 64-bit processes. Whereas
1364 mach_vm_address_t and mach_vm_size_t are always 64 bits large.
         MacOS X 10.5 has three vm_region-like functions:
1366 - vm_region. It has arguments that depend on whether the current
1367 process is 32-bit or 64-bit. When linking dynamically, this
1368 function exists only in 32-bit processes. Therefore we use it only
1369 in 32-bit processes.
1370 - vm_region_64. It has arguments that depend on whether the current
1371 process is 32-bit or 64-bit. It interprets a flavor
1372 VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
1373 dangerous since 'struct vm_region_basic_info_64' is larger than
1374 'struct vm_region_basic_info'; therefore let's write
1375 VM_REGION_BASIC_INFO_64 explicitly.
1376 - mach_vm_region. It has arguments that are 64-bit always. This
1377 function is useful when you want to access the VM of a process
1378 other than the current process.
1379 In 64-bit processes, we could use vm_region_64 or mach_vm_region.
1380 I choose vm_region_64 because it uses the same types as vm_region,
1381 resulting in less conditional code. */
1382 # if defined __aarch64__ || defined __ppc64__ || defined __x86_64__
1383 struct vm_region_basic_info_64 info;
1384 mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
1385
1386 more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
1387 (vm_region_info_t)&info, &info_count, &object_name)
1388 == KERN_SUCCESS);
1389 # else
1390 struct vm_region_basic_info info;
1391 mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;
1392
1393 more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
1394 (vm_region_info_t)&info, &info_count, &object_name)
1395 == KERN_SUCCESS);
1396 # endif
1397 if (!more)
1398 {
1399 address = join_address + join_size;
1400 size = 0;
1401 }
1402
1403 if ((uintptr_t) address == join_address + join_size)
1404 join_size += size;
1405 else
1406 {
1407 prev_address = join_address;
1408 prev_size = join_size;
1409 join_address = (uintptr_t) address;
1410 join_size = size;
1411 }
1412
1413 if (object_name != MACH_PORT_NULL)
1414 mach_port_deallocate (mach_task_self (), object_name);
1415
1416 # if STACK_DIRECTION < 0
1417 if (join_address <= req_address && join_address + join_size > req_address)
1418 {
1419 vma->start = join_address;
1420 vma->end = join_address + join_size;
1421 vma->prev_end = prev_address + prev_size;
1422 vma->is_near_this = simple_is_near_this;
1423 return 0;
1424 }
1425 # else
1426 if (prev_address <= req_address && prev_address + prev_size > req_address)
1427 {
1428 vma->start = prev_address;
1429 vma->end = prev_address + prev_size;
1430 vma->next_start = join_address;
1431 vma->is_near_this = simple_is_near_this;
1432 return 0;
1433 }
1434 # endif
1435 }
1436
1437 # if STACK_DIRECTION > 0
  if (join_address <= req_address && join_address + join_size > req_address)
    {
      vma->start = join_address;
      vma->end = join_address + join_size;
1442 vma->next_start = ~0UL;
1443 vma->is_near_this = simple_is_near_this;
1444 return 0;
1445 }
1446 # endif
1447
1448 return -1;
1449 }
1450
1451 /* -------------------------------------------------------------------------- */
1452
1453 #elif defined _AIX /* AIX */
1454
1455 int
1456 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1457 {
1458 return mincore_get_vma (address, vma);
1459 }
1460
1461 /* --------------------------- stackvma-procfs.h --------------------------- */
1462
1463 #elif defined __sgi || defined __sun /* IRIX, Solaris */
1464
1465 # include <errno.h> /* errno, EINTR */
1466 # include <fcntl.h> /* open, O_RDONLY */
1467 # include <stddef.h> /* size_t */
1468 # include <unistd.h> /* getpagesize, getpid, read, close */
1469 # include <sys/types.h>
1470 # include <sys/mman.h> /* mmap, munmap */
1471 # include <sys/stat.h> /* fstat */
1472 # include <string.h> /* memcpy */
1473
1474 /* Try to use the newer ("structured") /proc filesystem API, if supported. */
1475 # define _STRUCTURED_PROC 1
1476 # include <sys/procfs.h> /* prmap_t, optionally PIOC* */
1477
1478 # if !defined __sun
1479
1480 /* Cache for getpagesize(). */
1481 static uintptr_t pagesize;
1482
1483 /* Initialize pagesize. */
1484 static void
1485 init_pagesize (void)
1486 {
1487 pagesize = getpagesize ();
1488 }
1489
1490 # endif
1491
1492 struct callback_locals
1493 {
1494 uintptr_t address;
1495 struct vma_struct *vma;
1496 # if STACK_DIRECTION < 0
1497 uintptr_t prev;
1498 # else
1499 int stop_at_next_vma;
1500 # endif
1501 int retval;
1502 };
1503
1504 static int
1505 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1506 {
1507 # if STACK_DIRECTION < 0
1508 if (locals->address >= start && locals->address <= end - 1)
1509 {
1510 locals->vma->start = start;
1511 locals->vma->end = end;
1512 locals->vma->prev_end = locals->prev;
1513 locals->retval = 0;
1514 return 1;
1515 }
1516 locals->prev = end;
1517 # else
1518 if (locals->stop_at_next_vma)
1519 {
1520 locals->vma->next_start = start;
1521 locals->stop_at_next_vma = 0;
1522 return 1;
1523 }
1524 if (locals->address >= start && locals->address <= end - 1)
1525 {
1526 locals->vma->start = start;
1527 locals->vma->end = end;
1528 locals->retval = 0;
1529 locals->stop_at_next_vma = 1;
1530 return 0;
1531 }
1532 # endif
1533 return 0;
1534 }
1535
1536 /* Iterate over the virtual memory areas of the current process.
1537 If such iteration is supported, the callback is called once for every
1538 virtual memory area, in ascending order, with the following arguments:
1539 - LOCALS is the same argument as passed to vma_iterate.
1540 - START is the address of the first byte in the area, page-aligned.
1541 - END is the address of the last byte in the area plus 1, page-aligned.
1542 Note that it may be 0 for the last area in the address space.
1543 If the callback returns 0, the iteration continues. If it returns 1,
1544 the iteration terminates prematurely.
1545 This function may open file descriptors, but does not call malloc().
1546 Return 0 if all went well, or -1 in case of error. */
/* This code is a simplified copy (no handling of protection flags) of the
1548 code in gnulib's lib/vma-iter.c. */
1549 static int
1550 vma_iterate (struct callback_locals *locals)
1551 {
  /* Note: Solaris <sys/procfs.h> defines a different type prmap_t with
     _STRUCTURED_PROC than without!  Here's a table of sizeof (prmap_t):
                                32-bit    64-bit
       _STRUCTURED_PROC = 0       32        56
       _STRUCTURED_PROC = 1       96       104
     Therefore, if the include files provide the newer API, prmap_t has
     the bigger size, and thus you MUST use the newer API.  And if the
     include files provide the older API, prmap_t has the smaller size,
     and thus you MUST use the older API.  */
1561
1562 # if defined PIOCNMAP && defined PIOCMAP
1563 /* We must use the older /proc interface. */
1564
1565 char fnamebuf[6+10+1];
1566 char *fname;
1567 int fd;
1568 int nmaps;
1569 size_t memneed;
1570 # if HAVE_MAP_ANONYMOUS
1571 # define zero_fd -1
1572 # define map_flags MAP_ANONYMOUS
1573 # else /* !HAVE_MAP_ANONYMOUS */
1574 int zero_fd;
1575 # define map_flags 0
1576 # endif
1577 void *auxmap;
1578 uintptr_t auxmap_start;
1579 uintptr_t auxmap_end;
1580 prmap_t* maps;
1581 prmap_t* mp;
1582
1583 if (pagesize == 0)
1584 init_pagesize ();
1585
1586 /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()). */
1587 fname = fnamebuf + sizeof (fnamebuf) - 1;
1588 *fname = '\0';
1589 {
1590 unsigned int value = getpid ();
1591 do
1592 *--fname = (value % 10) + '0';
1593 while ((value = value / 10) > 0);
1594 }
1595 fname -= 6;
1596 memcpy (fname, "/proc/", 6);
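
/* Worked example: for getpid () == 12345 the digit loop above fills
   fnamebuf back to front, leaving fname pointing at the string
   "/proc/12345" inside fnamebuf, with no sprintf() or malloc() involved.  */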
1597
1598 fd = open (fname, O_RDONLY);
1599 if (fd < 0)
1600 return -1;
1601
1602 if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
1603 goto fail2;
1604
1605 memneed = (nmaps + 10) * sizeof (prmap_t);
1606 /* Allocate memneed bytes of memory.
1607 We cannot use alloca here, because not much stack space is guaranteed.
1608 We also cannot use malloc here, because a malloc() call may call mmap()
1609 and thus pre-allocate available memory.
1610 So use mmap(), and ignore the resulting VMA. */
1611 memneed = ((memneed - 1) / pagesize + 1) * pagesize;
1612 # if !HAVE_MAP_ANONYMOUS
1613 zero_fd = open ("/dev/zero", O_RDONLY, 0644);
1614 if (zero_fd < 0)
1615 goto fail2;
1616 # endif
1617 auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
1618 map_flags | MAP_PRIVATE, zero_fd, 0);
1619 # if !HAVE_MAP_ANONYMOUS
1620 close (zero_fd);
1621 # endif
1622 if (auxmap == (void *) -1)
1623 goto fail2;
1624 auxmap_start = (uintptr_t) auxmap;
1625 auxmap_end = auxmap_start + memneed;
1626 maps = (prmap_t *) auxmap;
1627
1628 if (ioctl (fd, PIOCMAP, maps) < 0)
1629 goto fail1;
1630
1631 for (mp = maps;;)
1632 {
1633 uintptr_t start, end;
1634
1635 start = (uintptr_t) mp->pr_vaddr;
1636 end = start + mp->pr_size;
1637 if (start == 0 && end == 0)
1638 break;
1639 mp++;
1640 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
1641 {
1642 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
1643 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
1644 if (start < auxmap_start)
1645 if (callback (locals, start, auxmap_start))
1646 break;
1647 if (auxmap_end - 1 < end - 1)
1648 if (callback (locals, auxmap_end, end))
1649 break;
1650 }
1651 else
1652 {
1653 if (callback (locals, start, end))
1654 break;
1655 }
1656 }
1657 munmap (auxmap, memneed);
1658 close (fd);
1659 return 0;
1660
1661 fail1:
1662 munmap (auxmap, memneed);
1663 fail2:
1664 close (fd);
1665 return -1;
1666
1667 # else
1668 /* We must use the newer /proc interface.
1669 Documentation:
1670 https://docs.oracle.com/cd/E23824_01/html/821-1473/proc-4.html
     The contents of /proc/<pid>/map consist of records of type
1672 prmap_t. These are different in 32-bit and 64-bit processes,
1673 but here we are fortunately accessing only the current process. */
1674
1675 char fnamebuf[6+10+4+1];
1676 char *fname;
1677 int fd;
1678 int nmaps;
1679 size_t memneed;
1680 # if HAVE_MAP_ANONYMOUS
1681 # define zero_fd -1
1682 # define map_flags MAP_ANONYMOUS
1683 # else /* !HAVE_MAP_ANONYMOUS */
1684 int zero_fd;
1685 # define map_flags 0
1686 # endif
1687 void *auxmap;
1688 uintptr_t auxmap_start;
1689 uintptr_t auxmap_end;
1690 prmap_t* maps;
1691 prmap_t* maps_end;
1692 prmap_t* mp;
1693
1694 if (pagesize == 0)
1695 init_pagesize ();
1696
1697 /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()). */
1698 fname = fnamebuf + sizeof (fnamebuf) - 1 - 4;
1699 memcpy (fname, "/map", 4 + 1);
1700 {
1701 unsigned int value = getpid ();
1702 do
1703 *--fname = (value % 10) + '0';
1704 while ((value = value / 10) > 0);
1705 }
1706 fname -= 6;
1707 memcpy (fname, "/proc/", 6);
1708
1709 fd = open (fname, O_RDONLY);
1710 if (fd < 0)
1711 return -1;
1712
1713 {
1714 struct stat statbuf;
1715 if (fstat (fd, &statbuf) < 0)
1716 goto fail2;
1717 nmaps = statbuf.st_size / sizeof (prmap_t);
1718 }
1719
1720 memneed = (nmaps + 10) * sizeof (prmap_t);
1721 /* Allocate memneed bytes of memory.
1722 We cannot use alloca here, because not much stack space is guaranteed.
1723 We also cannot use malloc here, because a malloc() call may call mmap()
1724 and thus pre-allocate available memory.
1725 So use mmap(), and ignore the resulting VMA. */
1726 memneed = ((memneed - 1) / pagesize + 1) * pagesize;
1727 # if !HAVE_MAP_ANONYMOUS
1728 zero_fd = open ("/dev/zero", O_RDONLY, 0644);
1729 if (zero_fd < 0)
1730 goto fail2;
1731 # endif
1732 auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
1733 map_flags | MAP_PRIVATE, zero_fd, 0);
1734 # if !HAVE_MAP_ANONYMOUS
1735 close (zero_fd);
1736 # endif
1737 if (auxmap == (void *) -1)
1738 goto fail2;
1739 auxmap_start = (uintptr_t) auxmap;
1740 auxmap_end = auxmap_start + memneed;
1741 maps = (prmap_t *) auxmap;
1742
1743 /* Read up to memneed bytes from fd into maps. */
1744 {
1745 size_t remaining = memneed;
1746 size_t total_read = 0;
1747 char *ptr = (char *) maps;
1748
1749 do
1750 {
1751 size_t nread = read (fd, ptr, remaining);
1752 if (nread == (size_t)-1)
1753 {
1754 if (errno == EINTR)
1755 continue;
1756 goto fail1;
1757 }
1758 if (nread == 0)
1759 /* EOF */
1760 break;
1761 total_read += nread;
1762 ptr += nread;
1763 remaining -= nread;
1764 }
1765 while (remaining > 0);
1766
1767 nmaps = (memneed - remaining) / sizeof (prmap_t);
1768 maps_end = maps + nmaps;
1769 }
1770
1771 for (mp = maps; mp < maps_end; mp++)
1772 {
1773 uintptr_t start, end;
1774
1775 start = (uintptr_t) mp->pr_vaddr;
1776 end = start + mp->pr_size;
1777 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
1778 {
1779 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
1780 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
1781 if (start < auxmap_start)
1782 if (callback (locals, start, auxmap_start))
1783 break;
1784 if (auxmap_end - 1 < end - 1)
1785 if (callback (locals, auxmap_end, end))
1786 break;
1787 }
1788 else
1789 {
1790 if (callback (locals, start, end))
1791 break;
1792 }
1793 }
1794 munmap (auxmap, memneed);
1795 close (fd);
1796 return 0;
1797
1798 fail1:
1799 munmap (auxmap, memneed);
1800 fail2:
1801 close (fd);
1802 return -1;
1803
1804 # endif
1805 }
1806
1807 int
1808 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1809 {
1810 struct callback_locals locals;
1811 locals.address = address;
1812 locals.vma = vma;
1813 # if STACK_DIRECTION < 0
1814 locals.prev = 0;
1815 # else
1816 locals.stop_at_next_vma = 0;
1817 # endif
1818 locals.retval = -1;
1819
1820 vma_iterate (&locals);
1821 if (locals.retval == 0)
1822 {
1823 # if !(STACK_DIRECTION < 0)
1824 if (locals.stop_at_next_vma)
1825 vma->next_start = 0;
1826 # endif
1827 vma->is_near_this = simple_is_near_this;
1828 return 0;
1829 }
1830
1831 # if defined __sun
1832 return mincore_get_vma (address, vma);
1833 # else
1834 return -1;
1835 # endif
1836 }
1837
1838 /* -------------------------------------------------------------------------- */
1839
1840 #elif defined __CYGWIN__ /* Cygwin */
1841
1842 struct callback_locals
1843 {
1844 uintptr_t address;
1845 struct vma_struct *vma;
  /* The stack appears as three adjacent segments, therefore we
     merge adjacent segments.  */
1848 uintptr_t curr_start, curr_end;
1849 # if STACK_DIRECTION < 0
1850 uintptr_t prev_end;
1851 # else
1852 int stop_at_next_vma;
1853 # endif
1854 int retval;
1855 };
1856
1857 static int
1858 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1859 {
1860 if (start == locals->curr_end)
1861 {
1862 /* Merge adjacent segments. */
1863 locals->curr_end = end;
1864 return 0;
1865 }
1866 # if STACK_DIRECTION < 0
1867 if (locals->curr_start < locals->curr_end
1868 && locals->address >= locals->curr_start
1869 && locals->address <= locals->curr_end - 1)
1870 {
1871 locals->vma->start = locals->curr_start;
1872 locals->vma->end = locals->curr_end;
1873 locals->vma->prev_end = locals->prev_end;
1874 locals->retval = 0;
1875 return 1;
1876 }
1877 locals->prev_end = locals->curr_end;
1878 # else
1879 if (locals->stop_at_next_vma)
1880 {
1881 locals->vma->next_start = locals->curr_start;
1882 locals->stop_at_next_vma = 0;
1883 return 1;
1884 }
1885 if (locals->curr_start < locals->curr_end
1886 && locals->address >= locals->curr_start
1887 && locals->address <= locals->curr_end - 1)
1888 {
1889 locals->vma->start = locals->curr_start;
1890 locals->vma->end = locals->curr_end;
1891 locals->retval = 0;
1892 locals->stop_at_next_vma = 1;
1893 return 0;
1894 }
1895 # endif
1896 locals->curr_start = start; locals->curr_end = end;
1897 return 0;
1898 }
1899
1900 int
1901 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1902 {
1903 struct callback_locals locals;
1904 locals.address = address;
1905 locals.vma = vma;
1906 locals.curr_start = 0;
1907 locals.curr_end = 0;
1908 # if STACK_DIRECTION < 0
1909 locals.prev_end = 0;
1910 # else
1911 locals.stop_at_next_vma = 0;
1912 # endif
1913 locals.retval = -1;
1914
1915 vma_iterate (&locals);
1916 if (locals.retval < 0)
1917 {
1918 if (locals.curr_start < locals.curr_end
1919 && address >= locals.curr_start && address <= locals.curr_end - 1)
1920 {
1921 vma->start = locals.curr_start;
1922 vma->end = locals.curr_end;
1923 # if STACK_DIRECTION < 0
1924 vma->prev_end = locals.prev_end;
1925 # else
1926 vma->next_start = 0;
1927 # endif
1928 locals.retval = 0;
1929 }
1930 }
1931 if (locals.retval == 0)
1932 {
1933 # if !(STACK_DIRECTION < 0)
1934 if (locals.stop_at_next_vma)
1935 vma->next_start = 0;
1936 # endif
1937 vma->is_near_this = simple_is_near_this;
1938 return 0;
1939 }
1940
1941 return -1;
1942 }
1943
1944 /* ---------------------------- stackvma-beos.h ---------------------------- */
1945
1946 #elif defined __HAIKU__ /* Haiku */
1947
1948 # include <OS.h> /* get_next_area_info */
1949
1950 struct callback_locals
1951 {
1952 uintptr_t address;
1953 struct vma_struct *vma;
1954 # if STACK_DIRECTION < 0
1955 uintptr_t prev;
1956 # else
1957 int stop_at_next_vma;
1958 # endif
1959 int retval;
1960 };
1961
1962 static int
1963 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1964 {
1965 # if STACK_DIRECTION < 0
1966 if (locals->address >= start && locals->address <= end - 1)
1967 {
1968 locals->vma->start = start;
1969 locals->vma->end = end;
1970 locals->vma->prev_end = locals->prev;
1971 locals->retval = 0;
1972 return 1;
1973 }
1974 locals->prev = end;
1975 # else
1976 if (locals->stop_at_next_vma)
1977 {
1978 locals->vma->next_start = start;
1979 locals->stop_at_next_vma = 0;
1980 return 1;
1981 }
1982 if (locals->address >= start && locals->address <= end - 1)
1983 {
1984 locals->vma->start = start;
1985 locals->vma->end = end;
1986 locals->retval = 0;
1987 locals->stop_at_next_vma = 1;
1988 return 0;
1989 }
1990 # endif
1991 return 0;
1992 }
1993
1994 /* Iterate over the virtual memory areas of the current process.
1995 If such iteration is supported, the callback is called once for every
1996 virtual memory area, in ascending order, with the following arguments:
1997 - LOCALS is the same argument as passed to vma_iterate.
1998 - START is the address of the first byte in the area, page-aligned.
1999 - END is the address of the last byte in the area plus 1, page-aligned.
2000 Note that it may be 0 for the last area in the address space.
2001 If the callback returns 0, the iteration continues. If it returns 1,
2002 the iteration terminates prematurely.
2003 This function may open file descriptors, but does not call malloc().
2004 Return 0 if all went well, or -1 in case of error. */
/* This code is a simplified copy (no handling of protection flags) of the
2006 code in gnulib's lib/vma-iter.c. */
2007 static int
2008 vma_iterate (struct callback_locals *locals)
2009 {
2010 area_info info;
2011 ssize_t cookie;
2012
2013 cookie = 0;
2014 while (get_next_area_info (0, &cookie, &info) == B_OK)
2015 {
2016 uintptr_t start, end;
2017
2018 start = (uintptr_t) info.address;
2019 end = start + info.size;
2020
2021 if (callback (locals, start, end))
2022 break;
2023 }
2024 return 0;
2025 }
2026
2027 int
2028 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
2029 {
2030 struct callback_locals locals;
2031 locals.address = address;
2032 locals.vma = vma;
2033 # if STACK_DIRECTION < 0
2034 locals.prev = 0;
2035 # else
2036 locals.stop_at_next_vma = 0;
2037 # endif
2038 locals.retval = -1;
2039
2040 vma_iterate (&locals);
2041 if (locals.retval == 0)
2042 {
2043 # if !(STACK_DIRECTION < 0)
2044 if (locals.stop_at_next_vma)
2045 vma->next_start = 0;
2046 # endif
2047 vma->is_near_this = simple_is_near_this;
2048 return 0;
2049 }
2050 return -1;
2051 }
2052
2053 /* -------------------------------------------------------------------------- */
2054
2055 #else /* Hurd, Minix, ... */
2056
2057 int
2058 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
2059 {
2060 /* No way. */
2061 return -1;
2062 }
2063
2064 #endif