root/maint/gnulib/lib/stackvma.c


DEFINITIONS

This source file includes the following definitions.
  1. simple_is_near_this
  2. simple_is_near_this
  3. rof_open
  4. rof_peekchar
  5. rof_getchar
  6. rof_scanf_lx
  7. rof_close
  8. vma_iterate_proc
  9. vma_iterate_proc
  10. vma_iterate_bsd
  11. vma_iterate
  12. init_pagesize
  13. is_mapped
  14. mapped_range_start
  15. mapped_range_end
  16. is_unmapped
  17. mincore_is_near_this
  18. mincore_is_near_this
  19. mincore_get_vma
  20. callback
  21. sigsegv_get_vma
  22. callback
  23. sigsegv_get_vma
  24. callback
  25. sigsegv_get_vma
  26. init_pagesize
  27. is_mapped
  28. mapped_range_start
  29. mapped_range_end
  30. is_unmapped
  31. mquery_is_near_this
  32. mquery_is_near_this
  33. sigsegv_get_vma
  34. sigsegv_get_vma
  35. sigsegv_get_vma
  36. init_pagesize
  37. callback
  38. vma_iterate
  39. sigsegv_get_vma
  40. callback
  41. sigsegv_get_vma
  42. callback
  43. vma_iterate
  44. sigsegv_get_vma
  45. sigsegv_get_vma

   1 /* Determine the virtual memory area of a given address.
   2    Copyright (C) 2002-2021 Free Software Foundation, Inc.
   3    Copyright (C) 2003-2006  Paolo Bonzini <bonzini@gnu.org>
   4 
   5    This program is free software: you can redistribute it and/or modify
   6    it under the terms of the GNU General Public License as published by
   7    the Free Software Foundation; either version 2 of the License, or
   8    (at your option) any later version.
   9 
  10    This program is distributed in the hope that it will be useful,
  11    but WITHOUT ANY WARRANTY; without even the implied warranty of
  12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13    GNU General Public License for more details.
  14 
  15    You should have received a copy of the GNU General Public License
  16    along with this program.  If not, see <https://www.gnu.org/licenses/>.  */
  17 
  18 /* Written by Bruno Haible and Paolo Bonzini.  */
  19 
  20 #include <config.h>
  21 
  22 /* On Solaris in 32-bit mode, when gnulib module 'largefile' is in use,
  23    prevent a compilation error
  24      "Cannot use procfs in the large file compilation environment"
  25    On Android, when targeting Android 4.4 or older with a GCC toolchain,
  26    prevent a compilation error
  27      "error: call to 'mmap' declared with attribute error: mmap is not
  28       available with _FILE_OFFSET_BITS=64 when using GCC until android-21.
  29       Either raise your minSdkVersion, disable _FILE_OFFSET_BITS=64, or
  30       switch to Clang."
  31    The files that we access in this compilation unit are less than 2 GB
  32    in size.  */
  33 #if defined __sun || defined __ANDROID__
  34 # undef _FILE_OFFSET_BITS
  35 #endif
  36 
  37 /* Specification.  */
  38 #include "stackvma.h"
  39 
  40 #include <stdio.h>
  41 #include <stdlib.h>
  42 
  43 /* =========================== stackvma-simple.c =========================== */
  44 
  45 #if defined __linux__ || defined __ANDROID__ \
  46     || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
  47     || defined __NetBSD__ \
  48     || (defined __APPLE__ && defined __MACH__) \
  49     || defined __sgi || defined __sun \
  50     || defined __CYGWIN__ || defined __HAIKU__
  51 
  52 /* This file contains the proximity test function for the simple cases, where
  53    the OS has an API for enumerating the mapped ranges of virtual memory.  */
  54 
  55 # if STACK_DIRECTION < 0
  56 
  57 /* Info about the gap between this VMA and the previous one.
  58    addr must be < vma->start.  */
  59 static int
  60 simple_is_near_this (uintptr_t addr, struct vma_struct *vma)
  61 {
  62   return (vma->start - addr <= (vma->start - vma->prev_end) / 2);
  63 }
  64 
  65 # endif
  66 # if STACK_DIRECTION > 0
  67 
  68 /* Info about the gap between this VMA and the next one.
  69    addr must be > vma->end - 1.  */
  70 static int
  71 simple_is_near_this (uintptr_t addr, struct vma_struct *vma)
  72 {
  73   return (addr - vma->end < (vma->next_start - vma->end) / 2);
  74 }
  75 
  76 # endif
  77 
  78 #endif
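
/* A minimal illustration of the proximity rule above, for a downward-growing
   stack (STACK_DIRECTION < 0).  The addresses and the driver function are
   made up for illustration; only simple_is_near_this and struct vma_struct
   come from this file and stackvma.h.  An address counts as "near" the VMA
   when it lies in the half of the preceding gap that is closer to vma->start.  */
#if 0 /* illustrative sketch, not compiled */
#include <stdio.h>
static void
illustrate_simple_proximity (void)
{
  struct vma_struct vma;
  vma.prev_end = 0x100000;  /* hypothetical end of the previous mapping */
  vma.start    = 0x200000;  /* hypothetical start of the stack VMA */
  /* The gap is [0x100000, 0x200000); its midpoint is 0x180000.  */
  printf ("%d\n", simple_is_near_this (0x1f0000, &vma));  /* 1: within half a gap */
  printf ("%d\n", simple_is_near_this (0x110000, &vma));  /* 0: too far below */
}
#endif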
  79 
  80 /* =========================== stackvma-rofile.c =========================== */
  81 /* Buffered read-only streams.  */
  82 
  83 #if defined __linux__ || defined __ANDROID__ \
  84     || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
  85     || defined __NetBSD__ \
  86     || defined __CYGWIN__
  87 
  88 # include <errno.h> /* errno, EINTR */
  89 # include <fcntl.h> /* open, O_RDONLY */
  90 # include <stddef.h> /* size_t */
  91 # include <unistd.h> /* getpagesize, lseek, read, close */
  92 # include <sys/types.h>
  93 # include <sys/mman.h> /* mmap, munmap */
  94 
  95 # if defined __linux__ || defined __ANDROID__
  96 #  include <limits.h> /* PATH_MAX */
  97 # endif
  98 
  99 /* Buffered read-only streams.
 100    We cannot use <stdio.h> here, because fopen() calls malloc(), and a malloc()
 101    call may have been interrupted.
 102    Also, we cannot use multiple read() calls, because if the buffer size is
 103    smaller than the file's contents:
 104      - On NetBSD, the second read() call would return 0, thus making the file
 105        appear truncated.
 106      - On DragonFly BSD, the first read() call would fail with errno = EFBIG.
 107      - On all platforms, if some other thread is doing memory allocations or
 108        deallocations between two read() calls, there is a high risk that the
 109        results of these two read() calls don't fit together, and as a
 110        consequence we will parse garbage and either omit some VMAs or return
 111        VMAs with nonsensical addresses.
 112    So use mmap(), and ignore the resulting VMA.
 113    The stack-allocated buffer cannot be too large, because this can be called
 114    when we are in the context of an alternate stack of just SIGSTKSZ bytes.  */
 115 
 116 # if defined __linux__ || defined __ANDROID__
 117   /* On Linux, if the file does not entirely fit into the buffer, the read()
 118      function stops before the line that would come out truncated.  The
 119      maximum size of such a line is 73 + PATH_MAX bytes.  To be sure that we
 120      have read everything, we must verify that at least that many bytes of
 121      buffer space are left over when read() returns.  */
 122 #  define MIN_LEFTOVER (73 + PATH_MAX)
 123 # else
 124 #  define MIN_LEFTOVER 1
 125 # endif
 126 
 127 # if MIN_LEFTOVER < 1024
 128 #  define STACK_ALLOCATED_BUFFER_SIZE 1024
 129 # else
 130   /* There is no point in using a stack-allocated buffer if it is too small
 131      anyway.  */
 132 #  define STACK_ALLOCATED_BUFFER_SIZE 1
 133 # endif
 134 
 135 struct rofile
 136   {
 137     size_t position;
 138     size_t filled;
 139     int eof_seen;
 140     /* These fields deal with allocation of the buffer.  */
 141     char *buffer;
 142     char *auxmap;
 143     size_t auxmap_length;
 144     uintptr_t auxmap_start;
 145     uintptr_t auxmap_end;
 146     char stack_allocated_buffer[STACK_ALLOCATED_BUFFER_SIZE];
 147   };
 148 
 149 /* Open a read-only file stream.  */
 150 static int
 151 rof_open (struct rofile *rof, const char *filename)
 152 {
 153   int fd;
 154   uintptr_t pagesize;
 155   size_t size;
 156 
 157   fd = open (filename, O_RDONLY);
 158   if (fd < 0)
 159     return -1;
 160   rof->position = 0;
 161   rof->eof_seen = 0;
 162   /* Try the static buffer first.  */
 163   pagesize = 0;
 164   rof->buffer = rof->stack_allocated_buffer;
 165   size = sizeof (rof->stack_allocated_buffer);
 166   rof->auxmap = NULL;
 167   rof->auxmap_start = 0;
 168   rof->auxmap_end = 0;
 169   for (;;)
 170     {
 171       /* Attempt to read the contents in a single system call.  */
 172       if (size > MIN_LEFTOVER)
 173         {
 174           int n = read (fd, rof->buffer, size);
 175           if (n < 0 && errno == EINTR)
 176             goto retry;
 177 # if defined __DragonFly__
 178           if (!(n < 0 && errno == EFBIG))
 179 # endif
 180             {
 181               if (n <= 0)
 182                 /* Empty file.  */
 183                 goto fail1;
 184               if (n + MIN_LEFTOVER <= size)
 185                 {
 186                   /* The buffer was sufficiently large.  */
 187                   rof->filled = n;
 188 # if defined __linux__ || defined __ANDROID__
 189                   /* On Linux, the read() call may stop even if the buffer was
 190                      large enough.  We need the equivalent of full_read().  */
 191                   for (;;)
 192                     {
 193                       n = read (fd, rof->buffer + rof->filled, size - rof->filled);
 194                       if (n < 0 && errno == EINTR)
 195                         goto retry;
 196                       if (n < 0)
 197                         /* Some error.  */
 198                         goto fail1;
 199                       if (n + MIN_LEFTOVER > size - rof->filled)
 200                         /* Allocate a larger buffer.  */
 201                         break;
 202                       if (n == 0)
 203                         {
 204                           /* Reached the end of file.  */
 205                           close (fd);
 206                           return 0;
 207                         }
 208                       rof->filled += n;
 209                     }
 210 # else
 211                   close (fd);
 212                   return 0;
 213 # endif
 214                 }
 215             }
 216         }
 217       /* Allocate a larger buffer.  */
 218       if (pagesize == 0)
 219         {
 220           pagesize = getpagesize ();
 221           size = pagesize;
 222           while (size <= MIN_LEFTOVER)
 223             size = 2 * size;
 224         }
 225       else
 226         {
 227           size = 2 * size;
 228           if (size == 0)
 229             /* Wraparound.  */
 230             goto fail1;
 231           if (rof->auxmap != NULL)
 232             munmap (rof->auxmap, rof->auxmap_length);
 233         }
 234       rof->auxmap = (void *) mmap ((void *) 0, size, PROT_READ | PROT_WRITE,
 235                                    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 236       if (rof->auxmap == (void *) -1)
 237         {
 238           close (fd);
 239           return -1;
 240         }
 241       rof->auxmap_length = size;
 242       rof->auxmap_start = (uintptr_t) rof->auxmap;
 243       rof->auxmap_end = rof->auxmap_start + size;
 244       rof->buffer = (char *) rof->auxmap;
 245      retry:
 246       /* Restart.  */
 247       if (lseek (fd, 0, SEEK_SET) < 0)
 248         {
 249           close (fd);
 250           fd = open (filename, O_RDONLY);
 251           if (fd < 0)
 252             goto fail2;
 253         }
 254     }
 255  fail1:
 256   close (fd);
 257  fail2:
 258   if (rof->auxmap != NULL)
 259     munmap (rof->auxmap, rof->auxmap_length);
 260   return -1;
 261 }
 262 
 263 /* Return the next byte from a read-only file stream without consuming it,
 264    or -1 at EOF.  */
 265 static int
 266 rof_peekchar (struct rofile *rof)
 267 {
 268   if (rof->position == rof->filled)
 269     {
 270       rof->eof_seen = 1;
 271       return -1;
 272     }
 273   return (unsigned char) rof->buffer[rof->position];
 274 }
 275 
 276 /* Return the next byte from a read-only file stream, or -1 at EOF.  */
 277 static int
 278 rof_getchar (struct rofile *rof)
 279 {
 280   int c = rof_peekchar (rof);
 281   if (c >= 0)
 282     rof->position++;
 283   return c;
 284 }
 285 
 286 /* Parse an unsigned hexadecimal number from a read-only file stream.  */
 287 static int
 288 rof_scanf_lx (struct rofile *rof, uintptr_t *valuep)
 289 {
 290   uintptr_t value = 0;
 291   unsigned int numdigits = 0;
 292   for (;;)
 293     {
 294       int c = rof_peekchar (rof);
 295       if (c >= '0' && c <= '9')
 296         value = (value << 4) + (c - '0');
 297       else if (c >= 'A' && c <= 'F')
 298         value = (value << 4) + (c - 'A' + 10);
 299       else if (c >= 'a' && c <= 'f')
 300         value = (value << 4) + (c - 'a' + 10);
 301       else
 302         break;
 303       rof_getchar (rof);
 304       numdigits++;
 305     }
 306   if (numdigits == 0)
 307     return -1;
 308   *valuep = value;
 309   return 0;
 310 }
 311 
 312 /* Close a read-only file stream.  */
 313 static void
 314 rof_close (struct rofile *rof)
 315 {
 316   if (rof->auxmap != NULL)
 317     munmap (rof->auxmap, rof->auxmap_length);
 318 }
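
/* A minimal usage sketch for the rof_* primitives above: read the first
   "start-end" pair from /proc/self/maps.  This mirrors, in reduced form, what
   vma_iterate_proc below does in full; the error handling is intentionally
   minimal and the function name is made up.  */
#if 0 /* illustrative sketch, not compiled */
static void
rof_example (void)
{
  struct rofile rof;
  uintptr_t start, end;

  if (rof_open (&rof, "/proc/self/maps") >= 0)
    {
      if (rof_scanf_lx (&rof, &start) >= 0
          && rof_getchar (&rof) == '-'
          && rof_scanf_lx (&rof, &end) >= 0)
        {
          /* [start, end) is the first mapping listed.  */
        }
      rof_close (&rof);
    }
}
#endif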
 319 
 320 #endif
 321 
 322 /* ========================== stackvma-vma-iter.c ========================== */
 323 /* Iterate through the virtual memory areas of the current process,
 324    by reading from the /proc file system.  */
 325 
 326 /* This code is a simplified copy (no handling of protection flags) of the
 327    code in gnulib's lib/vma-iter.c.  */
 328 
 329 #if defined __linux__ || defined __ANDROID__ \
 330     || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
 331     || defined __NetBSD__ \
 332     || defined __CYGWIN__
 333 
 334 /* Forward declarations.  */
 335 struct callback_locals;
 336 static int callback (struct callback_locals *locals, uintptr_t start, uintptr_t end);
 337 
 338 # if defined __linux__ || defined __ANDROID__ || (defined __FreeBSD_kernel__ && !defined __FreeBSD__) || defined __CYGWIN__
 339 /* GNU/kFreeBSD mounts /proc as linprocfs, which looks like a Linux /proc
 340    file system.  */
 341 
 342 static int
 343 vma_iterate_proc (struct callback_locals *locals)
 344 {
 345   struct rofile rof;
 346 
 347   /* Open the current process' maps file.  It describes one VMA per line.  */
 348   if (rof_open (&rof, "/proc/self/maps") >= 0)
 349     {
 350       uintptr_t auxmap_start = rof.auxmap_start;
 351       uintptr_t auxmap_end = rof.auxmap_end;
 352 
 353       for (;;)
 354         {
 355           uintptr_t start, end;
 356           int c;
 357 
 358           /* Parse one line.  First start and end.  */
 359           if (!(rof_scanf_lx (&rof, &start) >= 0
 360                 && rof_getchar (&rof) == '-'
 361                 && rof_scanf_lx (&rof, &end) >= 0))
 362             break;
 363           while (c = rof_getchar (&rof), c != -1 && c != '\n')
 364             ;
 365 
 366           if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
 367             {
 368               /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
 369                  = [start,auxmap_start-1] u [auxmap_end,end-1].  */
 370               if (start < auxmap_start)
 371                 if (callback (locals, start, auxmap_start))
 372                   break;
 373               if (auxmap_end - 1 < end - 1)
 374                 if (callback (locals, auxmap_end, end))
 375                   break;
 376             }
 377           else
 378             {
 379               if (callback (locals, start, end))
 380                 break;
 381             }
 382         }
 383       rof_close (&rof);
 384       return 0;
 385     }
 386 
 387   return -1;
 388 }
 389 
 390 # elif defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__
 391 
 392 static int
 393 vma_iterate_proc (struct callback_locals *locals)
 394 {
 395   struct rofile rof;
 396 
 397   /* Open the current process' maps file.  It describes one VMA per line.
 398      On FreeBSD:
 399        Cf. <https://www.freebsd.org/cgi/cvsweb.cgi/src/sys/fs/procfs/procfs_map.c?annotate=HEAD>
 400      On NetBSD, there are two such files:
 401        - /proc/curproc/map in near-FreeBSD syntax,
 402        - /proc/curproc/maps in Linux syntax.
 403        Cf. <http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/miscfs/procfs/procfs_map.c?rev=HEAD> */
 404   if (rof_open (&rof, "/proc/curproc/map") >= 0)
 405     {
 406       uintptr_t auxmap_start = rof.auxmap_start;
 407       uintptr_t auxmap_end = rof.auxmap_end;
 408 
 409       for (;;)
 410         {
 411           uintptr_t start, end;
 412           int c;
 413 
 414           /* Parse one line.  First start.  */
 415           if (!(rof_getchar (&rof) == '0'
 416                 && rof_getchar (&rof) == 'x'
 417                 && rof_scanf_lx (&rof, &start) >= 0))
 418             break;
 419           while (c = rof_peekchar (&rof), c == ' ' || c == '\t')
 420             rof_getchar (&rof);
 421           /* Then end.  */
 422           if (!(rof_getchar (&rof) == '0'
 423                 && rof_getchar (&rof) == 'x'
 424                 && rof_scanf_lx (&rof, &end) >= 0))
 425             break;
 426           while (c = rof_getchar (&rof), c != -1 && c != '\n')
 427             ;
 428 
 429           if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
 430             {
 431               /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
 432                  = [start,auxmap_start-1] u [auxmap_end,end-1].  */
 433               if (start < auxmap_start)
 434                 if (callback (locals, start, auxmap_start))
 435                   break;
 436               if (auxmap_end - 1 < end - 1)
 437                 if (callback (locals, auxmap_end, end))
 438                   break;
 439             }
 440           else
 441             {
 442               if (callback (locals, start, end))
 443                 break;
 444             }
 445         }
 446       rof_close (&rof);
 447       return 0;
 448     }
 449 
 450   return -1;
 451 }
 452 
 453 # endif
 454 
 455 # if (defined __FreeBSD_kernel__ || defined __FreeBSD__) && defined KERN_PROC_VMMAP /* FreeBSD >= 7.1 */
 456 
 457 #  include <sys/user.h> /* struct kinfo_vmentry */
 458 #  include <sys/sysctl.h> /* sysctl */
 459 
 460 static int
 461 vma_iterate_bsd (struct callback_locals *locals)
 462 {
 463   /* Documentation: https://www.freebsd.org/cgi/man.cgi?sysctl(3)  */
 464   int info_path[] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid () };
 465   size_t len;
 466   size_t pagesize;
 467   size_t memneed;
 468   void *auxmap;
 469   unsigned long auxmap_start;
 470   unsigned long auxmap_end;
 471   char *mem;
 472   char *p;
 473   char *p_end;
 474 
 475   len = 0;
 476   if (sysctl (info_path, 4, NULL, &len, NULL, 0) < 0)
 477     return -1;
 478   /* Allow for small variations over time.  In a multithreaded program
 479      new VMAs can be allocated at any moment.  */
 480   len = 2 * len + 200;
 481   /* Allocate memneed bytes of memory.
 482      We cannot use alloca here, because not much stack space is guaranteed.
 483      We also cannot use malloc here, because a malloc() call may call mmap()
 484      and thus pre-allocate available memory.
 485      So use mmap(), and ignore the resulting VMA.  */
 486   pagesize = getpagesize ();
 487   memneed = len;
 488   memneed = ((memneed - 1) / pagesize + 1) * pagesize;
 489   auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
 490                           MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 491   if (auxmap == (void *) -1)
 492     return -1;
 493   auxmap_start = (unsigned long) auxmap;
 494   auxmap_end = auxmap_start + memneed;
 495   mem = (char *) auxmap;
 496   if (sysctl (info_path, 4, mem, &len, NULL, 0) < 0)
 497     {
 498       munmap (auxmap, memneed);
 499       return -1;
 500     }
 501   p = mem;
 502   p_end = mem + len;
 503   while (p < p_end)
 504     {
 505       struct kinfo_vmentry *kve = (struct kinfo_vmentry *) p;
 506       unsigned long start = kve->kve_start;
 507       unsigned long end = kve->kve_end;
 508       if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
 509         {
 510           /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
 511              = [start,auxmap_start-1] u [auxmap_end,end-1].  */
 512           if (start < auxmap_start)
 513             if (callback (locals, start, auxmap_start))
 514               break;
 515           if (auxmap_end - 1 < end - 1)
 516             if (callback (locals, auxmap_end, end))
 517               break;
 518         }
 519       else
 520         {
 521           if (callback (locals, start, end))
 522             break;
 523         }
 524       p += kve->kve_structsize;
 525     }
 526   munmap (auxmap, memneed);
 527   return 0;
 528 }
 529 
 530 # else
 531 
 532 #  define vma_iterate_bsd(locals) (-1)
 533 
 534 # endif
 535 
 536 
 537 /* Iterate over the virtual memory areas of the current process.
 538    If such iteration is supported, the callback is called once for every
 539    virtual memory area, in ascending order, with the following arguments:
 540      - LOCALS is the same argument as passed to vma_iterate.
 541      - START is the address of the first byte in the area, page-aligned.
 542      - END is the address of the last byte in the area plus 1, page-aligned.
 543        Note that it may be 0 for the last area in the address space.
 544    If the callback returns 0, the iteration continues.  If it returns 1,
 545    the iteration terminates prematurely.
 546    This function may open file descriptors, but does not call malloc().
 547    Return 0 if all went well, or -1 in case of error.  */
 548 static int
 549 vma_iterate (struct callback_locals *locals)
 550 {
 551 # if defined __FreeBSD__
 552   /* On FreeBSD with procfs (but not GNU/kFreeBSD, which uses linprocfs), the
 553      function vma_iterate_proc does not return the virtual memory areas that
 554      were created by anonymous mmap.  See
 555      <https://svnweb.freebsd.org/base/head/sys/fs/procfs/procfs_map.c?view=markup>
 556      So use vma_iterate_proc only as a fallback.  */
 557   int retval = vma_iterate_bsd (locals);
 558   if (retval == 0)
 559     return 0;
 560 
 561   return vma_iterate_proc (locals);
 562 # else
 563   /* On the other platforms, try the /proc approach first, and the sysctl()
 564      approach as a fallback.  */
 565   int retval = vma_iterate_proc (locals);
 566   if (retval == 0)
 567     return 0;
 568 
 569   return vma_iterate_bsd (locals);
 570 # endif
 571 }
 572 
 573 #endif
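
/* A sketch of the callback contract documented above, assuming a hypothetical
   locals structure that merely accumulates the size of every area.  In this
   file, the real 'struct callback_locals' and 'callback' are supplied by the
   platform-specific sections further down; the definitions here exist only to
   illustrate the contract.  */
#if 0 /* illustrative sketch, not compiled */
struct callback_locals
{
  uintptr_t total;  /* hypothetical: combined size of all iterated areas */
};

static int
callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
{
  /* END may be 0 for the last area; with uintptr_t arithmetic, end - start
     still yields the area's size modulo the address-space wrap-around.  */
  locals->total += end - start;
  return 0;  /* 0 = continue the iteration, 1 would stop it */
}

static void
callback_contract_example (void)
{
  struct callback_locals locals = { 0 };
  if (vma_iterate (&locals) == 0)
    {
      /* locals.total now holds the combined size of all iterated areas.  */
    }
}
#endif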
 574 
 575 /* =========================== stackvma-mincore.c =========================== */
 576 
 577 /* mincore() is a system call that allows one to inquire about the status of
 578    a range of pages of virtual memory.  In particular, it allows one to inquire
 579    whether a page is mapped at all (except on Mac OS X, where mincore
 580    returns 0 even for unmapped addresses).
 581    As of 2006, mincore() is supported by:        possible bits:
 582      - Linux,   since Linux 2.4 and glibc 2.2,   1
 583      - Solaris, since Solaris 9,                 1
 584      - MacOS X, since MacOS X 10.3 (at least),   1
 585      - FreeBSD, since FreeBSD 6.0,               MINCORE_{INCORE,REFERENCED,MODIFIED}
 586      - NetBSD,  since NetBSD 3.0 (at least),     1
 587      - OpenBSD, since OpenBSD 2.6 (at least),    1
 588      - AIX,     since AIX 5.3,                   1
 589    As of 2019, also on
 590      - Hurd.
 591    However, while the API makes it easy to determine the bounds of mapped
 592    virtual memory, it does not make it easy to find the bounds of _unmapped_
 593    virtual memory ranges.  We try to work around this, but it may still be
 594    slow.  */
 595 
 596 #if defined __linux__ || defined __ANDROID__ \
 597     || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
 598     || defined __NetBSD__ /* || defined __OpenBSD__ */ \
 599     /* || (defined __APPLE__ && defined __MACH__) */ \
 600     || defined _AIX || defined __sun
 601 
 602 # include <unistd.h> /* getpagesize, mincore */
 603 # include <sys/types.h>
 604 # include <sys/mman.h> /* mincore */
 605 
 606 /* The AIX declaration of mincore() uses 'caddr_t', whereas the other platforms
 607    use 'void *'. */
 608 # ifdef _AIX
 609 typedef caddr_t MINCORE_ADDR_T;
 610 # else
 611 typedef void* MINCORE_ADDR_T;
 612 # endif
 613 
 614 /* The glibc and musl declaration of mincore() uses 'unsigned char *', whereas
 615    the BSD declaration uses 'char *'.  */
 616 # if __GLIBC__ >= 2 || defined __linux__ || defined __ANDROID__
 617 typedef unsigned char pageinfo_t;
 618 # else
 619 typedef char pageinfo_t;
 620 # endif
 621 
 622 /* Cache for getpagesize().  */
 623 static uintptr_t pagesize;
 624 
 625 /* Initialize pagesize.  */
 626 static void
 627 init_pagesize (void)
 628 {
 629   pagesize = getpagesize ();
 630 }
 631 
 632 /* Test whether the page starting at ADDR is mapped.
 633    ADDR must be a multiple of pagesize.  */
 634 static int
 635 is_mapped (uintptr_t addr)
 636 {
 637   pageinfo_t vec[1];
 638   return mincore ((MINCORE_ADDR_T) addr, pagesize, vec) >= 0;
 639 }
 640 
 641 /* Assuming that the page starting at ADDR is mapped,
 642    return the start of its virtual memory range.
 643    ADDR must be a multiple of pagesize.  */
 644 static uintptr_t
 645 mapped_range_start (uintptr_t addr)
 646 {
 647   /* Use a moderately sized VEC here, small enough that it fits on the stack
 648      (without requiring malloc).  */
 649   pageinfo_t vec[1024];
 650   uintptr_t stepsize = sizeof (vec);
 651 
 652   for (;;)
 653     {
 654       uintptr_t max_remaining;
 655 
 656       if (addr == 0)
 657         return addr;
 658 
 659       max_remaining = addr / pagesize;
 660       if (stepsize > max_remaining)
 661         stepsize = max_remaining;
 662       if (mincore ((MINCORE_ADDR_T) (addr - stepsize * pagesize),
 663                    stepsize * pagesize, vec) < 0)
 664         /* Time to search in smaller steps.  */
 665         break;
 666       /* The entire range exists.  Continue searching in large steps.  */
 667       addr -= stepsize * pagesize;
 668     }
 669   for (;;)
 670     {
 671       uintptr_t halfstepsize1;
 672       uintptr_t halfstepsize2;
 673 
 674       if (stepsize == 1)
 675         return addr;
 676 
 677       /* Here we know that fewer than stepsize pages exist starting at addr.  */
 678       halfstepsize1 = (stepsize + 1) / 2;
 679       halfstepsize2 = stepsize / 2;
 680       /* halfstepsize1 + halfstepsize2 = stepsize.  */
 681 
 682       if (mincore ((MINCORE_ADDR_T) (addr - halfstepsize1 * pagesize),
 683                    halfstepsize1 * pagesize, vec) < 0)
 684         stepsize = halfstepsize1;
 685       else
 686         {
 687           addr -= halfstepsize1 * pagesize;
 688           stepsize = halfstepsize2;
 689         }
 690     }
 691 }
 692 
 693 /* Assuming that the page starting at ADDR is mapped,
 694    return the end of its virtual memory range + 1.
 695    ADDR must be a multiple of pagesize.  */
 696 static uintptr_t
 697 mapped_range_end (uintptr_t addr)
 698 {
 699   /* Use a moderately sized VEC here, small enough that it fits on the stack
 700      (without requiring malloc).  */
 701   pageinfo_t vec[1024];
 702   uintptr_t stepsize = sizeof (vec);
 703 
 704   addr += pagesize;
 705   for (;;)
 706     {
 707       uintptr_t max_remaining;
 708 
 709       if (addr == 0) /* wrapped around? */
 710         return addr;
 711 
 712       max_remaining = (- addr) / pagesize;
 713       if (stepsize > max_remaining)
 714         stepsize = max_remaining;
 715       if (mincore ((MINCORE_ADDR_T) addr, stepsize * pagesize, vec) < 0)
 716         /* Time to search in smaller steps.  */
 717         break;
 718       /* The entire range exists.  Continue searching in large steps.  */
 719       addr += stepsize * pagesize;
 720     }
 721   for (;;)
 722     {
 723       uintptr_t halfstepsize1;
 724       uintptr_t halfstepsize2;
 725 
 726       if (stepsize == 1)
 727         return addr;
 728 
 729       /* Here we know that fewer than stepsize pages exist starting at addr.  */
 730       halfstepsize1 = (stepsize + 1) / 2;
 731       halfstepsize2 = stepsize / 2;
 732       /* halfstepsize1 + halfstepsize2 = stepsize.  */
 733 
 734       if (mincore ((MINCORE_ADDR_T) addr, halfstepsize1 * pagesize, vec) < 0)
 735         stepsize = halfstepsize1;
 736       else
 737         {
 738           addr += halfstepsize1 * pagesize;
 739           stepsize = halfstepsize2;
 740         }
 741     }
 742 }
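
/* A self-contained sketch of the search strategy used by mapped_range_start
   and mapped_range_end above: probe in large fixed-size steps until a probe
   fails, then narrow down by halving the step.  It follows essentially the
   same control flow as mapped_range_end (minus the wrap-around check), with
   mincore() replaced by a made-up predicate and a made-up page layout so the
   behaviour can be traced in isolation; all names here are hypothetical.  */
#if 0 /* illustrative sketch, not compiled */
#include <stdio.h>
#include <stdint.h>

#define FAKE_PAGESIZE 0x1000
/* Pretend that exactly the pages with numbers 0x10..0x3F are mapped.  */
static int
fake_all_mapped (uintptr_t addr, uintptr_t len)
{
  uintptr_t first = addr / FAKE_PAGESIZE;
  uintptr_t count = len / FAKE_PAGESIZE;
  uintptr_t i;
  for (i = 0; i < count; i++)
    if (!(first + i >= 0x10 && first + i < 0x40))
      return 0;
  return 1;
}

static uintptr_t
fake_mapped_range_end (uintptr_t addr)
{
  uintptr_t stepsize = 1024;
  addr += FAKE_PAGESIZE;
  for (;;)
    {
      uintptr_t max_remaining = (- addr) / FAKE_PAGESIZE;
      if (stepsize > max_remaining)
        stepsize = max_remaining;
      if (!fake_all_mapped (addr, stepsize * FAKE_PAGESIZE))
        break;  /* overshot: refine with smaller steps */
      addr += stepsize * FAKE_PAGESIZE;
    }
  while (stepsize != 1)
    {
      uintptr_t halfstepsize1 = (stepsize + 1) / 2;
      uintptr_t halfstepsize2 = stepsize / 2;
      if (!fake_all_mapped (addr, halfstepsize1 * FAKE_PAGESIZE))
        stepsize = halfstepsize1;
      else
        {
          addr += halfstepsize1 * FAKE_PAGESIZE;
          stepsize = halfstepsize2;
        }
    }
  return addr;
}

int
main (void)
{
  /* Starting inside the fake mapping, the end is found at page 0x40.  */
  printf ("%#lx\n", (unsigned long) fake_mapped_range_end (0x20 * FAKE_PAGESIZE));
  return 0;
}
#endif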
 743 
 744 /* Determine whether an address range [ADDR1..ADDR2] is completely unmapped.
 745    ADDR1 must be <= ADDR2.  */
 746 static int
 747 is_unmapped (uintptr_t addr1, uintptr_t addr2)
 748 {
 749   uintptr_t count;
 750   uintptr_t stepsize;
 751 
 752   /* Round addr1 down.  */
 753   addr1 = (addr1 / pagesize) * pagesize;
 754   /* Round addr2 up and turn it into an exclusive bound.  */
 755   addr2 = ((addr2 / pagesize) + 1) * pagesize;
 756 
 757   /* This is slow: mincore() does not provide a way to determine the bounds
 758      of the gaps directly.  So we have to use mincore() on individual pages
 759      over and over again.  Only after we've verified that all pages are
 760      unmapped do we know that the range is completely unmapped.
 761      If we were to traverse the pages from bottom to top or from top to bottom,
 762      it would be slow even in the average case.  To speed up the search, we
 763      exploit the fact that mapped memory ranges are larger than one page on
 764      average, so we have a good chance of hitting a mapped area if we
 765      traverse only every second or every fourth page, etc.  This doesn't
 766      decrease the worst-case runtime, only the average runtime.  */
 767   count = (addr2 - addr1) / pagesize;
 768   /* We have to test is_mapped (addr1 + i * pagesize) for 0 <= i < count.  */
 769   for (stepsize = 1; stepsize < count; )
 770     stepsize = 2 * stepsize;
 771   for (;;)
 772     {
 773       uintptr_t addr_stepsize;
 774       uintptr_t i;
 775       uintptr_t addr;
 776 
 777       stepsize = stepsize / 2;
 778       if (stepsize == 0)
 779         break;
 780       addr_stepsize = stepsize * pagesize;
 781       for (i = stepsize, addr = addr1 + addr_stepsize;
 782            i < count;
 783            i += 2 * stepsize, addr += 2 * addr_stepsize)
 784         /* Here addr = addr1 + i * pagesize.  */
 785         if (is_mapped (addr))
 786           return 0;
 787     }
 788   return 1;
 789 }
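
/* A sketch of the probe order produced by the skip-stepping above, for a
   hypothetical gap of 8 pages.  The initial power-of-two stepsize is 8, so
   is_mapped would be called for the page indices 4, 2, 6, 1, 3, 5, 7 in that
   order (coarse passes first; index 0 is not probed by these loops).  The
   helper below is made up and only prints the indices.  */
#if 0 /* illustrative sketch, not compiled */
#include <stdio.h>
#include <stdint.h>

static void
print_probe_order (uintptr_t count)
{
  uintptr_t stepsize, i;
  for (stepsize = 1; stepsize < count; )
    stepsize = 2 * stepsize;
  for (;;)
    {
      stepsize = stepsize / 2;
      if (stepsize == 0)
        break;
      for (i = stepsize; i < count; i += 2 * stepsize)
        printf ("%lu ", (unsigned long) i);
    }
  printf ("\n");
}

int
main (void)
{
  print_probe_order (8);  /* prints: 4 2 6 1 3 5 7 */
  return 0;
}
#endif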
 790 
 791 # if STACK_DIRECTION < 0
 792 
 793 /* Info about the gap between this VMA and the previous one.
 794    addr must be < vma->start.  */
 795 static int
 796 mincore_is_near_this (uintptr_t addr, struct vma_struct *vma)
 797 {
 798   /*   vma->start - addr <= (vma->start - vma->prev_end) / 2
 799      is mathematically equivalent to
 800        vma->prev_end <= 2 * addr - vma->start
 801      <==> is_unmapped (2 * addr - vma->start, vma->start - 1).
 802      But be careful about overflow: if 2 * addr - vma->start is negative,
 803      we consider a tiny "guard page" mapping [0, 0] to be present around
 804      NULL; it intersects the range (2 * addr - vma->start, vma->start - 1),
 805      therefore return false.  */
 806   uintptr_t testaddr = addr - (vma->start - addr);
 807   if (testaddr > addr) /* overflow? */
 808     return 0;
 809   /* Here testaddr <= addr < vma->start.  */
 810   return is_unmapped (testaddr, vma->start - 1);
 811 }
 812 
 813 # endif
 814 # if STACK_DIRECTION > 0
 815 
 816 /* Info about the gap between this VMA and the next one.
 817    addr must be > vma->end - 1.  */
 818 static int
 819 mincore_is_near_this (uintptr_t addr, struct vma_struct *vma)
 820 {
 821   /*   addr - vma->end < (vma->next_start - vma->end) / 2
 822      is mathematically equivalent to
 823        vma->next_start > 2 * addr - vma->end
 824      <==> is_unmapped (vma->end, 2 * addr - vma->end).
 825      But be careful about overflow: if 2 * addr - vma->end is > ~0UL,
 826      we consider a tiny "guard page" mapping [0, 0] to be present around
 827      NULL; it intersects the range (vma->end, 2 * addr - vma->end),
 828      therefore return false.  */
 829   uintptr_t testaddr = addr + (addr - vma->end);
 830   if (testaddr < addr) /* overflow? */
 831     return 0;
 832   /* Here vma->end - 1 < addr <= testaddr.  */
 833   return is_unmapped (vma->end, testaddr);
 834 }
 835 
 836 # endif
 837 
 838 static int
 839 mincore_get_vma (uintptr_t address, struct vma_struct *vma)
 840 {
 841   if (pagesize == 0)
 842     init_pagesize ();
 843   address = (address / pagesize) * pagesize;
 844   vma->start = mapped_range_start (address);
 845   vma->end = mapped_range_end (address);
 846   vma->is_near_this = mincore_is_near_this;
 847   return 0;
 848 }
 849 
 850 #endif
 851 
 852 /* ========================================================================== */
 853 
 854 /* ---------------------------- stackvma-linux.c ---------------------------- */
 855 
 856 #if defined __linux__ || defined __ANDROID__ /* Linux */
 857 
 858 struct callback_locals
 859 {
 860   uintptr_t address;
 861   struct vma_struct *vma;
 862 # if STACK_DIRECTION < 0
 863   uintptr_t prev;
 864 # else
 865   int stop_at_next_vma;
 866 # endif
 867   int retval;
 868 };
 869 
 870 static int
 871 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
 872 {
 873 # if STACK_DIRECTION < 0
 874   if (locals->address >= start && locals->address <= end - 1)
 875     {
 876       locals->vma->start = start;
 877       locals->vma->end = end;
 878       locals->vma->prev_end = locals->prev;
 879       locals->retval = 0;
 880       return 1;
 881     }
 882   locals->prev = end;
 883 # else
 884   if (locals->stop_at_next_vma)
 885     {
 886       locals->vma->next_start = start;
 887       locals->stop_at_next_vma = 0;
 888       return 1;
 889     }
 890   if (locals->address >= start && locals->address <= end - 1)
 891     {
 892       locals->vma->start = start;
 893       locals->vma->end = end;
 894       locals->retval = 0;
 895       locals->stop_at_next_vma = 1;
 896       return 0;
 897     }
 898 # endif
 899   return 0;
 900 }
 901 
 902 int
 903 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
 904 {
 905   struct callback_locals locals;
 906   locals.address = address;
 907   locals.vma = vma;
 908 # if STACK_DIRECTION < 0
 909   locals.prev = 0;
 910 # else
 911   locals.stop_at_next_vma = 0;
 912 # endif
 913   locals.retval = -1;
 914 
 915   vma_iterate (&locals);
 916   if (locals.retval == 0)
 917     {
 918 # if !(STACK_DIRECTION < 0)
 919       if (locals.stop_at_next_vma)
 920         vma->next_start = 0;
 921 # endif
 922       vma->is_near_this = simple_is_near_this;
 923       return 0;
 924     }
 925 
 926   return mincore_get_vma (address, vma);
 927 }
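
/* A sketch of how a caller might use the result on this platform: determine
   the VMA of the current stack once, using the address of a local variable,
   and later classify a fault address as a likely stack overflow when it lies
   inside that VMA or in the nearer half of the adjacent gap.  The helper
   names and the classification policy are illustrative, not part of this
   file's API.  */
#if 0 /* illustrative sketch, not compiled */
static struct vma_struct stack_vma;
static int stack_vma_known;

static void
remember_stack_vma (void)
{
  int dummy;
  stack_vma_known = (sigsegv_get_vma ((uintptr_t) &dummy, &stack_vma) == 0);
}

static int
fault_is_probably_stack_overflow (uintptr_t fault_address)
{
  if (!stack_vma_known)
    return 0;
  if (fault_address >= stack_vma.start && fault_address <= stack_vma.end - 1)
    return 1;
  return stack_vma.is_near_this (fault_address, &stack_vma);
}
#endif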
 928 
 929 /* --------------------------- stackvma-freebsd.c --------------------------- */
 930 
 931 #elif defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ /* GNU/kFreeBSD, FreeBSD */
 932 
 933 struct callback_locals
 934 {
 935   uintptr_t address;
 936   struct vma_struct *vma;
 937   /* The stack appears as multiple adjacent segments, therefore we
 938      merge adjacent segments.  */
 939   uintptr_t curr_start, curr_end;
 940 # if STACK_DIRECTION < 0
 941   uintptr_t prev_end;
 942 # else
 943   int stop_at_next_vma;
 944 # endif
 945   int retval;
 946 };
 947 
 948 static int
 949 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
 950 {
 951   if (start == locals->curr_end)
 952     {
 953       /* Merge adjacent segments.  */
 954       locals->curr_end = end;
 955       return 0;
 956     }
 957 # if STACK_DIRECTION < 0
 958   if (locals->curr_start < locals->curr_end
 959       && locals->address >= locals->curr_start
 960       && locals->address <= locals->curr_end - 1)
 961     {
 962       locals->vma->start = locals->curr_start;
 963       locals->vma->end = locals->curr_end;
 964       locals->vma->prev_end = locals->prev_end;
 965       locals->retval = 0;
 966       return 1;
 967     }
 968   locals->prev_end = locals->curr_end;
 969 # else
 970   if (locals->stop_at_next_vma)
 971     {
 972       locals->vma->next_start = locals->curr_start;
 973       locals->stop_at_next_vma = 0;
 974       return 1;
 975     }
 976   if (locals->curr_start < locals->curr_end
 977       && locals->address >= locals->curr_start
 978       && locals->address <= locals->curr_end - 1)
 979     {
 980       locals->vma->start = locals->curr_start;
 981       locals->vma->end = locals->curr_end;
 982       locals->retval = 0;
 983       locals->stop_at_next_vma = 1;
 984       return 0;
 985     }
 986 # endif
 987   locals->curr_start = start; locals->curr_end = end;
 988   return 0;
 989 }
 990 
 991 int
 992 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
 993 {
 994   struct callback_locals locals;
 995   locals.address = address;
 996   locals.vma = vma;
 997   locals.curr_start = 0;
 998   locals.curr_end = 0;
 999 # if STACK_DIRECTION < 0
1000   locals.prev_end = 0;
1001 # else
1002   locals.stop_at_next_vma = 0;
1003 # endif
1004   locals.retval = -1;
1005 
1006   vma_iterate (&locals);
1007   if (locals.retval < 0)
1008     {
1009       if (locals.curr_start < locals.curr_end
1010           && address >= locals.curr_start && address <= locals.curr_end - 1)
1011         {
1012           vma->start = locals.curr_start;
1013           vma->end = locals.curr_end;
1014 # if STACK_DIRECTION < 0
1015           vma->prev_end = locals.prev_end;
1016 # else
1017           vma->next_start = 0;
1018 # endif
1019           locals.retval = 0;
1020         }
1021     }
1022   if (locals.retval == 0)
1023     {
1024 # if !(STACK_DIRECTION < 0)
1025       if (locals.stop_at_next_vma)
1026         vma->next_start = 0;
1027 # endif
1028       vma->is_near_this = simple_is_near_this;
1029       return 0;
1030     }
1031 
1032   /* FreeBSD 6.[01] doesn't allow one to distinguish unmapped pages from
1033      mapped but swapped-out pages.  See whether it's fixed.  */
1034   if (!is_mapped (0))
1035     /* OK, mincore() appears to work as expected.  */
1036     return mincore_get_vma (address, vma);
1037   return -1;
1038 }
1039 
1040 /* --------------------------- stackvma-netbsd.c --------------------------- */
1041 
1042 #elif defined __NetBSD__ /* NetBSD */
1043 
1044 struct callback_locals
1045 {
1046   uintptr_t address;
1047   struct vma_struct *vma;
1048   /* The stack appears as multiple adjacent segments, therefore we
1049      merge adjacent segments.  */
1050   uintptr_t curr_start, curr_end;
1051 # if STACK_DIRECTION < 0
1052   uintptr_t prev_end;
1053 # else
1054   int stop_at_next_vma;
1055 # endif
1056   int retval;
1057 };
1058 
1059 static int
1060 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1061 {
1062   if (start == locals->curr_end)
1063     {
1064       /* Merge adjacent segments.  */
1065       locals->curr_end = end;
1066       return 0;
1067     }
1068 # if STACK_DIRECTION < 0
1069   if (locals->curr_start < locals->curr_end
1070       && locals->address >= locals->curr_start
1071       && locals->address <= locals->curr_end - 1)
1072     {
1073       locals->vma->start = locals->curr_start;
1074       locals->vma->end = locals->curr_end;
1075       locals->vma->prev_end = locals->prev_end;
1076       locals->retval = 0;
1077       return 1;
1078     }
1079   locals->prev_end = locals->curr_end;
1080 # else
1081   if (locals->stop_at_next_vma)
1082     {
1083       locals->vma->next_start = locals->curr_start;
1084       locals->stop_at_next_vma = 0;
1085       return 1;
1086     }
1087   if (locals->curr_start < locals->curr_end
1088       && locals->address >= locals->curr_start
1089       && locals->address <= locals->curr_end - 1)
1090     {
1091       locals->vma->start = locals->curr_start;
1092       locals->vma->end = locals->curr_end;
1093       locals->retval = 0;
1094       locals->stop_at_next_vma = 1;
1095       return 0;
1096     }
1097 # endif
1098   locals->curr_start = start; locals->curr_end = end;
1099   return 0;
1100 }
1101 
1102 int
1103 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1104 {
1105   struct callback_locals locals;
1106   locals.address = address;
1107   locals.vma = vma;
1108   locals.curr_start = 0;
1109   locals.curr_end = 0;
1110 # if STACK_DIRECTION < 0
1111   locals.prev_end = 0;
1112 # else
1113   locals.stop_at_next_vma = 0;
1114 # endif
1115   locals.retval = -1;
1116 
1117   vma_iterate (&locals);
1118   if (locals.retval < 0)
1119     {
1120       if (locals.curr_start < locals.curr_end
1121           && address >= locals.curr_start && address <= locals.curr_end - 1)
1122         {
1123           vma->start = locals.curr_start;
1124           vma->end = locals.curr_end;
1125 # if STACK_DIRECTION < 0
1126           vma->prev_end = locals.prev_end;
1127 # else
1128           vma->next_start = 0;
1129 # endif
1130           locals.retval = 0;
1131         }
1132     }
1133   if (locals.retval == 0)
1134     {
1135 # if !(STACK_DIRECTION < 0)
1136       if (locals.stop_at_next_vma)
1137         vma->next_start = 0;
1138 # endif
1139       vma->is_near_this = simple_is_near_this;
1140       return 0;
1141     }
1142 
1143   return mincore_get_vma (address, vma);
1144 }
1145 
1146 /* --------------------------- stackvma-mquery.c --------------------------- */
1147 
1148 /* mquery() is a system call that allows one to inquire about the status of
1149    a range of pages of virtual memory.  In particular, it allows one to
1150    inquire whether a page is mapped at all, and where the next unmapped page
1151    after a given address is.
1152    As of 2021, mquery() is supported by:
1153      - OpenBSD, since OpenBSD 3.4.
1154    Note that mquery() can give different results than mincore().  For example, on
1155    OpenBSD 4.4 / i386 the stack segment (which starts around 0xcdbfe000)
1156    ends at 0xcfbfdfff according to mincore, but at 0xffffffff according to
1157    mquery.  */
1158 
1159 #elif defined __OpenBSD__ /* OpenBSD */
1160 
1161 # include <unistd.h> /* getpagesize, mincore */
1162 # include <sys/types.h>
1163 # include <sys/mman.h> /* mincore */
1164 
1165 /* Cache for getpagesize().  */
1166 static uintptr_t pagesize;
1167 
1168 /* Initialize pagesize.  */
1169 static void
1170 init_pagesize (void)
1171 {
1172   pagesize = getpagesize ();
1173 }
1174 
1175 /* Test whether the page starting at ADDR is mapped.
1176    ADDR must be a multiple of pagesize.  */
1177 static int
1178 is_mapped (uintptr_t addr)
1179 {
1180   /* Avoid calling mquery with a NULL first argument, because this argument
1181      value has a specific meaning.  We know the NULL page is unmapped.  */
1182   if (addr == 0)
1183     return 0;
1184   return mquery ((void *) addr, pagesize, 0, MAP_FIXED, -1, 0) == (void *) -1;
1185 }
1186 
1187 /* Assuming that the page starting at ADDR is mapped,
1188    return the start of its virtual memory range.
1189    ADDR must be a multiple of pagesize.  */
1190 static uintptr_t
1191 mapped_range_start (uintptr_t addr)
1192 {
1193   uintptr_t stepsize;
1194   uintptr_t known_unmapped_page;
1195 
1196   /* Look at smaller addresses, in larger and larger steps, to minimize the
1197      number of mquery() calls.  */
1198   stepsize = pagesize;
1199   for (;;)
1200     {
1201       uintptr_t hole;
1202 
1203       if (addr == 0)
1204         abort ();
1205 
1206       if (addr <= stepsize)
1207         {
1208           known_unmapped_page = 0;
1209           break;
1210         }
1211 
1212       hole = (uintptr_t) mquery ((void *) (addr - stepsize), pagesize,
1213                                      0, 0, -1, 0);
1214       if (!(hole == (uintptr_t) (void *) -1 || hole >= addr))
1215         {
1216           /* Some part of [addr - stepsize, addr - 1] is unmapped.  */
1217           known_unmapped_page = hole;
1218           break;
1219         }
1220 
1221       /* The entire range [addr - stepsize, addr - 1] is mapped.  */
1222       addr -= stepsize;
1223 
1224       if (2 * stepsize > stepsize && 2 * stepsize < addr)
1225         stepsize = 2 * stepsize;
1226     }
1227 
1228   /* Now reduce the step size again.
1229      We know that the page at known_unmapped_page is unmapped and that
1230      0 < addr - known_unmapped_page <= stepsize.  */
1231   while (stepsize > pagesize && stepsize / 2 >= addr - known_unmapped_page)
1232     stepsize = stepsize / 2;
1233   /* Still 0 < addr - known_unmapped_page <= stepsize.  */
1234   while (stepsize > pagesize)
1235     {
1236       uintptr_t hole;
1237 
1238       stepsize = stepsize / 2;
1239       hole = (uintptr_t) mquery ((void *) (addr - stepsize), pagesize,
1240                                      0, 0, -1, 0);
1241       if (!(hole == (uintptr_t) (void *) -1 || hole >= addr))
1242         /* Some part of [addr - stepsize, addr - 1] is unmapped.  */
1243         known_unmapped_page = hole;
1244       else
1245         /* The entire range [addr - stepsize, addr - 1] is mapped.  */
1246         addr -= stepsize;
1247       /* Still 0 < addr - known_unmapped_page <= stepsize.  */
1248     }
1249 
1250   return addr;
1251 }
1252 
1253 /* Assuming that the page starting at ADDR is mapped,
1254    return the end of its virtual memory range + 1.
1255    ADDR must be a multiple of pagesize.  */
1256 static uintptr_t
1257 mapped_range_end (uintptr_t addr)
1258 {
1259   uintptr_t end;
1260 
1261   if (addr == 0)
1262     abort ();
1263 
1264   end = (uintptr_t) mquery ((void *) addr, pagesize, 0, 0, -1, 0);
1265   if (end == (uintptr_t) (void *) -1)
1266     end = 0; /* wrap around */
1267   return end;
1268 }
1269 
1270 /* Determine whether an address range [ADDR1..ADDR2] is completely unmapped.
1271    ADDR1 must be <= ADDR2.  */
1272 static int
1273 is_unmapped (uintptr_t addr1, uintptr_t addr2)
1274 {
1275   /* Round addr1 down.  */
1276   addr1 = (addr1 / pagesize) * pagesize;
1277   /* Round addr2 up and turn it into an exclusive bound.  */
1278   addr2 = ((addr2 / pagesize) + 1) * pagesize;
1279 
1280   /* Avoid calling mquery with a NULL first argument, because this argument
1281      value has a specific meaning.  We know the NULL page is unmapped.  */
1282   if (addr1 == 0)
1283     addr1 = pagesize;
1284 
1285   if (addr1 < addr2)
1286     {
1287       if (mquery ((void *) addr1, addr2 - addr1, 0, MAP_FIXED, -1, 0)
1288           == (void *) -1)
1289         /* Not all the interval [addr1 .. addr2 - 1] is unmapped.  */
1290         return 0;
1291       else
1292         /* The interval [addr1 .. addr2 - 1] is unmapped.  */
1293         return 1;
1294     }
1295   return 1;
1296 }
1297 
1298 # if STACK_DIRECTION < 0
1299 
1300 /* Info about the gap between this VMA and the previous one.
1301    addr must be < vma->start.  */
1302 static int
1303 mquery_is_near_this (uintptr_t addr, struct vma_struct *vma)
1304 {
1305   /*   vma->start - addr <= (vma->start - vma->prev_end) / 2
1306      is mathematically equivalent to
1307        vma->prev_end <= 2 * addr - vma->start
1308      <==> is_unmapped (2 * addr - vma->start, vma->start - 1).
1309      But be careful about overflow: if 2 * addr - vma->start is negative,
1310      we consider a tiny "guard page" mapping [0, 0] to be present around
1311      NULL; it intersects the range (2 * addr - vma->start, vma->start - 1),
1312      therefore return false.  */
1313   uintptr_t testaddr = addr - (vma->start - addr);
1314   if (testaddr > addr) /* overflow? */
1315     return 0;
1316   /* Here testaddr <= addr < vma->start.  */
1317   return is_unmapped (testaddr, vma->start - 1);
1318 }
1319 
1320 # endif
1321 # if STACK_DIRECTION > 0
1322 
1323 /* Info about the gap between this VMA and the next one.
1324    addr must be > vma->end - 1.  */
1325 static int
1326 mquery_is_near_this (uintptr_t addr, struct vma_struct *vma)
1327 {
1328   /*   addr - vma->end < (vma->next_start - vma->end) / 2
1329      is mathematically equivalent to
1330        vma->next_start > 2 * addr - vma->end
1331      <==> is_unmapped (vma->end, 2 * addr - vma->end).
1332      But be careful about overflow: if 2 * addr - vma->end is > ~0UL,
1333      we consider a tiny "guard page" mapping [0, 0] to be present around
1334      NULL; it intersects the range (vma->end, 2 * addr - vma->end),
1335      therefore return false.  */
1336   uintptr_t testaddr = addr + (addr - vma->end);
1337   if (testaddr < addr) /* overflow? */
1338     return 0;
1339   /* Here vma->end - 1 < addr <= testaddr.  */
1340   return is_unmapped (vma->end, testaddr);
1341 }
1342 
1343 # endif
1344 
1345 int
1346 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1347 {
1348   if (pagesize == 0)
1349     init_pagesize ();
1350   address = (address / pagesize) * pagesize;
1351   vma->start = mapped_range_start (address);
1352   vma->end = mapped_range_end (address);
1353   vma->is_near_this = mquery_is_near_this;
1354   return 0;
1355 }
1356 
1357 /* ---------------------------- stackvma-mach.c ---------------------------- */
1358 
1359 #elif (defined __APPLE__ && defined __MACH__) /* macOS */
1360 
1361 #include <libc.h>
1362 #include <nlist.h>
1363 #include <mach/mach.h>
1364 #include <mach/machine/vm_param.h>
1365 
1366 int
1367 sigsegv_get_vma (uintptr_t req_address, struct vma_struct *vma)
1368 {
1369   uintptr_t prev_address = 0, prev_size = 0;
1370   uintptr_t join_address = 0, join_size = 0;
1371   int more = 1;
1372   vm_address_t address;
1373   vm_size_t size;
1374   task_t task = mach_task_self ();
1375 
1376   for (address = VM_MIN_ADDRESS; more; address += size)
1377     {
1378       mach_port_t object_name;
1379       /* In MacOS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t are
1380          32 bits wide in 32-bit processes and 64 bits wide in 64-bit processes,
1381          whereas mach_vm_address_t and mach_vm_size_t are always 64 bits wide.
1382          MacOS X 10.5 has three vm_region-like functions:
1383            - vm_region. It has arguments that depend on whether the current
1384              process is 32-bit or 64-bit. When linking dynamically, this
1385              function exists only in 32-bit processes. Therefore we use it only
1386              in 32-bit processes.
1387            - vm_region_64. It has arguments that depend on whether the current
1388              process is 32-bit or 64-bit. It interprets a flavor
1389              VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
1390              dangerous since 'struct vm_region_basic_info_64' is larger than
1391              'struct vm_region_basic_info'; therefore let's write
1392              VM_REGION_BASIC_INFO_64 explicitly.
1393            - mach_vm_region. It has arguments that are 64-bit always. This
1394              function is useful when you want to access the VM of a process
1395              other than the current process.
1396          In 64-bit processes, we could use vm_region_64 or mach_vm_region.
1397          I choose vm_region_64 because it uses the same types as vm_region,
1398          resulting in less conditional code.  */
1399 # if defined __aarch64__ || defined __ppc64__ || defined __x86_64__
1400       struct vm_region_basic_info_64 info;
1401       mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
1402 
1403       more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
1404                             (vm_region_info_t)&info, &info_count, &object_name)
1405               == KERN_SUCCESS);
1406 # else
1407       struct vm_region_basic_info info;
1408       mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;
1409 
1410       more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
1411                          (vm_region_info_t)&info, &info_count, &object_name)
1412               == KERN_SUCCESS);
1413 # endif
1414       if (!more)
1415         {
1416           address = join_address + join_size;
1417           size = 0;
1418         }
1419 
1420       if ((uintptr_t) address == join_address + join_size)
1421         join_size += size;
1422       else
1423         {
1424           prev_address = join_address;
1425           prev_size = join_size;
1426           join_address = (uintptr_t) address;
1427           join_size = size;
1428         }
1429 
1430       if (object_name != MACH_PORT_NULL)
1431         mach_port_deallocate (mach_task_self (), object_name);
1432 
1433 # if STACK_DIRECTION < 0
1434       if (join_address <= req_address && join_address + join_size > req_address)
1435         {
1436           vma->start = join_address;
1437           vma->end = join_address + join_size;
1438           vma->prev_end = prev_address + prev_size;
1439           vma->is_near_this = simple_is_near_this;
1440           return 0;
1441         }
1442 # else
1443       if (prev_address <= req_address && prev_address + prev_size > req_address)
1444         {
1445           vma->start = prev_address;
1446           vma->end = prev_address + prev_size;
1447           vma->next_start = join_address;
1448           vma->is_near_this = simple_is_near_this;
1449           return 0;
1450         }
1451 # endif
1452     }
1453 
1454 # if STACK_DIRECTION > 0
1455   if (join_address <= req_address && join_address + size > req_address)
1456     {
1457       vma->start = prev_address;
1458       vma->end = prev_address + prev_size;
1459       vma->next_start = ~0UL;
1460       vma->is_near_this = simple_is_near_this;
1461       return 0;
1462     }
1463 # endif
1464 
1465   return -1;
1466 }
1467 
1468 /* -------------------------------------------------------------------------- */
1469 
1470 #elif defined _AIX /* AIX */
1471 
1472 int
1473 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1474 {
1475   return mincore_get_vma (address, vma);
1476 }
1477 
1478 /* --------------------------- stackvma-procfs.h --------------------------- */
1479 
1480 #elif defined __sgi || defined __sun /* IRIX, Solaris */
1481 
1482 # include <errno.h> /* errno, EINTR */
1483 # include <fcntl.h> /* open, O_RDONLY */
1484 # include <stddef.h> /* size_t */
1485 # include <unistd.h> /* getpagesize, getpid, read, close */
1486 # include <sys/types.h>
1487 # include <sys/mman.h> /* mmap, munmap */
1488 # include <sys/stat.h> /* fstat */
1489 # include <string.h> /* memcpy */
1490 
1491 /* Try to use the newer ("structured") /proc filesystem API, if supported.  */
1492 # define _STRUCTURED_PROC 1
1493 # include <sys/procfs.h> /* prmap_t, optionally PIOC* */
1494 
1495 # if !defined __sun
1496 
1497 /* Cache for getpagesize().  */
1498 static uintptr_t pagesize;
1499 
1500 /* Initialize pagesize.  */
1501 static void
1502 init_pagesize (void)
1503 {
1504   pagesize = getpagesize ();
1505 }
1506 
1507 # endif
1508 
1509 struct callback_locals
1510 {
1511   uintptr_t address;
1512   struct vma_struct *vma;
1513 # if STACK_DIRECTION < 0
1514   uintptr_t prev;
1515 # else
1516   int stop_at_next_vma;
1517 # endif
1518   int retval;
1519 };
1520 
1521 static int
1522 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1523 {
1524 # if STACK_DIRECTION < 0
1525   if (locals->address >= start && locals->address <= end - 1)
1526     {
1527       locals->vma->start = start;
1528       locals->vma->end = end;
1529       locals->vma->prev_end = locals->prev;
1530       locals->retval = 0;
1531       return 1;
1532     }
1533   locals->prev = end;
1534 # else
1535   if (locals->stop_at_next_vma)
1536     {
1537       locals->vma->next_start = start;
1538       locals->stop_at_next_vma = 0;
1539       return 1;
1540     }
1541   if (locals->address >= start && locals->address <= end - 1)
1542     {
1543       locals->vma->start = start;
1544       locals->vma->end = end;
1545       locals->retval = 0;
1546       locals->stop_at_next_vma = 1;
1547       return 0;
1548     }
1549 # endif
1550   return 0;
1551 }
1552 
1553 /* Iterate over the virtual memory areas of the current process.
1554    If such iteration is supported, the callback is called once for every
1555    virtual memory area, in ascending order, with the following arguments:
1556      - LOCALS is the same argument as passed to vma_iterate.
1557      - START is the address of the first byte in the area, page-aligned.
1558      - END is the address of the last byte in the area plus 1, page-aligned.
1559        Note that it may be 0 for the last area in the address space.
1560    If the callback returns 0, the iteration continues.  If it returns 1,
1561    the iteration terminates prematurely.
1562    This function may open file descriptors, but does not call malloc().
1563    Return 0 if all went well, or -1 in case of error.  */
1564 /* This code is a simplified copy (no handling of protection flags) of the
1565    code in gnulib's lib/vma-iter.c.  */
1566 static int
1567 vma_iterate (struct callback_locals *locals)
1568 {
1569   /* Note: Solaris <sys/procfs.h> defines a different type prmap_t with
1570      _STRUCTURED_PROC than without! Here's a table of sizeof(prmap_t):
1571                                   32-bit   64-bit
1572          _STRUCTURED_PROC = 0       32       56
1573          _STRUCTURED_PROC = 1       96      104
1574      Therefore, if the include files provide the newer API, prmap_t has
1575      the bigger size, and thus you MUST use the newer API.  And if the
1576      include files provide the older API, prmap_t has the smaller size,
1577      and thus you MUST use the older API.  */
1578 
1579 # if defined PIOCNMAP && defined PIOCMAP
1580   /* We must use the older /proc interface.  */
1581 
1582   char fnamebuf[6+10+1];
1583   char *fname;
1584   int fd;
1585   int nmaps;
1586   size_t memneed;
1587 #  if HAVE_MAP_ANONYMOUS
1588 #   define zero_fd -1
1589 #   define map_flags MAP_ANONYMOUS
1590 #  else /* !HAVE_MAP_ANONYMOUS */
1591   int zero_fd;
1592 #   define map_flags 0
1593 #  endif
1594   void *auxmap;
1595   uintptr_t auxmap_start;
1596   uintptr_t auxmap_end;
1597   prmap_t* maps;
1598   prmap_t* mp;
1599 
1600   if (pagesize == 0)
1601     init_pagesize ();
1602 
1603   /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()).  */
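  /* (The conversion is done by hand rather than with sprintf, presumably to
     keep this code independent of stdio, in the same spirit as the no-malloc
     rule explained below.)  */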
1604   fname = fnamebuf + sizeof (fnamebuf) - 1;
1605   *fname = '\0';
1606   {
1607     unsigned int value = getpid ();
1608     do
1609       *--fname = (value % 10) + '0';
1610     while ((value = value / 10) > 0);
1611   }
1612   fname -= 6;
1613   memcpy (fname, "/proc/", 6);
1614 
1615   fd = open (fname, O_RDONLY);
1616   if (fd < 0)
1617     return -1;
1618 
1619   if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
1620     goto fail2;
1621 
1622   memneed = (nmaps + 10) * sizeof (prmap_t);
1623   /* Allocate memneed bytes of memory.
1624      We cannot use alloca here, because not much stack space is guaranteed.
1625      We also cannot use malloc here, because a malloc() call may call mmap()
1626      and thus pre-allocate available memory.
1627      So use mmap(), and ignore the resulting VMA.  */
1628   memneed = ((memneed - 1) / pagesize + 1) * pagesize;
1629 #  if !HAVE_MAP_ANONYMOUS
1630   zero_fd = open ("/dev/zero", O_RDONLY, 0644);
1631   if (zero_fd < 0)
1632     goto fail2;
1633 #  endif
1634   auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
1635                           map_flags | MAP_PRIVATE, zero_fd, 0);
1636 #  if !HAVE_MAP_ANONYMOUS
1637   close (zero_fd);
1638 #  endif
1639   if (auxmap == (void *) -1)
1640     goto fail2;
1641   auxmap_start = (uintptr_t) auxmap;
1642   auxmap_end = auxmap_start + memneed;
1643   maps = (prmap_t *) auxmap;
1644 
1645   if (ioctl (fd, PIOCMAP, maps) < 0)
1646     goto fail1;
1647 
1648   for (mp = maps;;)
1649     {
1650       uintptr_t start, end;
1651 
1652       start = (uintptr_t) mp->pr_vaddr;
1653       end = start + mp->pr_size;
1654       if (start == 0 && end == 0)
1655         break;
1656       mp++;
1657       if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
1658         {
1659           /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
1660              = [start,auxmap_start-1] u [auxmap_end,end-1].  */
1661           if (start < auxmap_start)
1662             if (callback (locals, start, auxmap_start))
1663               break;
1664           if (auxmap_end - 1 < end - 1)
1665             if (callback (locals, auxmap_end, end))
1666               break;
1667         }
1668       else
1669         {
1670           if (callback (locals, start, end))
1671             break;
1672         }
1673     }
1674   munmap (auxmap, memneed);
1675   close (fd);
1676   return 0;
1677 
1678  fail1:
1679   munmap (auxmap, memneed);
1680  fail2:
1681   close (fd);
1682   return -1;
1683 
1684 # else
1685   /* We must use the newer /proc interface.
1686      Documentation:
1687      https://docs.oracle.com/cd/E23824_01/html/821-1473/proc-4.html
1688      The contents of /proc/<pid>/map consist of records of type
1689      prmap_t.  These are different in 32-bit and 64-bit processes,
1690      but here we are fortunately accessing only the current process.  */
1691 
1692   char fnamebuf[6+10+4+1];
1693   char *fname;
1694   int fd;
1695   int nmaps;
1696   size_t memneed;
1697 #  if HAVE_MAP_ANONYMOUS
1698 #   define zero_fd -1
1699 #   define map_flags MAP_ANONYMOUS
1700 #  else /* !HAVE_MAP_ANONYMOUS */
1701   int zero_fd;
1702 #   define map_flags 0
1703 #  endif
1704   void *auxmap;
1705   uintptr_t auxmap_start;
1706   uintptr_t auxmap_end;
1707   prmap_t* maps;
1708   prmap_t* maps_end;
1709   prmap_t* mp;
1710 
1711   if (pagesize == 0)
1712     init_pagesize ();
1713 
1714   /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()).  */
1715   fname = fnamebuf + sizeof (fnamebuf) - 1 - 4;
1716   memcpy (fname, "/map", 4 + 1);
1717   {
1718     unsigned int value = getpid ();
1719     do
1720       *--fname = (value % 10) + '0';
1721     while ((value = value / 10) > 0);
1722   }
1723   fname -= 6;
1724   memcpy (fname, "/proc/", 6);
1725 
1726   fd = open (fname, O_RDONLY);
1727   if (fd < 0)
1728     return -1;
1729 
1730   {
1731     struct stat statbuf;
1732     if (fstat (fd, &statbuf) < 0)
1733       goto fail2;
1734     nmaps = statbuf.st_size / sizeof (prmap_t);
1735   }
1736 
1737   memneed = (nmaps + 10) * sizeof (prmap_t);
1738   /* Allocate memneed bytes of memory.
1739      We cannot use alloca here, because not much stack space is guaranteed.
1740      We also cannot use malloc here, because a malloc() call may call mmap()
1741      and thus pre-allocate available memory.
1742      So use mmap(), and ignore the resulting VMA.  */
1743   memneed = ((memneed - 1) / pagesize + 1) * pagesize;
1744 #  if !HAVE_MAP_ANONYMOUS
1745   zero_fd = open ("/dev/zero", O_RDONLY, 0644);
1746   if (zero_fd < 0)
1747     goto fail2;
1748 #  endif
1749   auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
1750                           map_flags | MAP_PRIVATE, zero_fd, 0);
1751 #  if !HAVE_MAP_ANONYMOUS
1752   close (zero_fd);
1753 #  endif
1754   if (auxmap == (void *) -1)
1755     goto fail2;
1756   auxmap_start = (uintptr_t) auxmap;
1757   auxmap_end = auxmap_start + memneed;
1758   maps = (prmap_t *) auxmap;
1759 
1760   /* Read up to memneed bytes from fd into maps.  */
1761   {
1762     size_t remaining = memneed;
1763     size_t total_read = 0;
1764     char *ptr = (char *) maps;
1765 
1766     do
1767       {
1768         size_t nread = read (fd, ptr, remaining);
1769         if (nread == (size_t)-1)
1770           {
1771             if (errno == EINTR)
1772               continue;
1773             goto fail1;
1774           }
1775         if (nread == 0)
1776           /* EOF */
1777           break;
1778         total_read += nread;
1779         ptr += nread;
1780         remaining -= nread;
1781       }
1782     while (remaining > 0);
1783 
1784     nmaps = (memneed - remaining) / sizeof (prmap_t);
1785     maps_end = maps + nmaps;
1786   }
1787 
1788   for (mp = maps; mp < maps_end; mp++)
1789     {
1790       uintptr_t start, end;
1791 
1792       start = (uintptr_t) mp->pr_vaddr;
1793       end = start + mp->pr_size;
1794       if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
1795         {
1796           /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
1797              = [start,auxmap_start-1] u [auxmap_end,end-1].  */
1798           if (start < auxmap_start)
1799             if (callback (locals, start, auxmap_start))
1800               break;
1801           if (auxmap_end - 1 < end - 1)
1802             if (callback (locals, auxmap_end, end))
1803               break;
1804         }
1805       else
1806         {
1807           if (callback (locals, start, end))
1808             break;
1809         }
1810     }
1811   munmap (auxmap, memneed);
1812   close (fd);
1813   return 0;
1814 
1815  fail1:
1816   munmap (auxmap, memneed);
1817  fail2:
1818   close (fd);
1819   return -1;
1820 
1821 # endif
1822 }
1823 
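/* A minimal stand-alone sketch of the structured /proc interface used by the
   second branch of vma_iterate above: /proc/<pid>/map is an array of prmap_t
   records, so its size divided by sizeof (prmap_t) yields the number of
   mappings.  Illustrative only: it assumes /proc/self is available, uses
   stdio (which the code above deliberately avoids), the function name
   count_mappings is made up, and the block is kept out of the build.  */
#if 0
#define _STRUCTURED_PROC 1 /* must precede <sys/procfs.h>, as above */
#include <sys/procfs.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

static void
count_mappings (void)
{
  int fd = open ("/proc/self/map", O_RDONLY);
  if (fd >= 0)
    {
      struct stat statbuf;
      if (fstat (fd, &statbuf) == 0)
        printf ("%lu mappings\n",
                (unsigned long) (statbuf.st_size / sizeof (prmap_t)));
      close (fd);
    }
}
#endif
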
1824 int
1825 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1826 {
1827   struct callback_locals locals;
1828   locals.address = address;
1829   locals.vma = vma;
1830 # if STACK_DIRECTION < 0
1831   locals.prev = 0;
1832 # else
1833   locals.stop_at_next_vma = 0;
1834 # endif
1835   locals.retval = -1;
1836 
1837   vma_iterate (&locals);
1838   if (locals.retval == 0)
1839     {
1840 # if !(STACK_DIRECTION < 0)
1841       if (locals.stop_at_next_vma)
1842         vma->next_start = 0;
1843 # endif
1844       vma->is_near_this = simple_is_near_this;
1845       return 0;
1846     }
1847 
1848 # if defined __sun
1849   return mincore_get_vma (address, vma);
1850 # else
1851   return -1;
1852 # endif
1853 }
1854 
1855 /* -------------------------------------------------------------------------- */
1856 
1857 #elif defined __CYGWIN__ /* Cygwin */
1858 
1859 struct callback_locals
1860 {
1861   uintptr_t address;
1862   struct vma_struct *vma;
1863   /* The stack appears as three adjacent segments, therefore we
1864      merge adjacent segments.  */
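  /* For example, if the iteration reports the adjacent ranges [A,B), [B,C)
     and [C,D) in turn, callback () below coalesces them into the single
     range [A,D) before that range is tested against ADDRESS.  */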
1865   uintptr_t curr_start, curr_end;
1866 # if STACK_DIRECTION < 0
1867   uintptr_t prev_end;
1868 # else
1869   int stop_at_next_vma;
1870 # endif
1871   int retval;
1872 };
1873 
1874 static int
1875 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1876 {
1877   if (start == locals->curr_end)
1878     {
1879       /* Merge adjacent segments.  */
1880       locals->curr_end = end;
1881       return 0;
1882     }
1883 # if STACK_DIRECTION < 0
1884   if (locals->curr_start < locals->curr_end
1885       && locals->address >= locals->curr_start
1886       && locals->address <= locals->curr_end - 1)
1887     {
1888       locals->vma->start = locals->curr_start;
1889       locals->vma->end = locals->curr_end;
1890       locals->vma->prev_end = locals->prev_end;
1891       locals->retval = 0;
1892       return 1;
1893     }
1894   locals->prev_end = locals->curr_end;
1895 # else
1896   if (locals->stop_at_next_vma)
1897     {
1898       locals->vma->next_start = locals->curr_start;
1899       locals->stop_at_next_vma = 0;
1900       return 1;
1901     }
1902   if (locals->curr_start < locals->curr_end
1903       && locals->address >= locals->curr_start
1904       && locals->address <= locals->curr_end - 1)
1905     {
1906       locals->vma->start = locals->curr_start;
1907       locals->vma->end = locals->curr_end;
1908       locals->retval = 0;
1909       locals->stop_at_next_vma = 1;
1910       return 0;
1911     }
1912 # endif
1913   locals->curr_start = start; locals->curr_end = end;
1914   return 0;
1915 }
1916 
1917 int
1918 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1919 {
1920   struct callback_locals locals;
1921   locals.address = address;
1922   locals.vma = vma;
1923   locals.curr_start = 0;
1924   locals.curr_end = 0;
1925 # if STACK_DIRECTION < 0
1926   locals.prev_end = 0;
1927 # else
1928   locals.stop_at_next_vma = 0;
1929 # endif
1930   locals.retval = -1;
1931 
1932   vma_iterate (&locals);
1933   if (locals.retval < 0)
1934     {
1935       if (locals.curr_start < locals.curr_end
1936           && address >= locals.curr_start && address <= locals.curr_end - 1)
1937         {
1938           vma->start = locals.curr_start;
1939           vma->end = locals.curr_end;
1940 # if STACK_DIRECTION < 0
1941           vma->prev_end = locals.prev_end;
1942 # else
1943           vma->next_start = 0;
1944 # endif
1945           locals.retval = 0;
1946         }
1947     }
1948   if (locals.retval == 0)
1949     {
1950 # if !(STACK_DIRECTION < 0)
1951       if (locals.stop_at_next_vma)
1952         vma->next_start = 0;
1953 # endif
1954       vma->is_near_this = simple_is_near_this;
1955       return 0;
1956     }
1957 
1958   return -1;
1959 }
1960 
1961 /* ---------------------------- stackvma-beos.h ---------------------------- */
1962 
1963 #elif defined __HAIKU__ /* Haiku */
1964 
1965 # include <OS.h> /* get_next_area_info */
1966 
1967 struct callback_locals
1968 {
1969   uintptr_t address;
1970   struct vma_struct *vma;
1971 # if STACK_DIRECTION < 0
1972   uintptr_t prev;
1973 # else
1974   int stop_at_next_vma;
1975 # endif
1976   int retval;
1977 };
1978 
1979 static int
1980 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1981 {
1982 # if STACK_DIRECTION < 0
1983   if (locals->address >= start && locals->address <= end - 1)
1984     {
1985       locals->vma->start = start;
1986       locals->vma->end = end;
1987       locals->vma->prev_end = locals->prev;
1988       locals->retval = 0;
1989       return 1;
1990     }
1991   locals->prev = end;
1992 # else
1993   if (locals->stop_at_next_vma)
1994     {
1995       locals->vma->next_start = start;
1996       locals->stop_at_next_vma = 0;
1997       return 1;
1998     }
1999   if (locals->address >= start && locals->address <= end - 1)
2000     {
2001       locals->vma->start = start;
2002       locals->vma->end = end;
2003       locals->retval = 0;
2004       locals->stop_at_next_vma = 1;
2005       return 0;
2006     }
2007 # endif
2008   return 0;
2009 }
2010 
2011 /* Iterate over the virtual memory areas of the current process.
2012    If such iteration is supported, the callback is called once for every
2013    virtual memory area, in ascending order, with the following arguments:
2014      - LOCALS is the same argument as passed to vma_iterate.
2015      - START is the address of the first byte in the area, page-aligned.
2016      - END is the address of the last byte in the area plus 1, page-aligned.
2017        Note that it may be 0 for the last area in the address space.
2018    If the callback returns 0, the iteration continues.  If it returns 1,
2019    the iteration terminates prematurely.
2020    This function may open file descriptors, but does not call malloc().
2021    Return 0 if all went well, or -1 in case of error.  */
2022 /* This code is a simplified copy (no handling of protection flags) of the
2023    code in gnulib's lib/vma-iter.c.  */
2024 static int
2025 vma_iterate (struct callback_locals *locals)
2026 {
2027   area_info info;
2028   ssize_t cookie;
2029 
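  /* Team 0 denotes the calling team; the cookie must start at 0 and is
     advanced by get_next_area_info itself on each call.  */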
2030   cookie = 0;
2031   while (get_next_area_info (0, &cookie, &info) == B_OK)
2032     {
2033       uintptr_t start, end;
2034 
2035       start = (uintptr_t) info.address;
2036       end = start + info.size;
2037 
2038       if (callback (locals, start, end))
2039         break;
2040     }
2041   return 0;
2042 }
2043 
2044 int
2045 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
2046 {
2047   struct callback_locals locals;
2048   locals.address = address;
2049   locals.vma = vma;
2050 # if STACK_DIRECTION < 0
2051   locals.prev = 0;
2052 # else
2053   locals.stop_at_next_vma = 0;
2054 # endif
2055   locals.retval = -1;
2056 
2057   vma_iterate (&locals);
2058   if (locals.retval == 0)
2059     {
2060 # if !(STACK_DIRECTION < 0)
2061       if (locals.stop_at_next_vma)
2062         vma->next_start = 0;
2063 # endif
2064       vma->is_near_this = simple_is_near_this;
2065       return 0;
2066     }
2067   return -1;
2068 }
2069 
2070 /* -------------------------------------------------------------------------- */
2071 
2072 #else /* Hurd, Minix, ... */
2073 
2074 int
2075 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
2076 {
2077   /* No way.  */
2078   return -1;
2079 }
2080 
2081 #endif
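
/* A stand-alone usage sketch for the sigsegv_get_vma entry point defined in
   one of the branches above: query the memory area that contains a given
   address and report its bounds.  It relies only on the declarations from
   "stackvma.h" included at the top of this file; the function name
   show_vma_of and the use of stdio are illustrative, so the block is kept
   out of the build.  */
#if 0
#include <stdio.h>

static void
show_vma_of (uintptr_t some_address)
{
  struct vma_struct vma;

  if (sigsegv_get_vma (some_address, &vma) == 0)
    /* some_address lies in the half-open range [vma.start, vma.end).  */
    printf ("0x%lx is in the VMA 0x%lx..0x%lx\n",
            (unsigned long) some_address,
            (unsigned long) vma.start, (unsigned long) vma.end);
  else
    printf ("no VMA found for 0x%lx\n", (unsigned long) some_address);
}
#endif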
