root/maint/gnulib/lib/ssfmalloc.h


DEFINITIONS

This source file includes the following definitions.
  1. compare_pages_by_free_space
  2. page_free_space_is_at_least
  3. set_free_space
  4. flush_all_updates
  5. add_update
  6. drop_update
  7. small_block_page_available_bitmap
  8. small_block_page_blockend_bitmap
  9. init_small_block_page_pool
  10. init_small_block_page
  11. allocate_small_block_in_page
  12. free_small_block_in_page
  13. init_medium_block_page_pool
  14. init_medium_block_page
  15. allocate_medium_block_in_page
  16. free_medium_block_in_page
  17. allocate_block_from_pool
  18. free_block_from_pool
  19. gl_lock_define_initialized
  20. allocate_block
  21. free_block

/* Simple and straight-forward malloc implementation (front end).

   Copyright (C) 2020-2021 Free Software Foundation, Inc.

   This file is free software: you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   This file is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <https://www.gnu.org/licenses/>.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2020.  */

/* This file implements an allocator of memory blocks of given size (a
   "malloc front end"), based on an allocator of memory pages (a "malloc
   back end").

   The need for such an allocator arises because a memory block is often
   50 bytes large or less, whereas an allocator of memory pages provides
   entire pages (4096 bytes or more).

   This implementation attempts to be
     - simple and straight-forward,
     - respecting locality of reference,
     - usable for small allocations,
     - nevertheless of reasonable speed.

   Simple and straight-forward - means that it contains only a small amount
   of code (compared to e.g. tcmalloc).

   Respecting locality of reference - means that searching for a free block
   will not follow lists of pointers that touch unrelated cache lines in
   the same page or even unrelated memory pages, because that would cause
   cache misses or even swapping in of unrelated memory pages.

   Usable for small allocations - means that it can be used for data types
   with few instances.  It does not, unlike some other malloc implementations,
   allocate 256 KB of memory up-front.  Nor does it allocate memory pages
   per thread.

   Reasonable speed is nevertheless guaranteed by
     - choosing algorithms that lead to little fragmentation,
     - small caches where they make sense.
 */

/* The user of this file needs to define the following macros before including
   this file:

     PAGESIZE           A variable-like macro of type intptr_t or uintptr_t
                        that evaluates to the memory page size (>= 4096).

     PAGESIZE_MAX       A constant that specifies an upper bound for PAGESIZE.

     ALLOC_PAGES        A function-like macro with the signature
                          uintptr_t ALLOC_PAGES (size_t size)
                        where the argument size is > 0 and a multiple of
                        PAGESIZE.  It returns a multiple of PAGESIZE, or 0
                        upon failure.

     FREE_PAGES         A function-like macro with the signature
                          void FREE_PAGES (uintptr_t pages, size_t size)
                        where pages is a non-zero value returned by
                        ALLOC_PAGES (size).

     ALIGNMENT          A constant that specifies the desired alignment of all
                        the returned memory blocks.  Possible values are the
                        powers of 2, from sizeof (void *) to 32.

     PAGE_RESERVED_HEADER_SIZE   A constant, either 0 or a multiple of
                        sizeof (void *), that denotes the size of a reserved
                        header - not to be used by the application - at the
                        beginning of a page sequence returned by ALLOC_PAGES.
 */
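
/* For illustration: a minimal POSIX back end based on mmap, as a user might
   define it before including this file.  This is a hedged sketch, not part of
   this file's API; the helper name ssf_alloc_pages is hypothetical, and the
   pagesize variable is assumed to be set once at startup, e.g. from
   sysconf (_SC_PAGESIZE).

     #include <unistd.h>
     #include <sys/mman.h>

     static uintptr_t pagesize;
     #define PAGESIZE pagesize
     #define PAGESIZE_MAX 65536

     static uintptr_t
     ssf_alloc_pages (size_t size)
     {
       void *p = mmap (NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
       return p == MAP_FAILED ? 0 : (uintptr_t) p;
     }

     #define ALLOC_PAGES(size) ssf_alloc_pages (size)
     #define FREE_PAGES(pages, size) munmap ((void *) (pages), (size))
     #define ALIGNMENT (sizeof (void *))
     #define PAGE_RESERVED_HEADER_SIZE 0
 */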

/* =================== Declarations of exported functions =================== */

#include <stdint.h>

/* Allocates a block of memory, aligned to ALIGNMENT bytes.
   Returns 0 upon failure.  */
static uintptr_t allocate_block (size_t size);

/* Frees a block of memory, returned by allocate_block.  */
static void free_block (uintptr_t block);

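/* Typical usage is malloc-like, except that blocks are addressed as
   uintptr_t values (a sketch; the out-of-memory handling is up to the
   caller):

     uintptr_t block = allocate_block (50);
     if (block == 0)
       ... handle out-of-memory ...
     memset ((void *) block, 'x', 50);
     free_block (block);
 */
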
/* ============================= Implementation ============================= */

/* Outline of the implementation decisions (ID):

   * ID: This implementation considers three types of blocks:
     - small blocks - these are allocated in "small block" pages.
     - medium blocks - these are allocated in "medium block" pages.
     - large blocks - these are allocated individually, with a page or
       sequence of pages uniquely for this block.
   * Rationale:
     - Most memory allocations are small (e.g. <= 32 bytes); this is a lesson
       learned from xc/programs/Xserver/os/xalloc.c (1997) [Pascal Haible].
     - Fragmentation is one of the biggest problems, and keeping large
       blocks and small blocks separate from medium blocks is one way to
       control it.

   * ID: If an allocation succeeds in one page, the next allocation (of the
     same type of block) will try to use the same page.
   * Rationale: Locality of reference.

   * ID: Pages of small or medium blocks have their management data structures
     concentrated at the beginning of the page.  No chained lists that force
     a walk through the page.
   * Rationale: Locality of reference.

   * ID: Across pages, the management of the free space is done through data
     structures outside the pages.  No chained lists across pages.
   * Rationale: Locality of reference.

 */
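
/* As a concrete illustration of the three block types (assuming
   PAGESIZE = 4096, ALIGNMENT = 8, and PAGE_RESERVED_HEADER_SIZE = 0, so that
   SMALL_BLOCK_MAX_SIZE below is 256 and MEDIUM_BLOCKS_PAGE_CAPACITY comes
   out close to 4000): a request of 1..256 bytes is a small block, a request
   of 257 bytes up to MEDIUM_BLOCKS_PAGE_CAPACITY bytes is a medium block,
   and anything larger gets its own sequence of pages as a large block.  */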

#include <stdlib.h>
#include <string.h>     /* ffsll */
#include <strings.h>    /* ffs */
#include "flexmember.h"
#include "glthread/lock.h"
#include "thread-optim.h"
#include "gl_oset.h"
#include "gl_rbtree_oset.h"

/* Help the branch prediction.  */
#if __GNUC__ >= 3
# define likely(cond)   __builtin_expect ((cond), 1)
# define unlikely(cond) __builtin_expect ((cond), 0)
#else
# define likely(cond)   (cond)
# define unlikely(cond) (cond)
#endif

enum { small_page_type = 1, medium_page_type = 2, large_page_type = 3 };

/* Header of a page of small or medium blocks or of a large block.
   Lies at an address that is a multiple of PAGESIZE.  */
struct any_page_header
{
  #if PAGE_RESERVED_HEADER_SIZE > 0
  void * reserved[PAGE_RESERVED_HEADER_SIZE / sizeof (void *)];
  #endif
  /* small_page_type or medium_page_type or large_page_type */
  unsigned char page_type;
};

/* ========================= Small and medium blocks ======================== */

/* An integer type, capable of holding the values 0 .. PAGESIZE.  */
#if PAGESIZE_MAX >= 0x10000
typedef unsigned int   pg_offset_t;
#else
typedef unsigned short pg_offset_t;
#endif

/* Tree element that corresponds to a page.
   These tree elements are allocated via malloc().  */
struct page_tree_element
{
  uintptr_t page;
  pg_offset_t free_space;
};

/* Header of a page of small or medium blocks.
   Lies at an address that is a multiple of PAGESIZE.  */
struct dissected_page_header
{
  struct any_page_header common;
  /* Amount of free space in this page.  Always a multiple of ALIGNMENT.  */
  pg_offset_t free_space;
  /* The tree element.  */
  struct page_tree_element *tree_element;
};

/* Data structure for managing a pool of pages.  */
struct page_pool
{
  /* Methods.  */
  void (*init_page_pool) (struct page_pool *pool);
  void (*init_page) (uintptr_t page);
  uintptr_t (*allocate_block_in_page) (size_t size, uintptr_t page);
  void (*free_block_in_page) (uintptr_t block, uintptr_t page);

  /* Maximum free space in a page of this pool.  */
  size_t page_capacity;

  /* Page that provided the last successful allocation from this pool,
     or 0.  */
  uintptr_t last_page;

  /* Ordered set of managed pages, sorted according to free_space, in ascending
     order.  */
  gl_oset_t /* <struct page_tree_element *> */ managed_pages;

  /* A queue of pages which have a modified free_space but which have not been
     updated in the managed_pages tree so far.  */
  #define UPDATE_QUEUE_SIZE 10
  unsigned int update_queue_count; /* <= UPDATE_QUEUE_SIZE */
  uintptr_t update_queue[UPDATE_QUEUE_SIZE];

  /* A page that could be freed.
     We don't free it immediately, so that an allocation/deallocation
     pattern like
       2x allocate, 2x free, 2x allocate, 2x free, 2x allocate, 2x free, ...
     does not allocate and free a page at each round trip.  */
  uintptr_t freeable_page;
};

/* Comparison function for managed_pages.  */
static int
compare_pages_by_free_space (const void *elt1, const void *elt2)
{
  struct page_tree_element *element1 = (struct page_tree_element *) elt1;
  struct page_tree_element *element2 = (struct page_tree_element *) elt2;
  int cmp = _GL_CMP (element1->free_space, element2->free_space);
  if (unlikely (cmp == 0))
    cmp = _GL_CMP (element1->page, element2->page);
  return cmp;
}

/* Tests whether the free space in a tree element is greater than or equal to
   the given threshold.  */
static bool
page_free_space_is_at_least (const void *elt, const void *threshold)
{
  struct page_tree_element *element = (struct page_tree_element *) elt;

  return element->free_space >= (uintptr_t) threshold;
}

/* Updates the free space of a 'struct page_tree_element *'.
   Only to be called through gl_oset_update!  */
static void
set_free_space (const void *elt, void *action_data)
{
  struct page_tree_element *element = (struct page_tree_element *) elt;

  element->free_space = (pg_offset_t) (uintptr_t) action_data;
}

/* Executes the pending updates in the managed_pages tree.  */
static void
flush_all_updates (struct page_pool *pool)
{
  size_t count = pool->update_queue_count;
  while (likely (count > 0))
    {
      --count;
      uintptr_t page = pool->update_queue[count];
      struct dissected_page_header *pageptr =
        (struct dissected_page_header *) page;
      struct page_tree_element *tree_element = pageptr->tree_element;
      if (gl_oset_update (pool->managed_pages, tree_element,
                          set_free_space,
                          (void *) (uintptr_t) pageptr->free_space)
          < 0)
        /* A collision was found.  This contradicts the definition of
           compare_pages_by_free_space.  */
        abort ();
    }
  pool->update_queue_count = 0;
}

/* Adds a page to the update queue.
   This function has to be called when the free_space of the page has
   changed.  */
static inline void
add_update (uintptr_t page, struct page_pool *pool)
{
  size_t count = pool->update_queue_count;
  size_t i;
  for (i = 0; i < count; i++)
    if (pool->update_queue[i] == page)
      /* It's already in the queue.  */
      return;

  /* Ensure there is room for adding one more page to the update queue.  */
  if (unlikely (count == UPDATE_QUEUE_SIZE))
    flush_all_updates (pool);

  /* Add it to the update queue.  */
  pool->update_queue[pool->update_queue_count++] = page;
}

/* Drops a page from the update queue.  */
static inline void
drop_update (uintptr_t page, struct page_pool *pool)
{
  size_t count = pool->update_queue_count;
  size_t i;
  for (i = 0; i < count; i++)
    if (pool->update_queue[i] == page)
      {
        /* It's in the queue.  Remove it.  */
        for (i = i + 1; i < count; i++)
          pool->update_queue[i - 1] = pool->update_queue[i];
        pool->update_queue_count--;
        return;
      }
}

/* ============================== Small blocks ============================== */

#include "ssfmalloc-bitmap.h"

/* Maximum size of a small block.
   Must be a power of 2.  */
#define SMALL_BLOCK_MAX_SIZE (ALIGNMENT < 8 ? 32 * ALIGNMENT : 256)
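
/* For instance, SMALL_BLOCK_MAX_SIZE is 128 for ALIGNMENT = 4 and 256 for
   ALIGNMENT = 8, 16, 32.  Either way, a small block occupies at most 32 rows
   of ALIGNMENT bytes, which is why the allocation code below can assume that
   a block corresponds to at most 32 consecutive bitmap bits.  */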

/* Number of rows of ALIGNMENT bytes available in an empty page.  */
static unsigned int small_block_page_num_bits;
/* Offset in the page where the memory blocks start.
   A multiple of ALIGNMENT.  */
static unsigned int small_block_page_blocks_start;
/* Number of uint32_t words in each of the two bitmaps.  */
static unsigned int small_block_page_num_bitmap_words;

/* Header of a page of small blocks.
   Lies at an address that is a multiple of PAGESIZE.  */
struct small_page_header
{
  struct dissected_page_header common;
  /* Two bitmaps, each with small_block_page_num_bitmap_words words.  In each,
     a bit represents ALIGNMENT bytes.
       - available_bitmap: bit set means available, bit clear means allocated.
       - blockend_bitmap: bit set means that an allocated block ends here.  */
  uint32_t bitmap_words[FLEXIBLE_ARRAY_MEMBER];
};
 336 
 337 static inline uint32_t *
 338 small_block_page_available_bitmap (struct small_page_header *pageptr)
     /* [previous][next][first][last][top][bottom][index][help] */
 339 {
 340   return &pageptr->bitmap_words[0];
 341 }
 342 
 343 static inline uint32_t *
 344 small_block_page_blockend_bitmap (struct small_page_header *pageptr)
     /* [previous][next][first][last][top][bottom][index][help] */
 345 {
 346   return &pageptr->bitmap_words[small_block_page_num_bitmap_words];
 347 }
 348 
 349 static void
 350 init_small_block_page_pool (struct page_pool *pool)
     /* [previous][next][first][last][top][bottom][index][help] */
 351 {
 352   /* How many usable rows of ALIGNMENT bytes can we have?
 353      Each takes ALIGNMENT bytes + 1/8 byte in each bitmap, so approximately
 354      (ALIGNMENT + 1/4) bytes.  */
 355   unsigned int num_bits = (unsigned int) (4 * PAGESIZE) / (4 * ALIGNMENT + 1);
 356   unsigned int num_bitmap_words;
 357   unsigned int blocks_start;
 358   /* Iterate until it converges.  */
 359   for (;;)
 360     {
 361       num_bitmap_words = (num_bits + 32 - 1) / 32;
 362       blocks_start =
 363         (FLEXSIZEOF (struct small_page_header, bitmap_words,
 364                      2 * num_bitmap_words * sizeof (uint32_t))
 365          + ALIGNMENT - 1) & -ALIGNMENT;
 366       unsigned int num_bits_r = (unsigned int) (PAGESIZE - blocks_start) / ALIGNMENT;
 367       if (num_bits_r >= num_bits)
 368         break;
 369       num_bits = num_bits_r;
 370     }
 371 
 372   small_block_page_num_bits         = num_bits;
 373   small_block_page_num_bitmap_words = num_bitmap_words;
 374   small_block_page_blocks_start     = blocks_start;
 375 
 376   pool->page_capacity = small_block_page_num_bits * ALIGNMENT;
 377 }
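
/* A worked example of the iteration above, assuming PAGESIZE = 4096,
   ALIGNMENT = 8, 64-bit pointers, and PAGE_RESERVED_HEADER_SIZE = 0 (so the
   fixed part of struct small_page_header occupies 16 bytes): the first round
   gives num_bits = 16384 / 33 = 496, num_bitmap_words = 16, and
   blocks_start = (16 + 2 * 16 * 4 + 7) & -8 = 144.  The recomputed value
   (4096 - 144) / 8 = 494 is smaller, so a second round runs; it yields the
   same num_bitmap_words and blocks_start, and the loop converges with
   num_bits = 494, i.e. a page capacity of 494 * 8 = 3952 bytes.  */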

static void
init_small_block_page (uintptr_t page)
{
  struct small_page_header *pageptr = (struct small_page_header *) page;
  pageptr->common.common.page_type = small_page_type;

  /* Initialize available_bitmap.  */
  uint32_t *available_bitmap = small_block_page_available_bitmap (pageptr);
  init_bitmap_all_bits_set (small_block_page_num_bitmap_words,
                            available_bitmap);
  if ((small_block_page_num_bits % 32) != 0)
    available_bitmap[small_block_page_num_bitmap_words - 1] =
      (1U << (small_block_page_num_bits % 32)) - 1;

  /* Initialize blockend_bitmap.  */
  init_bitmap_all_bits_clear (small_block_page_num_bitmap_words,
                              small_block_page_blockend_bitmap (pageptr));

  pageptr->common.free_space = small_block_page_num_bits * ALIGNMENT;
}

/* Allocates a block of memory of size <= SMALL_BLOCK_MAX_SIZE,
   aligned to ALIGNMENT bytes, from the given page.
   Returns 0 upon failure.  */
static uintptr_t
allocate_small_block_in_page (size_t size, uintptr_t page)
{
  struct small_page_header *pageptr = (struct small_page_header *) page;

  /* glibc compatible.  */
  if (size == 0)
    size = 1;

  /* Number of consecutive bits to look for in the bitmap.  */
  size_t c = (size + ALIGNMENT - 1) / ALIGNMENT;

  /* SMALL_BLOCK_MAX_SIZE has been chosen so that c <= 32.  */
  if (!(c > 0 && c <= 32))
    abort ();

  uint32_t *available_bitmap = small_block_page_available_bitmap (pageptr);
  size_t k = find_first_packet_set (small_block_page_num_bitmap_words,
                                    available_bitmap,
                                    c);
  if (unlikely (k == (size_t)(-1)))
    /* Failed to find c consecutive available rows of ALIGNMENT bytes each.  */
    return 0;

  uint32_t *blockend_bitmap = small_block_page_blockend_bitmap (pageptr);
  size_t j = k / 32;
  size_t i = k % 32;
  if (i + c <= 32)
    {
      available_bitmap[j] &= ~(((2U << (c - 1)) - 1) << i);
      blockend_bitmap[j] |= (1U << (i + c - 1));
    }
  else
    {
      available_bitmap[j] &= ~(-1U << i);
      available_bitmap[j + 1] &= ~((1U << (i + c - 32)) - 1);
      blockend_bitmap[j + 1] |= (1U << (i + c - 1 - 32));
    }

  pageptr->common.free_space -= c * ALIGNMENT;

  return page + small_block_page_blocks_start + k * ALIGNMENT;
}
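
/* A worked example of the bitmap manipulation above (hypothetical values,
   ALIGNMENT = 8): a request of size 20 needs c = 3 rows.  If the search
   yields k = 30, i.e. j = 0 and i = 30, then i + c = 33 > 32, so the block
   straddles two bitmap words: bits 30..31 of available_bitmap[0] and bit 0
   of available_bitmap[1] are cleared, and bit i + c - 1 - 32 = 0 of
   blockend_bitmap[1] is set, marking the row where the block ends.  */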

static void
free_small_block_in_page (uintptr_t block, uintptr_t page)
{
  struct small_page_header *pageptr = (struct small_page_header *) page;

  if (!(block >= page + small_block_page_blocks_start
        && (block % ALIGNMENT) == 0))
    /* Invalid argument.  */
    abort ();

  uint32_t *available_bitmap = small_block_page_available_bitmap (pageptr);
  uint32_t *blockend_bitmap = small_block_page_blockend_bitmap (pageptr);

  /* The bit that corresponds to where the block starts.  */
  size_t k = (block - page - small_block_page_blocks_start) / ALIGNMENT;
  /* The bit that corresponds to where the block ends.  */
  size_t ke = find_first_bit_set (small_block_page_num_bitmap_words,
                                  blockend_bitmap,
                                  k);
  if (/* ke == (size_t)(-1) || */ ke >= k + 32)
    /* Invalid argument or invalid state.  */
    abort ();

  /* Number of consecutive bits to manipulate in the bitmap.  */
  size_t c = ke - k + 1;

  size_t j = k / 32;
  size_t i = k % 32;
  if (i + c <= 32)
    {
      available_bitmap[j] |= (((2U << (c - 1)) - 1) << i);
      blockend_bitmap[j] &= ~(1U << (i + c - 1));
    }
  else
    {
      available_bitmap[j] |= (-1U << i);
      available_bitmap[j + 1] |= ((1U << (i + c - 32)) - 1);
      blockend_bitmap[j + 1] &= ~(1U << (i + c - 1 - 32));
    }

  pageptr->common.free_space += c * ALIGNMENT;
}

/* Management of pages of small blocks.  */
struct page_pool small_block_pages =
  {
    init_small_block_page_pool,
    init_small_block_page,
    allocate_small_block_in_page,
    free_small_block_in_page
  };

/* ============================== Medium blocks ============================= */

/* A range of memory in a page.
   It covers the address range [page+start, page+end).
   start <= end.  */
struct memory_range
{
  pg_offset_t start;
  pg_offset_t end;
};

/* Header of a page of medium blocks.
   Lies at an address that is a multiple of PAGESIZE.  */
struct medium_page_header
{
  struct dissected_page_header common;
  /* If n blocks are allocated, there are n+1 gaps before, between, and
     after them.  Keep them in an array, sorted in ascending order.  */
  unsigned int num_gaps; /* > 0 */
  struct memory_range gaps[FLEXIBLE_ARRAY_MEMBER /* PAGESIZE / SMALL_BLOCK_MAX_SIZE + 1 */];
};

#define MEDIUM_BLOCKS_PAGE_MAX_GAPS \
  (PAGESIZE / SMALL_BLOCK_MAX_SIZE + 1)
#define MEDIUM_BLOCKS_PAGE_FIRST_GAP_START \
  ((FLEXSIZEOF (struct medium_page_header, gaps, \
                MEDIUM_BLOCKS_PAGE_MAX_GAPS * sizeof (struct memory_range)) \
    + ALIGNMENT - 1) & -ALIGNMENT)
#define MEDIUM_BLOCKS_PAGE_LAST_GAP_END \
  PAGESIZE
#define MEDIUM_BLOCKS_PAGE_CAPACITY \
  (MEDIUM_BLOCKS_PAGE_LAST_GAP_END - MEDIUM_BLOCKS_PAGE_FIRST_GAP_START)
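
/* Hedged worked values, assuming PAGESIZE = 4096, ALIGNMENT = 8, 64-bit
   pointers, and PAGE_RESERVED_HEADER_SIZE = 0 (so the gaps array starts at
   offset 20 in struct medium_page_header): SMALL_BLOCK_MAX_SIZE is 256,
   hence MEDIUM_BLOCKS_PAGE_MAX_GAPS = 4096 / 256 + 1 = 17,
   MEDIUM_BLOCKS_PAGE_FIRST_GAP_START = (20 + 17 * 4 + 7) & -8 = 88, and
   MEDIUM_BLOCKS_PAGE_CAPACITY = 4096 - 88 = 4008.  */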

static void
init_medium_block_page_pool (struct page_pool *pool)
{
  pool->page_capacity = MEDIUM_BLOCKS_PAGE_CAPACITY;
}

static void
init_medium_block_page (uintptr_t page)
{
  struct medium_page_header *pageptr = (struct medium_page_header *) page;
  pageptr->common.common.page_type = medium_page_type;
  pageptr->num_gaps = 1;
  pageptr->gaps[0].start = MEDIUM_BLOCKS_PAGE_FIRST_GAP_START;
  pageptr->gaps[0].end   = MEDIUM_BLOCKS_PAGE_LAST_GAP_END;
  pageptr->common.free_space = MEDIUM_BLOCKS_PAGE_CAPACITY;
}

/* Allocates a block of memory of size > SMALL_BLOCK_MAX_SIZE,
   aligned to ALIGNMENT bytes, from the given page.
   Returns 0 upon failure.  */
static uintptr_t
allocate_medium_block_in_page (size_t size, uintptr_t page)
{
  struct medium_page_header *pageptr = (struct medium_page_header *) page;

  /* Walk through the gaps and remember the smallest gap of at least
     the given size.  */
  size_t best_i = (size_t)(-1);
  size_t best_length = (size_t)(-1);
  size_t num_gaps = pageptr->num_gaps;
  size_t i;
  for (i = 0; i < num_gaps; i++)
    {
      size_t length = pageptr->gaps[i].end - pageptr->gaps[i].start;
      if (length >= size)
        {
          /* Found a gap of sufficient size.  */
          if (length < best_length)
            {
              best_i = i;
              best_length = length;
            }
        }
    }
  if (unlikely (best_i == (size_t)(-1)))
    /* Failed to find a gap of sufficient size.  */
    return 0;

  size_t aligned_size = (size + ALIGNMENT - 1) & -ALIGNMENT;

  if (pageptr->common.free_space < aligned_size)
    /* Invalid state: Less free space than expected.  */
    abort ();

  /* Split the gap, leaving an empty gap and a remaining gap.  */
  for (i = num_gaps - 1; ; i--)
    {
      pageptr->gaps[i + 1] = pageptr->gaps[i];
      if (i == best_i)
        break;
    }
  size_t result = pageptr->gaps[best_i].start;
  pageptr->gaps[best_i].end = result;
  pageptr->gaps[best_i + 1].start = result + aligned_size;
  pageptr->num_gaps = num_gaps + 1;
  if (pageptr->num_gaps > PAGESIZE / SMALL_BLOCK_MAX_SIZE + 1)
    /* Invalid state: More gaps than expected.  */
    abort ();

  pageptr->common.free_space -= aligned_size;

  return page + result;
}
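
/* An example of the gap splitting above (hypothetical state, with the
   offsets from the previous example): if gaps = { [88, 1000), [1500, 4096) }
   and a block of size 100 is requested, the best fit is the first gap
   (length 912 < 2596).  With aligned_size = 104 the array becomes
   { [88, 88), [192, 1000), [1500, 4096) }: the new block occupies
   [page+88, page+192), preceded by an empty gap.  */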

static void
free_medium_block_in_page (uintptr_t block, uintptr_t page)
{
  struct medium_page_header *pageptr = (struct medium_page_header *) page;
  size_t offset = block - page;

  /* Search for the gap that ends where this block begins.
     We can ignore the last gap here, since it ends where the page ends.  */
  struct memory_range *gaps = pageptr->gaps;
  size_t lo = 0;
  size_t hi = pageptr->num_gaps - 1;
  size_t index;
  while (lo < hi)
    {
      /* Invariant:
         for i < lo, gaps[i].end < offset,
         for i >= hi, gaps[i].end > offset.  */
      size_t mid = (hi + lo) >> 1; /* >= lo, < hi */
      if (offset > gaps[mid].end)
        lo = mid + 1;
      else if (offset < gaps[mid].end)
        hi = mid;
      else
        {
          /* Found it: offset == gaps[mid].end.  */
          index = mid;
          goto found;
        }
    }
  /* Invalid argument: block is not the start of a currently allocated
     block.  */
  abort ();

 found:
  /* Here 0 <= index < pageptr->num_gaps - 1.
     Combine the gaps index and index+1.  */
  pageptr->common.free_space += gaps[index + 1].start - gaps[index].end;
  if (pageptr->common.free_space < gaps[index + 1].start - gaps[index].end)
    /* Wrap around.  */
    abort ();

  gaps[index].end = gaps[index + 1].end;

  size_t num_gaps = pageptr->num_gaps - 1;
  size_t i;
  for (i = index + 1; i < num_gaps; i++)
    gaps[i] = gaps[i + 1];
  pageptr->num_gaps = num_gaps;
}
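
/* Continuing the example above: free_medium_block_in_page (page + 88, page)
   binary-searches for the gap whose end equals offset 88 and finds index 0.
   It credits gaps[1].start - gaps[0].end = 192 - 88 = 104 bytes back to
   free_space and merges gaps 0 and 1 into [88, 1000), restoring the original
   gap array.  */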

/* Management of pages of medium blocks.  */
struct page_pool medium_block_pages =
  {
    init_medium_block_page_pool,
    init_medium_block_page,
    allocate_medium_block_in_page,
    free_medium_block_in_page
  };

/* ==================== Pages of small and medium blocks ==================== */

/* Allocates a block of memory from the given pool, aligned to ALIGNMENT bytes.
   Returns 0 upon failure.  */
static inline uintptr_t
allocate_block_from_pool (size_t size, struct page_pool *pool)
{
  uintptr_t page;

  /* Try in the last used page first.  */
  page = pool->last_page;
  if (likely (page != 0))
    {
      uintptr_t block = pool->allocate_block_in_page (size, page);
      if (likely (block != 0))
        {
          add_update (page, pool);
          return block;
        }
    }

  /* Ensure that the pool and its managed_pages are initialized.  */
  if (unlikely (pool->managed_pages == NULL))
    {
      pool->managed_pages =
        gl_oset_nx_create_empty (GL_RBTREE_OSET, compare_pages_by_free_space, NULL);
      if (unlikely (pool->managed_pages == NULL))
        /* Could not allocate the managed_pages.  */
        return 0;
      pool->init_page_pool (pool);
    }

  /* Ensure that managed_pages is up-to-date.  */
  flush_all_updates (pool);

  /* Try in the other managed_pages.  */
  {
    gl_oset_iterator_t iter =
      gl_oset_iterator_atleast (pool->managed_pages,
                                page_free_space_is_at_least,
                                (void *) (uintptr_t) size);
    const void *elt;
    while (gl_oset_iterator_next (&iter, &elt))
      {
        struct page_tree_element *element = (struct page_tree_element *) elt;
        page = element->page;
        /* No need to try the last used page again.  */
        if (likely (page != pool->last_page))
          {
            uintptr_t block = pool->allocate_block_in_page (size, page);
            if (likely (block != 0))
              {
                gl_oset_iterator_free (&iter);
                add_update (page, pool);
                pool->last_page = page;
                return block;
              }
          }
      }
    gl_oset_iterator_free (&iter);
  }

  /* If we have a freeable page ready for reuse, use it.  */
  if (pool->freeable_page != 0)
    {
      page = pool->freeable_page;
      pool->init_page (page);
      struct page_tree_element *element =
        (struct page_tree_element *) malloc (sizeof (struct page_tree_element));
      if (unlikely (element == NULL))
        {
          /* Could not allocate the tree element.  */
          pool->last_page = 0;
          return 0;
        }
      element->page = page;
      element->free_space = ((struct dissected_page_header *) page)->free_space;
      if (unlikely (gl_oset_nx_add (pool->managed_pages, element) < 0))
        {
          /* Could not allocate the tree node.  */
          free (element);
          pool->last_page = 0;
          return 0;
        }
      ((struct dissected_page_header *) page)->tree_element = element;
      pool->freeable_page = 0;

      uintptr_t block = pool->allocate_block_in_page (size, page);
      if (block == 0)
        /* If the size is too large for an empty page, this function should not
           have been invoked.  */
        abort ();
      add_update (page, pool);
      pool->last_page = page;
      return block;
    }

  /* Allocate a fresh page.  */
  page = ALLOC_PAGES (PAGESIZE);
  if (unlikely (page == 0))
    {
      /* Failed.  */
      pool->last_page = 0;
      return 0;
    }
  if ((page & (PAGESIZE - 1)) != 0)
    /* ALLOC_PAGES's result is not aligned as expected.  */
    abort ();

  pool->init_page (page);
  struct page_tree_element *element =
    (struct page_tree_element *) malloc (sizeof (struct page_tree_element));
  if (unlikely (element == NULL))
    {
      /* Could not allocate the tree element.  */
      FREE_PAGES (page, PAGESIZE);
      pool->last_page = 0;
      return 0;
    }
  element->page = page;
  element->free_space = ((struct dissected_page_header *) page)->free_space;
  if (unlikely (gl_oset_nx_add (pool->managed_pages, element) < 0))
    {
      /* Could not allocate the tree node.  */
      free (element);
      FREE_PAGES (page, PAGESIZE);
      pool->last_page = 0;
      return 0;
    }
  ((struct dissected_page_header *) page)->tree_element = element;

  uintptr_t block = pool->allocate_block_in_page (size, page);
  if (block == 0)
    /* If the size is too large for an empty page, this function should not
       have been invoked.  */
    abort ();
  add_update (page, pool);
  pool->last_page = page;
  return block;
}

static void
free_block_from_pool (uintptr_t block, uintptr_t page, struct page_pool *pool)
{
  if (pool->page_capacity == 0)
    /* Invalid argument: The block is not valid, since the pool has not yet
       been initialized.  */
    abort ();

  pool->free_block_in_page (block, page);

  struct dissected_page_header *pageptr = (struct dissected_page_header *) page;
  if (likely (pageptr->free_space != pool->page_capacity))
    {
      /* The page is not entirely free.  */
      add_update (page, pool);
    }
  else
    {
      /* The page is now entirely free.  */
      /* Remove its tree element and free it.  */
      struct page_tree_element *element = pageptr->tree_element;
      if (!gl_oset_remove (pool->managed_pages, element))
        /* Invalid state: The element is not in the managed_pages.  */
        abort ();
      free (element);

      if (pool->last_page == page)
        pool->last_page = 0;

      drop_update (page, pool);

      /* If we would now have two freeable pages, free the old one.  */
      if (pool->freeable_page != 0)
        FREE_PAGES (pool->freeable_page, PAGESIZE);

      /* Don't free the page now, but later.  */
      pool->freeable_page = page;
    }
}

/* Lock that protects the management of small and medium blocks from
   simultaneous access from multiple threads.  */
gl_lock_define_initialized(static, ssfmalloc_lock)

/* ============================== Large blocks ============================== */

/* Header of a page sequence for a large block.
   Lies at an address that is a multiple of PAGESIZE.  */
struct large_block_header
{
  #if PAGE_RESERVED_HEADER_SIZE > 0
  void * reserved[PAGE_RESERVED_HEADER_SIZE / sizeof (void *)];
  #endif
  unsigned char page_type; /* large_page_type */
};

/* Information about a large block.
   Ends at an address that is a multiple of ALIGNMENT.  */
struct large_block_caption
{
  size_t pages_size; /* A multiple of PAGESIZE.  */
};

/* Size of large block page header, gap, and caption.  */
#define LARGE_BLOCK_OFFSET \
  ((sizeof (struct large_block_header) + sizeof (struct large_block_caption) \
    + ALIGNMENT - 1) & -ALIGNMENT)
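
/* For instance, with ALIGNMENT = 8 and PAGE_RESERVED_HEADER_SIZE = 0,
   LARGE_BLOCK_OFFSET is (1 + 8 + 7) & -8 = 16: one page_type byte plus one
   size_t caption, rounded up to a multiple of ALIGNMENT.  */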

/* =========================== Exported functions =========================== */

/* Allocates a block of memory, aligned to ALIGNMENT bytes.
   Returns 0 upon failure.  */
static uintptr_t
allocate_block (size_t size)
{
  uintptr_t block;

  if (unlikely (size > MEDIUM_BLOCKS_PAGE_CAPACITY))
    {
      /* Allocate a large block.  */
      size_t pages_size = (size + LARGE_BLOCK_OFFSET + PAGESIZE - 1) & -PAGESIZE;
      uintptr_t pages = ALLOC_PAGES (pages_size);
      if (unlikely (pages == 0))
        /* Failed.  */
        return 0;
      if ((pages & (PAGESIZE - 1)) != 0)
        /* ALLOC_PAGES's result is not aligned as expected.  */
        abort ();
      ((struct any_page_header *) pages)->page_type = large_page_type;
      block = pages + LARGE_BLOCK_OFFSET;
      ((struct large_block_caption *) block)[-1].pages_size = pages_size;
    }
  else
    {
      bool mt = gl_multithreaded ();
      if (mt) gl_lock_lock (ssfmalloc_lock);
      struct page_pool *pool =
        (size <= SMALL_BLOCK_MAX_SIZE ? &small_block_pages : &medium_block_pages);
      block = allocate_block_from_pool (size, pool);
      if (mt) gl_lock_unlock (ssfmalloc_lock);
    }
  return block;
}

/* Frees a block of memory, returned by allocate_block.  */
static void
free_block (uintptr_t block)
{
  if (block == 0 || (block % ALIGNMENT) != 0)
    /* Invalid argument.  */
    abort ();
  uintptr_t pages = block & -PAGESIZE;
  unsigned char type = ((struct any_page_header *) pages)->page_type;
  if (unlikely (type == large_page_type))
    {
      if (block != pages + LARGE_BLOCK_OFFSET)
        /* Invalid argument.  */
        abort ();
      size_t pages_size = ((struct large_block_caption *) block)[-1].pages_size;
      if ((pages_size & (PAGESIZE - 1)) != 0)
        /* Invalid memory state: pages_size not as expected.  */
        abort ();
      FREE_PAGES (pages, pages_size);
    }
  else
    {
      bool mt = gl_multithreaded ();
      if (mt) gl_lock_lock (ssfmalloc_lock);
      struct page_pool *pool;
      if (type == small_page_type)
        pool = &small_block_pages;
      else if (type == medium_page_type)
        pool = &medium_block_pages;
      else
        /* Invalid memory state: type not as expected.  */
        abort ();
      free_block_from_pool (block, pages, pool);
      if (mt) gl_lock_unlock (ssfmalloc_lock);
    }
}
