This source file includes following definitions.
- compare_pages_by_free_space
- page_free_space_is_at_least
- set_free_space
- flush_all_updates
- add_update
- drop_update
- small_block_page_available_bitmap
- small_block_page_blockend_bitmap
- init_small_block_page_pool
- init_small_block_page
- allocate_small_block_in_page
- free_small_block_in_page
- init_medium_block_page_pool
- init_medium_block_page
- allocate_medium_block_in_page
- free_medium_block_in_page
- allocate_block_from_pool
- free_block_from_pool
- gl_lock_define_initialized
- allocate_block
- free_block
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83 #include <stdint.h>
84
85
86
87 static uintptr_t allocate_block (size_t size);
88
89
90 static void free_block (uintptr_t block);
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123 #include <stdlib.h>
124 #include <string.h>
125 #include <strings.h>
126 #include "flexmember.h"
127 #include "glthread/lock.h"
128 #include "thread-optim.h"
129 #include "gl_oset.h"
130 #include "gl_rbtree_oset.h"
131
132
/* Branch-prediction hints for hot paths; expand to plain conditions on
   compilers without __builtin_expect.  */
#if __GNUC__ >= 3
# define likely(cond) __builtin_expect ((cond), 1)
# define unlikely(cond) __builtin_expect ((cond), 0)
#else
# define likely(cond) (cond)
# define unlikely(cond) (cond)
#endif

/* Discriminator stored in each page's header, so that free_block can tell
   which allocator owns the page containing a block.  */
enum { small_page_type = 1, medium_page_type = 2, large_page_type = 3 };
142
143
144
/* Header present at the start of every page, regardless of page type.
   The first PAGE_RESERVED_HEADER_SIZE bytes are reserved (untouched by
   this allocator).  */
struct any_page_header
{
#if PAGE_RESERVED_HEADER_SIZE > 0
  void * reserved[PAGE_RESERVED_HEADER_SIZE / sizeof (void *)];
#endif
  /* One of small_page_type, medium_page_type, large_page_type.  */
  unsigned char page_type;
};
153
154
155
156
/* An offset or size within a page.  16 bits suffice when the maximum page
   size is below 0x10000 bytes.  */
#if PAGESIZE_MAX >= 0x10000
typedef unsigned int pg_offset_t;
#else
typedef unsigned short pg_offset_t;
#endif
162
163
164
/* Element stored in a pool's 'managed_pages' ordered set: a page together
   with a cached copy of its free space, which acts as the sort key.  */
struct page_tree_element
{
  uintptr_t page;
  pg_offset_t free_space;
};
170
171
172
/* Header of a page that is dissected into blocks (small or medium pages).  */
struct dissected_page_header
{
  struct any_page_header common;

  /* Number of bytes in this page still available for allocation.  */
  pg_offset_t free_space;

  /* Back-pointer to this page's element in the pool's managed_pages set.  */
  struct page_tree_element *tree_element;
};
181
182
/* A pool of pages that are all dissected the same way (small-block pages
   or medium-block pages).  */
struct page_pool
{
  /* Methods, filled in by the concrete pool instance
     (small_block_pages, medium_block_pages).  */
  void (*init_page_pool) (struct page_pool *pool);
  void (*init_page) (uintptr_t page);
  uintptr_t (*allocate_block_in_page) (size_t size, uintptr_t page);
  void (*free_block_in_page) (uintptr_t block, uintptr_t page);

  /* free_space of a freshly initialized page; 0 until init_page_pool
     has run.  */
  size_t page_capacity;

  /* Page in which the last allocation succeeded; tried first on the next
     allocation.  0 if none.  */
  uintptr_t last_page;

  /* Ordered set of 'struct page_tree_element *', sorted by
     (free_space, page).  NULL until lazily created on first use.  */
  gl_oset_t managed_pages;

  /* Queue of pages whose cached free_space in managed_pages is stale;
     flushed in batches so that not every (de)allocation pays for a tree
     reorganization.  */
#define UPDATE_QUEUE_SIZE 10
  unsigned int update_queue_count;
  uintptr_t update_queue[UPDATE_QUEUE_SIZE];

  /* A completely free page kept in reserve (not part of managed_pages),
     so that an allocate/free sequence at a page boundary does not thrash
     ALLOC_PAGES / FREE_PAGES.  0 if none.  */
  uintptr_t freeable_page;
};
215
216
217 static int
218 compare_pages_by_free_space (const void *elt1, const void *elt2)
219 {
220 struct page_tree_element *element1 = (struct page_tree_element *) elt1;
221 struct page_tree_element *element2 = (struct page_tree_element *) elt2;
222 int cmp = _GL_CMP (element1->free_space, element2->free_space);
223 if (unlikely (cmp == 0))
224 cmp = _GL_CMP (element1->page, element2->page);
225 return cmp;
226 }
227
228
229
230 static bool
231 page_free_space_is_at_least (const void *elt, const void *threshold)
232 {
233 struct page_tree_element *element = (struct page_tree_element *) elt;
234
235 return element->free_space >= (uintptr_t) threshold;
236 }
237
238
239
240 static void
241 set_free_space (const void *elt, void *action_data)
242 {
243 struct page_tree_element *element = (struct page_tree_element *) elt;
244
245 element->free_space = (pg_offset_t) (uintptr_t) action_data;
246 }
247
248
/* Pushes every pending free_space update from the pool's queue into the
   managed_pages tree, then empties the queue.  */
static void
flush_all_updates (struct page_pool *pool)
{
  size_t count = pool->update_queue_count;
  while (likely (count > 0))
    {
      --count;
      uintptr_t page = pool->update_queue[count];
      struct dissected_page_header *pageptr =
        (struct dissected_page_header *) page;
      struct page_tree_element *tree_element = pageptr->tree_element;
      /* Re-key the element with the page's current free_space.  A negative
         result indicates failure of the tree reorganization.  */
      if (gl_oset_update (pool->managed_pages, tree_element,
                          set_free_space,
                          (void *) (uintptr_t) pageptr->free_space)
          < 0)
        abort ();
    }
  pool->update_queue_count = 0;
}
270
271
272
273
274 static inline void
275 add_update (uintptr_t page, struct page_pool *pool)
276 {
277 size_t count = pool->update_queue_count;
278 size_t i;
279 for (i = 0; i < count; i++)
280 if (pool->update_queue[i] == page)
281
282 return;
283
284
285 if (unlikely (count == UPDATE_QUEUE_SIZE))
286 flush_all_updates (pool);
287
288
289 pool->update_queue[pool->update_queue_count++] = page;
290 }
291
292
293 static inline void
294 drop_update (uintptr_t page, struct page_pool *pool)
295 {
296 size_t count = pool->update_queue_count;
297 size_t i;
298 for (i = 0; i < count; i++)
299 if (pool->update_queue[i] == page)
300 {
301
302 for (i = i + 1; i < count; i++)
303 pool->update_queue[i - 1] = pool->update_queue[i];
304 pool->update_queue_count--;
305 return;
306 }
307 }
308
309
310
311 #include "ssfmalloc-bitmap.h"
312
313
314
/* Requests up to this many bytes are served by the small-block allocator;
   larger ones go to the medium-block allocator.  */
#define SMALL_BLOCK_MAX_SIZE (ALIGNMENT < 8 ? 32 * ALIGNMENT : 256)

/* Number of allocation units (of ALIGNMENT bytes each) per small-block
   page; one bitmap bit per unit.  Set by init_small_block_page_pool.  */
static unsigned int small_block_page_num_bits;

/* Offset, within a small-block page, of the first allocatable byte.  */
static unsigned int small_block_page_blocks_start;

/* Number of 32-bit words in each of the page's two bitmaps.  */
static unsigned int small_block_page_num_bitmap_words;
324
325
326
/* Header of a page that carves its space into small blocks.  The flexible
   array holds two bitmaps of small_block_page_num_bitmap_words words each:
   first the 'available' bitmap, then the 'blockend' bitmap.  */
struct small_page_header
{
  struct dissected_page_header common;

  uint32_t bitmap_words[FLEXIBLE_ARRAY_MEMBER];
};
336
337 static inline uint32_t *
338 small_block_page_available_bitmap (struct small_page_header *pageptr)
339 {
340 return &pageptr->bitmap_words[0];
341 }
342
343 static inline uint32_t *
344 small_block_page_blockend_bitmap (struct small_page_header *pageptr)
345 {
346 return &pageptr->bitmap_words[small_block_page_num_bitmap_words];
347 }
348
/* Computes the small-block page layout (number of allocation units, bitmap
   word count, start offset of the block area) and the pool's capacity.  */
static void
init_small_block_page_pool (struct page_pool *pool)
{
  /* Upper bound on the number of units: each unit costs ALIGNMENT bytes of
     payload plus 2 bitmap bits (= 1/4 byte), hence at most
     4 * PAGESIZE / (4 * ALIGNMENT + 1) units fit.  */
  unsigned int num_bits = (unsigned int) (4 * PAGESIZE) / (4 * ALIGNMENT + 1);
  unsigned int num_bitmap_words;
  unsigned int blocks_start;

  /* Iterate until the layout is self-consistent: the area left after the
     header and both bitmaps must actually hold num_bits units.  */
  for (;;)
    {
      num_bitmap_words = (num_bits + 32 - 1) / 32;
      /* Header plus two bitmaps, rounded up to a multiple of ALIGNMENT.  */
      blocks_start =
        (FLEXSIZEOF (struct small_page_header, bitmap_words,
                     2 * num_bitmap_words * sizeof (uint32_t))
         + ALIGNMENT - 1) & -ALIGNMENT;
      unsigned int num_bits_r = (unsigned int) (PAGESIZE - blocks_start) / ALIGNMENT;
      if (num_bits_r >= num_bits)
        break;
      num_bits = num_bits_r;
    }

  small_block_page_num_bits = num_bits;
  small_block_page_num_bitmap_words = num_bitmap_words;
  small_block_page_blocks_start = blocks_start;

  pool->page_capacity = small_block_page_num_bits * ALIGNMENT;
}
378
/* Prepares a fresh page for small-block allocation: marks every unit
   available, clears the block-end bitmap, and records full free space.  */
static void
init_small_block_page (uintptr_t page)
{
  struct small_page_header *pageptr = (struct small_page_header *) page;
  pageptr->common.common.page_type = small_page_type;

  /* All units start out available ...  */
  uint32_t *available_bitmap = small_block_page_available_bitmap (pageptr);
  init_bitmap_all_bits_set (small_block_page_num_bitmap_words,
                            available_bitmap);
  /* ... except the padding bits past num_bits in the last bitmap word.  */
  if ((small_block_page_num_bits % 32) != 0)
    available_bitmap[small_block_page_num_bitmap_words - 1] =
      (1U << (small_block_page_num_bits % 32)) - 1;

  /* No blocks allocated yet, hence no block-end marks.  */
  init_bitmap_all_bits_clear (small_block_page_num_bitmap_words,
                              small_block_page_blockend_bitmap (pageptr));

  pageptr->common.free_space = small_block_page_num_bits * ALIGNMENT;
}
399
400
401
402
/* Allocates a block of SIZE bytes (SIZE <= SMALL_BLOCK_MAX_SIZE) in PAGE.
   Returns the block's address, or 0 if the page has no suitable run of
   free units.  Updates the page's bitmaps and free_space.  */
static uintptr_t
allocate_small_block_in_page (size_t size, uintptr_t page)
{
  struct small_page_header *pageptr = (struct small_page_header *) page;

  /* A zero-size request still needs a distinct address.  */
  if (size == 0)
    size = 1;

  /* Number of ALIGNMENT-sized units the block occupies.  */
  size_t c = (size + ALIGNMENT - 1) / ALIGNMENT;

  /* A small block spans at most 32 units, so the run crosses at most one
     bitmap-word boundary.  */
  if (!(c > 0 && c <= 32))
    abort ();

  /* Find c consecutive free units.  */
  uint32_t *available_bitmap = small_block_page_available_bitmap (pageptr);
  size_t k = find_first_packet_set (small_block_page_num_bitmap_words,
                                    available_bitmap,
                                    c);
  if (unlikely (k == (size_t)(-1)))
    /* No such run in this page.  */
    return 0;

  /* Clear the run in the available bitmap and mark its last unit in the
     block-end bitmap.  The run may straddle two words.  */
  uint32_t *blockend_bitmap = small_block_page_blockend_bitmap (pageptr);
  size_t j = k / 32;
  size_t i = k % 32;
  if (i + c <= 32)
    {
      available_bitmap[j] &= ~(((2U << (c - 1)) - 1) << i);
      blockend_bitmap[j] |= (1U << (i + c - 1));
    }
  else
    {
      available_bitmap[j] &= ~(-1U << i);
      available_bitmap[j + 1] &= ~((1U << (i + c - 32)) - 1);
      blockend_bitmap[j + 1] |= (1U << (i + c - 1 - 32));
    }

  pageptr->common.free_space -= c * ALIGNMENT;

  return page + small_block_page_blocks_start + k * ALIGNMENT;
}
446
/* Returns BLOCK to PAGE: recovers the block's length from the block-end
   bitmap, marks its units available again, and updates free_space.  */
static void
free_small_block_in_page (uintptr_t block, uintptr_t page)
{
  struct small_page_header *pageptr = (struct small_page_header *) page;

  /* Sanity check: BLOCK must lie in the block area and be aligned.  */
  if (!(block >= page + small_block_page_blocks_start
        && (block % ALIGNMENT) == 0))
    abort ();

  uint32_t *available_bitmap = small_block_page_available_bitmap (pageptr);
  uint32_t *blockend_bitmap = small_block_page_blockend_bitmap (pageptr);

  /* Unit index of the block's first unit.  */
  size_t k = (block - page - small_block_page_blocks_start) / ALIGNMENT;
  /* Unit index of the block's last unit: the first block-end bit at or
     after k.  A valid small block is at most 32 units long.  */
  size_t ke = find_first_bit_set (small_block_page_num_bitmap_words,
                                  blockend_bitmap,
                                  k);
  if ( ke >= k + 32)
    /* No block-end within range: BLOCK is not a live block here.  */
    abort ();

  /* Number of units to release.  */
  size_t c = ke - k + 1;

  /* Set the run in the available bitmap and clear its block-end bit.
     The run may straddle two words.  */
  size_t j = k / 32;
  size_t i = k % 32;
  if (i + c <= 32)
    {
      available_bitmap[j] |= (((2U << (c - 1)) - 1) << i);
      blockend_bitmap[j] &= ~(1U << (i + c - 1));
    }
  else
    {
      available_bitmap[j] |= (-1U << i);
      available_bitmap[j + 1] |= ((1U << (i + c - 32)) - 1);
      blockend_bitmap[j + 1] &= ~(1U << (i + c - 1 - 32));
    }

  pageptr->common.free_space += c * ALIGNMENT;
}
489
490
/* The pool of pages dissected into small blocks.  */
struct page_pool small_block_pages =
{
  init_small_block_page_pool,
  init_small_block_page,
  allocate_small_block_in_page,
  free_small_block_in_page
};
498
499
500
501
502
503
/* A contiguous range [start, end) of free bytes within a medium-block
   page.  */
struct memory_range
{
  pg_offset_t start;
  pg_offset_t end;
};
509
510
511
/* Header of a page that carves its space into medium blocks.  Free space
   is tracked as a position-sorted array of gaps; allocated blocks lie
   between consecutive gaps.  */
struct medium_page_header
{
  struct dissected_page_header common;

  /* Number of entries in use in 'gaps'.  */
  unsigned int num_gaps;
  struct memory_range gaps[FLEXIBLE_ARRAY_MEMBER ];
};
520
/* Upper bound on the number of gaps a medium-block page can have.  */
#define MEDIUM_BLOCKS_PAGE_MAX_GAPS \
  (PAGESIZE / SMALL_BLOCK_MAX_SIZE + 1)
/* Offset of the first allocatable byte: header plus the (maximum-sized)
   gap array, rounded up to a multiple of ALIGNMENT.  */
#define MEDIUM_BLOCKS_PAGE_FIRST_GAP_START \
  ((FLEXSIZEOF (struct medium_page_header, gaps, \
                MEDIUM_BLOCKS_PAGE_MAX_GAPS * sizeof (struct memory_range)) \
    + ALIGNMENT - 1) & -ALIGNMENT)
/* The allocatable area extends to the end of the page.  */
#define MEDIUM_BLOCKS_PAGE_LAST_GAP_END \
  PAGESIZE
/* Number of allocatable bytes in a fresh medium-block page.  */
#define MEDIUM_BLOCKS_PAGE_CAPACITY \
  (MEDIUM_BLOCKS_PAGE_LAST_GAP_END - MEDIUM_BLOCKS_PAGE_FIRST_GAP_START)
531
/* Records the capacity of a fresh medium-block page in the pool.  */
static void
init_medium_block_page_pool (struct page_pool *pool)
{
  pool->page_capacity = MEDIUM_BLOCKS_PAGE_CAPACITY;
}
537
/* Prepares a fresh page for medium-block allocation: a single gap that
   covers the whole allocatable area.  */
static void
init_medium_block_page (uintptr_t page)
{
  struct medium_page_header *pageptr = (struct medium_page_header *) page;
  pageptr->common.common.page_type = medium_page_type;
  pageptr->num_gaps = 1;
  pageptr->gaps[0].start = MEDIUM_BLOCKS_PAGE_FIRST_GAP_START;
  pageptr->gaps[0].end = MEDIUM_BLOCKS_PAGE_LAST_GAP_END;
  pageptr->common.free_space = MEDIUM_BLOCKS_PAGE_CAPACITY;
}
548
549
550
551
552 static uintptr_t
553 allocate_medium_block_in_page (size_t size, uintptr_t page)
554 {
555 struct medium_page_header *pageptr = (struct medium_page_header *) page;
556
557
558
559 size_t best_i = (size_t)(-1);
560 size_t best_length = (size_t)(-1);
561 size_t num_gaps = pageptr->num_gaps;
562 size_t i;
563 for (i = 0; i < num_gaps; i++)
564 {
565 size_t length = pageptr->gaps[i].end - pageptr->gaps[i].start;
566 if (length >= size)
567 {
568
569 if (length < best_length)
570 {
571 best_i = i;
572 best_length = length;
573 }
574 }
575 }
576 if (unlikely (best_i == (size_t)(-1)))
577
578 return 0;
579
580 size_t aligned_size = (size + ALIGNMENT - 1) & -ALIGNMENT;
581
582 if (pageptr->common.free_space < aligned_size)
583
584 abort ();
585
586
587 for (i = num_gaps - 1; ; i--)
588 {
589 pageptr->gaps[i + 1] = pageptr->gaps[i];
590 if (i == best_i)
591 break;
592 }
593 size_t result = pageptr->gaps[best_i].start;
594 pageptr->gaps[best_i].end = result;
595 pageptr->gaps[best_i + 1].start = result + aligned_size;
596 pageptr->num_gaps = num_gaps + 1;
597 if (pageptr->num_gaps > PAGESIZE / SMALL_BLOCK_MAX_SIZE + 1)
598
599 abort ();
600
601 pageptr->common.free_space -= aligned_size;
602
603 return page + result;
604 }
605
/* Returns BLOCK to PAGE: finds, via binary search, the gap that ends
   exactly where the block starts, merges it with the following gap, and
   updates free_space.  */
static void
free_medium_block_in_page (uintptr_t block, uintptr_t page)
{
  struct medium_page_header *pageptr = (struct medium_page_header *) page;
  size_t offset = block - page;

  /* Binary search for the gap whose 'end' equals OFFSET.  Every allocated
     block starts right where some gap ends.  */
  struct memory_range *gaps = pageptr->gaps;
  size_t lo = 0;
  size_t hi = pageptr->num_gaps - 1;
  size_t index;
  while (lo < hi)
    {
      size_t mid = (hi + lo) >> 1;
      if (offset > gaps[mid].end)
        lo = mid + 1;
      else if (offset < gaps[mid].end)
        hi = mid;
      else
        {
          index = mid;
          goto found;
        }
    }
  /* No gap ends at OFFSET: BLOCK was not allocated from this page.  */
  abort ();

 found:
  /* The freed block occupies [gaps[index].end, gaps[index+1].start).  */
  pageptr->common.free_space += gaps[index + 1].start - gaps[index].end;
  if (pageptr->common.free_space < gaps[index + 1].start - gaps[index].end)
    /* free_space wrapped around: inconsistent bookkeeping.  */
    abort ();
  /* Merge gap index with gap index+1 ...  */
  gaps[index].end = gaps[index + 1].end;
  /* ... and remove the now-redundant entry by shifting the tail down.  */
  size_t num_gaps = pageptr->num_gaps - 1;
  size_t i;
  for (i = index + 1; i < num_gaps; i++)
    gaps[i] = gaps[i + 1];
  pageptr->num_gaps = num_gaps;
}
655
656
/* The pool of pages dissected into medium blocks.  */
struct page_pool medium_block_pages =
{
  init_medium_block_page_pool,
  init_medium_block_page,
  allocate_medium_block_in_page,
  free_medium_block_in_page
};
664
665
666
667
668
/* Allocates a block of SIZE bytes from POOL.  Returns 0 upon failure.
   Tries, in order: the last successful page, the managed pages with enough
   free space, the page kept in reserve, and finally a fresh page from the
   system.  The caller must hold the lock in multithreaded use.  */
static inline uintptr_t
allocate_block_from_pool (size_t size, struct page_pool *pool)
{
  uintptr_t page;

  /* Fast path: try the page that satisfied the last allocation.  */
  page = pool->last_page;
  if (likely (page != 0))
    {
      uintptr_t block = pool->allocate_block_in_page (size, page);
      if (likely (block != 0))
        {
          add_update (page, pool);
          return block;
        }
    }

  /* Lazily create the tree of managed pages on first use.  */
  if (unlikely (pool->managed_pages == NULL))
    {
      pool->managed_pages =
        gl_oset_nx_create_empty (GL_RBTREE_OSET, compare_pages_by_free_space, NULL);
      if (unlikely (pool->managed_pages == NULL))
        /* Could not create the tree.  */
        return 0;
      pool->init_page_pool (pool);
    }

  /* The search below relies on up-to-date free_space keys.  */
  flush_all_updates (pool);

  /* Try the managed pages with at least SIZE bytes free, in order of
     increasing free space (best fit first).  */
  {
    gl_oset_iterator_t iter =
      gl_oset_iterator_atleast (pool->managed_pages,
                                page_free_space_is_at_least,
                                (void *) (uintptr_t) size);
    const void *elt;
    while (gl_oset_iterator_next (&iter, &elt))
      {
        struct page_tree_element *element = (struct page_tree_element *) elt;
        page = element->page;
        /* last_page was already tried above; skip it.  */
        if (likely (page != pool->last_page))
          {
            uintptr_t block = pool->allocate_block_in_page (size, page);
            if (likely (block != 0))
              {
                gl_oset_iterator_free (&iter);
                add_update (page, pool);
                pool->last_page = page;
                return block;
              }
          }
      }
    gl_oset_iterator_free (&iter);
  }

  /* Reuse the page kept in reserve, if any, before asking the system.  */
  if (pool->freeable_page != 0)
    {
      page = pool->freeable_page;
      pool->init_page (page);
      struct page_tree_element *element =
        (struct page_tree_element *) malloc (sizeof (struct page_tree_element));
      if (unlikely (element == NULL))
        {
          /* Could not allocate the tree element.  */
          pool->last_page = 0;
          return 0;
        }
      element->page = page;
      element->free_space = ((struct dissected_page_header *) page)->free_space;
      if (unlikely (gl_oset_nx_add (pool->managed_pages, element) < 0))
        {
          /* Could not insert the element into the tree.  */
          free (element);
          pool->last_page = 0;
          return 0;
        }
      ((struct dissected_page_header *) page)->tree_element = element;
      pool->freeable_page = 0;

      /* A completely free page must be able to satisfy the request.  */
      uintptr_t block = pool->allocate_block_in_page (size, page);
      if (block == 0)
        abort ();
      add_update (page, pool);
      pool->last_page = page;
      return block;
    }

  /* Allocate and register a fresh page from the system.  */
  page = ALLOC_PAGES (PAGESIZE);
  if (unlikely (page == 0))
    {
      /* Out of memory.  */
      pool->last_page = 0;
      return 0;
    }
  /* ALLOC_PAGES must return page-aligned memory.  */
  if ((page & (PAGESIZE - 1)) != 0)
    abort ();

  pool->init_page (page);
  struct page_tree_element *element =
    (struct page_tree_element *) malloc (sizeof (struct page_tree_element));
  if (unlikely (element == NULL))
    {
      /* Could not allocate the tree element; release the page again.  */
      FREE_PAGES (page, PAGESIZE);
      pool->last_page = 0;
      return 0;
    }
  element->page = page;
  element->free_space = ((struct dissected_page_header *) page)->free_space;
  if (unlikely (gl_oset_nx_add (pool->managed_pages, element) < 0))
    {
      /* Could not insert the element; release everything again.  */
      free (element);
      FREE_PAGES (page, PAGESIZE);
      pool->last_page = 0;
      return 0;
    }
  ((struct dissected_page_header *) page)->tree_element = element;

  /* A completely free page must be able to satisfy the request.  */
  uintptr_t block = pool->allocate_block_in_page (size, page);
  if (block == 0)
    abort ();
  add_update (page, pool);
  pool->last_page = page;
  return block;
}
805
/* Returns BLOCK (located in PAGE) to POOL.  The caller must hold the lock
   in multithreaded use.  */
static void
free_block_from_pool (uintptr_t block, uintptr_t page, struct page_pool *pool)
{
  if (pool->page_capacity == 0)
    /* init_page_pool has not run, so no block was ever allocated.  */
    abort ();

  pool->free_block_in_page (block, page);

  struct dissected_page_header *pageptr = (struct dissected_page_header *) page;
  if (likely (pageptr->free_space != pool->page_capacity))
    {
      /* The page still contains blocks; just queue a free_space update.  */
      add_update (page, pool);
    }
  else
    {
      /* The page has become entirely free: unregister it ...  */
      struct page_tree_element *element = pageptr->tree_element;
      if (!gl_oset_remove (pool->managed_pages, element))
        abort ();
      free (element);

      if (pool->last_page == page)
        pool->last_page = 0;

      drop_update (page, pool);

      /* ... and keep it in reserve, releasing any page already there.  */
      if (pool->freeable_page != 0)
        FREE_PAGES (pool->freeable_page, PAGESIZE);

      pool->freeable_page = page;
    }
}
845
846
847
/* Lock protecting the page pools when the process is multithreaded.  */
gl_lock_define_initialized(static, ssfmalloc_lock)
849
850
851
852
853
/* Header at the start of a run of pages holding a single large block.
   Mirrors the layout of struct any_page_header.  */
struct large_block_header
{
#if PAGE_RESERVED_HEADER_SIZE > 0
  void * reserved[PAGE_RESERVED_HEADER_SIZE / sizeof (void *)];
#endif
  unsigned char page_type;
};
861
862
863
/* Bookkeeping stored immediately before a large block: the total size of
   its page run, needed by free_block to release it.  */
struct large_block_caption
{
  size_t pages_size;
};
868
869
/* Offset of a large block from the start of its page run: header plus
   caption, rounded up to a multiple of ALIGNMENT.  */
#define LARGE_BLOCK_OFFSET \
  ((sizeof (struct large_block_header) + sizeof (struct large_block_caption) \
    + ALIGNMENT - 1) & -ALIGNMENT)
873
874
875
876
877
/* Allocates a block of SIZE bytes, aligned to ALIGNMENT.  Returns 0 upon
   failure.  Large requests get their own run of pages; small and medium
   requests are served from the shared pools under the lock.  */
static uintptr_t
allocate_block (size_t size)
{
  uintptr_t block;

  if (unlikely (size > MEDIUM_BLOCKS_PAGE_CAPACITY))
    {
      /* Large block: allocate a dedicated page run big enough for the
         caption plus the block.  No locking: no shared state involved.  */
      size_t pages_size = (size + LARGE_BLOCK_OFFSET + PAGESIZE - 1) & -PAGESIZE;
      uintptr_t pages = ALLOC_PAGES (pages_size);
      if (unlikely (pages == 0))
        /* Out of memory.  */
        return 0;
      /* ALLOC_PAGES must return page-aligned memory.  */
      if ((pages & (PAGESIZE - 1)) != 0)
        abort ();
      ((struct any_page_header *) pages)->page_type = large_page_type;
      block = pages + LARGE_BLOCK_OFFSET;
      /* Remember the run's size for free_block.  */
      ((struct large_block_caption *) block)[-1].pages_size = pages_size;
    }
  else
    {
      /* Small or medium block: pick the pool by size and allocate under
         the lock when multithreaded.  */
      bool mt = gl_multithreaded ();
      if (mt) gl_lock_lock (ssfmalloc_lock);
      struct page_pool *pool =
        (size <= SMALL_BLOCK_MAX_SIZE ? &small_block_pages : &medium_block_pages);
      block = allocate_block_from_pool (size, pool);
      if (mt) gl_lock_unlock (ssfmalloc_lock);
    }
  return block;
}
909
910
/* Frees a block previously returned by allocate_block.  BLOCK must be
   nonzero and ALIGNMENT-aligned.  */
static void
free_block (uintptr_t block)
{
  if (block == 0 || (block % ALIGNMENT) != 0)
    /* Not an address that allocate_block could have returned.  */
    abort ();
  /* The page type is stored in the header of the containing page.  */
  uintptr_t pages = block & -PAGESIZE;
  unsigned char type = ((struct any_page_header *) pages)->page_type;
  if (unlikely (type == large_page_type))
    {
      /* Large block: it sits at a fixed offset in its own page run; read
         the run's size back from the caption and release the run.  */
      if (block != pages + LARGE_BLOCK_OFFSET)
        abort ();
      size_t pages_size = ((struct large_block_caption *) block)[-1].pages_size;
      if ((pages_size & (PAGESIZE - 1)) != 0)
        /* Corrupted caption.  */
        abort ();
      FREE_PAGES (pages, pages_size);
    }
  else
    {
      /* Small or medium block: return it to its pool under the lock.  */
      bool mt = gl_multithreaded ();
      if (mt) gl_lock_lock (ssfmalloc_lock);
      struct page_pool *pool;
      if (type == small_page_type)
        pool = &small_block_pages;
      else if (type == medium_page_type)
        pool = &medium_block_pages;
      else
        /* Corrupted page header.  */
        abort ();
      free_block_from_pool (block, pages, pool);
      if (mt) gl_lock_unlock (ssfmalloc_lock);
    }
}