/* allchblk.c */

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 * Copyright (c) 2008-2022 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

18 #include "private/gc_priv.h"
19
/*
 * If nonzero, `GC_allochblk` is always allowed to split blocks from any
 * free list (see the `split_limit` computation there), i.e. the whole
 * heap is used before triggering a collection or expansion.
 */
#ifdef GC_USE_ENTIRE_HEAP
int GC_use_entire_heap = TRUE;
#else
int GC_use_entire_heap = FALSE;
#endif
25
/*
 * Free heap blocks are kept on one of several free lists, depending on
 * the size of the block. Each free list is doubly linked. Adjacent
 * free blocks are coalesced.
 */

/*
 * Largest block we will be allocated starting on a black-listed block.
 * Must be not smaller than `HBLKSIZE`.
 */
#define MAX_BLACK_LIST_ALLOC (2 * HBLKSIZE)

/* Sizes up to this many `hblk` entities each have their own free list. */
#define UNIQUE_THRESHOLD 32

/*
 * Sizes of at least this many heap blocks are mapped to a single free
 * list.
 */
#define HUGE_THRESHOLD 256

/* In between sizes map this many distinct sizes to a single bin. */
#define FL_COMPRESSION 8

/* Index of the last free list; lists are indexed 0 through `N_HBLK_FLS`. */
#define N_HBLK_FLS \
  ((HUGE_THRESHOLD - UNIQUE_THRESHOLD) / FL_COMPRESSION + UNIQUE_THRESHOLD)

/*
 * List of completely empty heap blocks. Linked through `hb_next` field
 * of header structure associated with block. Remains externally visible
 * as used by GNU `gcj`.
 */
#ifndef GC_GCJ_SUPPORT
STATIC
#endif
struct hblk *GC_hblkfreelist[N_HBLK_FLS + 1] = { 0 };
62
63 GC_API void GC_CALL
64 GC_iterate_free_hblks(GC_walk_free_blk_fn fn, void *client_data)
65 {
66 int i;
67
68 for (i = 0; i <= N_HBLK_FLS; ++i) {
69 struct hblk *h;
70
71 for (h = GC_hblkfreelist[i]; h != NULL; h = HDR(h)->hb_next) {
72 fn(h, i, client_data);
73 }
74 }
75 }
76
/*
 * Number of free bytes on each list (indexed like `GC_hblkfreelist`).
 * Remains visible to `gcj`.
 */
#ifndef GC_GCJ_SUPPORT
STATIC
#endif
word GC_free_bytes[N_HBLK_FLS + 1] = { 0 };
82
83 /*
84 * Return the largest `n` such that the number of free bytes on lists
85 * `n` .. `N_HBLK_FLS` is greater or equal to `GC_max_large_allocd_bytes`
86 * minus `GC_large_allocd_bytes`. If there is no such `n`, return 0.
87 */
88 GC_INLINE size_t
89 GC_enough_large_bytes_left(void)
90 {
91 size_t n;
92 word bytes = GC_large_allocd_bytes;
93
94 GC_ASSERT(GC_max_large_allocd_bytes <= GC_heapsize);
95 for (n = N_HBLK_FLS; n > 0; n--) {
96 bytes += GC_free_bytes[n];
97 if (bytes >= GC_max_large_allocd_bytes)
98 break;
99 }
100 return n;
101 }
102
103 /* Map a number of blocks to the appropriate large block free-list index. */
104 STATIC size_t
105 GC_hblk_fl_from_blocks(size_t blocks_needed)
106 {
107 if (blocks_needed <= UNIQUE_THRESHOLD)
108 return blocks_needed;
109 if (blocks_needed >= HUGE_THRESHOLD)
110 return N_HBLK_FLS;
111 return (blocks_needed - UNIQUE_THRESHOLD) / FL_COMPRESSION
112 + UNIQUE_THRESHOLD;
113 }
114
/* Headers of the previous/next block on the same free list as `hhdr`. */
#define PHDR(hhdr) HDR((hhdr)->hb_prev)
#define NHDR(hhdr) HDR((hhdr)->hb_next)

#ifdef USE_MUNMAP
/* Nonzero if the block's memory is currently mapped. */
# define IS_MAPPED(hhdr) (((hhdr)->hb_flags & WAS_UNMAPPED) == 0)
#else
# define IS_MAPPED(hhdr) TRUE
#endif /* !USE_MUNMAP */
123
124 #if !defined(NO_DEBUGGING) || defined(GC_ASSERTIONS)
125 static void GC_CALLBACK
126 add_hb_sz(struct hblk *h, int i, void *total_free_ptr)
127 {
128 UNUSED_ARG(i);
129 *(word *)total_free_ptr += HDR(h)->hb_sz;
130 # if defined(CPPCHECK)
131 GC_noop1_ptr(h);
132 # endif
133 }
134
135 GC_INNER word
136 GC_compute_large_free_bytes(void)
137 {
138 word total_free = 0;
139
140 GC_iterate_free_hblks(add_hb_sz, &total_free);
141 return total_free;
142 }
143 #endif /* !NO_DEBUGGING || GC_ASSERTIONS */
144
145 #ifndef NO_DEBUGGING
/*
 * Callback for `GC_print_hblkfreelist`: print one free-list entry.
 * `*prev_index_ptr` holds the last list index whose heading was printed,
 * so each list's heading is emitted only once.
 */
static void GC_CALLBACK
print_hblkfreelist_item(struct hblk *h, int i, void *prev_index_ptr)
{
  hdr *hhdr = HDR(h);

# if defined(CPPCHECK)
  GC_noop1_ptr(h);
# endif
  if (i != *(int *)prev_index_ptr) {
    GC_printf("Free list %d (total size %lu):\n", i,
              (unsigned long)GC_free_bytes[i]);
    *(int *)prev_index_ptr = i;
  }

# ifdef NO_BLACK_LISTING
  GC_printf("\t%p size %lu\n", (void *)h, (unsigned long)hhdr->hb_sz);
# else
  /* Report whether the block start, or some part of it, is black-listed. */
  GC_printf("\t%p size %lu %s black listed\n", (void *)h,
            (unsigned long)hhdr->hb_sz,
            GC_is_black_listed(h, HBLKSIZE) != NULL ? "start"
            : GC_is_black_listed(h, hhdr->hb_sz) != NULL ? "partially"
            : "not");
# endif
}
170
171 void
172 GC_print_hblkfreelist(void)
173 {
174 word total;
175 int prev_index = -1;
176
177 GC_iterate_free_hblks(print_hblkfreelist_item, &prev_index);
178 GC_printf("GC_large_free_bytes: %lu\n", (unsigned long)GC_large_free_bytes);
179 total = GC_compute_large_free_bytes();
180 if (total != GC_large_free_bytes)
181 GC_err_printf("GC_large_free_bytes INCONSISTENT!! Should be: %lu\n",
182 (unsigned long)total);
183 }
184
185 /*
186 * Return the free-list index on which the block described by the header
187 * appears, or -1 if it appears nowhere.
188 */
189 static int
190 free_list_index_of(const hdr *wanted)
191 {
192 int i;
193
194 for (i = 0; i <= N_HBLK_FLS; ++i) {
195 const struct hblk *h;
196 const hdr *hhdr;
197
198 for (h = GC_hblkfreelist[i]; h != NULL; h = hhdr->hb_next) {
199 hhdr = HDR(h);
200 if (hhdr == wanted)
201 return i;
202 }
203 }
204 return -1;
205 }
206
/*
 * Invoke `fn` for every contiguous region of the heap, classifying each
 * region as a whole section, a free (mapped or unmapped) block, a used
 * block, its trailing padding, or a block with no registered header.
 */
GC_API void GC_CALL
GC_foreach_heap_section_inner(GC_heap_section_proc fn, void *client_data)
{
  size_t i;

  /*
   * The collector memory is organized in heap sections that are split in
   * blocks. Each such block has a header (obtained by `HDR(p)`) and the
   * block size is aligned to `HBLKSIZE`. The block headers are kept
   * separately from the memory they point to.
   */
  for (i = 0; i < GC_n_heap_sects; ++i) {
    ptr_t start = GC_heap_sects[i].hs_start;
    ptr_t finish = start + GC_heap_sects[i].hs_bytes;
    ptr_t p;

    /* Merge in contiguous sections. */
    while (i + 1 < GC_n_heap_sects
           && GC_heap_sects[i + 1].hs_start == finish) {
      ++i;
      finish = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
    }

    fn(start, finish, GC_HEAP_SECTION_TYPE_WHOLE_SECT, client_data);
    for (p = start; ADDR_LT(p, finish);) {
      /*
       * Lookup into 2-level tree data structure which uses address
       * as a hash key to find the block header.
       */
      hdr *hhdr = HDR(p);

      if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
        /*
         * The pointer has no header registered in the headers cache.
         * Skip one `HBLKSIZE` and retry. Might be mapped or not.
         */
        fn(p, p + HBLKSIZE, GC_HEAP_SECTION_TYPE_FORWARDING, client_data);
        p += HBLKSIZE;
        continue;
      }

      if (HBLK_IS_FREE(hhdr)) {
        /*
         * The block is marked as free. Note: `hb_sz` is the size in bytes
         * of the whole block.
         */
        fn(p, p + hhdr->hb_sz,
           IS_MAPPED(hhdr) ? GC_HEAP_SECTION_TYPE_FREE
                           : GC_HEAP_SECTION_TYPE_UNMAPPED,
           client_data);
        p += hhdr->hb_sz;
      } else {
        /*
         * This heap block is used. Report also the padding, if any.
         * Note: `hb_sz` is the size (in bytes) of objects in the block.
         */
        ptr_t blockEnd = p + HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr->hb_sz);
        ptr_t usedBlockEnd = p + hhdr->hb_sz;

        fn(p, usedBlockEnd, GC_HEAP_SECTION_TYPE_USED, client_data);
        if (ADDR_LT(usedBlockEnd, blockEnd))
          fn(usedBlockEnd, blockEnd, GC_HEAP_SECTION_TYPE_PADDING,
             client_data);
        p = blockEnd;
      }
    }
  }
}
275
/*
 * Callback for `GC_dump_regions`: print one heap region; for free
 * blocks, also cross-check that the block is on the free list that
 * matches its size.
 */
static void GC_CALLBACK
dump_regions_proc(void *start, void *finish, GC_heap_section_type type,
                  void *client_data)
{
  hdr *hhdr;
  int correct_index, actual_index;

  UNUSED_ARG(client_data);
  switch (type) {
  case GC_HEAP_SECTION_TYPE_WHOLE_SECT:
    GC_printf("***Section from %p to %p\n", start, finish);
    break;
  case GC_HEAP_SECTION_TYPE_FORWARDING:
    GC_printf("\t%p Missing header!!(%p)\n", start, (void *)HDR(start));
    break;
  case GC_HEAP_SECTION_TYPE_FREE:
  case GC_HEAP_SECTION_TYPE_UNMAPPED:
    hhdr = HDR(start);
    GC_printf("\t%p\tfree block of size 0x%lx bytes%s\n", start,
              (unsigned long)hhdr->hb_sz,
              type == GC_HEAP_SECTION_TYPE_UNMAPPED ? " (unmapped)" : "");
    /* Sanity check: the block should appear on its size-appropriate list. */
    actual_index = free_list_index_of(hhdr);
    correct_index = (int)GC_hblk_fl_from_blocks(divHBLKSZ(hhdr->hb_sz));
    if (-1 == actual_index) {
      GC_printf("\t\tBlock not on free list %d!!\n", correct_index);
    } else if (correct_index != actual_index) {
      GC_printf("\t\tBlock on list %d, should be on %d!!\n", actual_index,
                correct_index);
    }
    break;
  case GC_HEAP_SECTION_TYPE_USED:
    GC_printf("\t%p\tused for blocks of size 0x%lx bytes\n", start,
              (unsigned long)(ADDR(finish) - ADDR(start)));
    break;
  case GC_HEAP_SECTION_TYPE_PADDING:
    /* Empty. */
    break;
  }
}
315
/* Print the heap layout (sections and per-block regions); debugging aid. */
GC_API void GC_CALL
GC_dump_regions(void)
{
  GC_foreach_heap_section_inner(dump_regions_proc, NULL);
}
321 #endif /* !NO_DEBUGGING */
322
/*
 * Initialize `hhdr` for a `block` containing the indicated size
 * `lb_adjusted` and `kind` of objects. Return `FALSE` on failure
 * (object-map entry could not be added); in that case the header is
 * still left looking like a valid (pointer-free-style large) block.
 */
static GC_bool
setup_header(hdr *hhdr, struct hblk *block, size_t lb_adjusted, int kind,
             unsigned flags)
{
  const struct obj_kind *ok;
  word descr;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(lb_adjusted >= ALIGNMENT);
#ifndef MARK_BIT_PER_OBJ
  if (lb_adjusted > MAXOBJBYTES)
    flags |= LARGE_BLOCK;
#endif
  ok = &GC_obj_kinds[kind];
#ifdef ENABLE_DISCLAIM
  /* Propagate per-kind disclaim/mark settings into the block flags. */
  if (ok->ok_disclaim_proc)
    flags |= HAS_DISCLAIM;
  if (ok->ok_mark_unconditionally)
    flags |= MARK_UNCONDITIONALLY;
#endif

  /* Set size, kind and mark procedure fields. */
  hhdr->hb_sz = lb_adjusted;
  hhdr->hb_obj_kind = (unsigned char)kind;
  hhdr->hb_flags = (unsigned char)flags;
  hhdr->hb_block = block;
  descr = ok->ok_descriptor;
#if ALIGNMENT > GC_DS_TAGS
  /*
   * An extra byte is not added in case of ignore-off-page allocated objects
   * not smaller than `HBLKSIZE`.
   */
  if (EXTRA_BYTES != 0 && (flags & IGNORE_OFF_PAGE) != 0 && kind == NORMAL
      && lb_adjusted >= HBLKSIZE)
    descr += ALIGNMENT; /*< or set to 0 */
#endif
  if (ok->ok_relocate_descr)
    descr += lb_adjusted;
  hhdr->hb_descr = descr;

#ifdef MARK_BIT_PER_OBJ
  /*
   * Set `hb_inv_sz` as portably as possible. We set it to the smallest
   * value such that `lb_adjusted * inv_sz >= 2**32`.
   * This may be more precision than necessary.
   */
  if (lb_adjusted > MAXOBJBYTES) {
    hhdr->hb_inv_sz = LARGE_INV_SZ;
  } else {
    unsigned32 inv_sz;

    GC_ASSERT(lb_adjusted > 1);
# if CPP_WORDSZ > 32
    inv_sz = (unsigned32)(((word)1 << 32) / lb_adjusted);
    if (((inv_sz * (word)lb_adjusted) >> 32) == 0)
      ++inv_sz;
# else
    /*
     * Compute in two steps (shift by 31, then by 1) to avoid shifting
     * a 32-bit value by 32; then round `inv_sz` up until the product
     * reaches `2**32` (i.e. wraps to a value not above `lb_adjusted`).
     */
    inv_sz = (((unsigned32)1 << 31) / lb_adjusted) << 1;
    while ((inv_sz * lb_adjusted) > lb_adjusted)
      inv_sz++;
# endif
# if (CPP_WORDSZ == 32) && defined(__GNUC__)
    GC_ASSERT(((1ULL << 32) + lb_adjusted - 1) / lb_adjusted == inv_sz);
# endif
    hhdr->hb_inv_sz = inv_sz;
  }
#else
  {
    size_t lg = BYTES_TO_GRANULES(lb_adjusted);

    if (UNLIKELY(!GC_add_map_entry(lg))) {
      /* Make it look like a valid block. */
      hhdr->hb_sz = HBLKSIZE;
      hhdr->hb_descr = 0;
      hhdr->hb_flags |= LARGE_BLOCK;
      hhdr->hb_map = NULL;
      return FALSE;
    }
    hhdr->hb_map = GC_obj_map[(hhdr->hb_flags & LARGE_BLOCK) != 0 ? 0 : lg];
  }
#endif

  /* Clear mark bits. */
  GC_clear_hdr_marks(hhdr);

  hhdr->hb_last_reclaimed = (unsigned short)GC_gc_no;
  return TRUE;
}
415
/*
 * Remove `hhdr` from the free list (it is assumed to be specified by
 * `index`). Unlinks the doubly-linked list node and decrements the
 * per-list free-byte count.
 */
STATIC void
GC_remove_from_fl_at(hdr *hhdr, size_t index)
{
  GC_ASSERT(modHBLKSZ(hhdr->hb_sz) == 0);
  if (hhdr->hb_prev == 0) {
    /* The block is at the head of its list; update the list head. */
    GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
    GC_hblkfreelist[index] = hhdr->hb_next;
  } else {
    hdr *phdr;
    GET_HDR(hhdr->hb_prev, phdr);
    phdr->hb_next = hhdr->hb_next;
  }
  /* We always need index to maintain free counts. */
  GC_ASSERT(GC_free_bytes[index] >= hhdr->hb_sz);
  GC_free_bytes[index] -= hhdr->hb_sz;
  if (hhdr->hb_next != NULL) {
    hdr *nhdr;

    GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
    GET_HDR(hhdr->hb_next, nhdr);
    nhdr->hb_prev = hhdr->hb_prev;
  }
}
443
/*
 * Remove `hhdr` from the appropriate free list (we assume it is on the
 * size-appropriate free list); the list index is recomputed from the
 * block size.
 */
GC_INLINE void
GC_remove_from_fl(hdr *hhdr)
{
  GC_remove_from_fl_at(hhdr, GC_hblk_fl_from_blocks(divHBLKSZ(hhdr->hb_sz)));
}
453
454 /* Return a pointer to the block ending just before `h`, if any. */
455 static struct hblk *
456 get_block_ending_at(struct hblk *h)
457 {
458 struct hblk *p = h - 1;
459 hdr *hhdr;
460
461 GET_HDR(p, hhdr);
462 if (hhdr != NULL) {
463 return GC_find_starting_hblk(p, &hhdr);
464 }
465 p = GC_prev_block(p);
466 if (p != NULL) {
467 hhdr = HDR(p);
468 if ((ptr_t)p + hhdr->hb_sz == (ptr_t)h) {
469 return p;
470 }
471 }
472 return NULL;
473 }
474
475 /* Return a pointer to the free block ending just before `h`, if any. */
476 STATIC struct hblk *
477 GC_free_block_ending_at(struct hblk *h)
478 {
479 struct hblk *p = get_block_ending_at(h);
480
481 if (p /* `!= NULL` */) { /*< CPPCHECK */
482 const hdr *hhdr = HDR(p);
483
484 if (HBLK_IS_FREE(hhdr)) {
485 return p;
486 }
487 }
488 return 0;
489 }
490
/*
 * Add `hhdr` to the appropriate free list. We maintain individual
 * free lists sorted by address. The block is pushed at the head of
 * its list and marked with `FREE_BLK`.
 */
STATIC void
GC_add_to_fl(struct hblk *h, hdr *hhdr)
{
  size_t index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr->hb_sz));
  struct hblk *second = GC_hblkfreelist[index];

#if defined(GC_ASSERTIONS) && !defined(USE_MUNMAP) && !defined(CHERI_PURECAP)
  {
    /* Sanity check: neither neighbor should also be free (unmerged). */
    struct hblk *next = (struct hblk *)((ptr_t)h + hhdr->hb_sz);
    const hdr *nexthdr = HDR(next);
    struct hblk *prev = GC_free_block_ending_at(h);
    const hdr *prevhdr = HDR(prev);

    GC_ASSERT(NULL == nexthdr || !HBLK_IS_FREE(nexthdr)
              || (GC_heapsize & SIGNB) != 0);
    /* In the last case, blocks may be too large to be merged. */
    GC_ASSERT(NULL == prev || !HBLK_IS_FREE(prevhdr)
              || (GC_heapsize & SIGNB) != 0);
  }
#endif
  GC_ASSERT(modHBLKSZ(hhdr->hb_sz) == 0);
  GC_hblkfreelist[index] = h;
  GC_free_bytes[index] += hhdr->hb_sz;
  GC_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes);
  hhdr->hb_next = second;
  hhdr->hb_prev = NULL;
  if (second /* `!= NULL` */) { /*< CPPCHECK */
    hdr *second_hdr;

    GET_HDR(second, second_hdr);
    second_hdr->hb_prev = h;
  }
  hhdr->hb_flags |= FREE_BLK;
}
529
/* Nonzero if summing the two block sizes would set the size sign bit, */
/* i.e. merging the blocks would overflow the `hb_sz` field. */
#define BLOCKS_MERGE_OVERFLOW(hhdr, nexthdr) \
  ((((hhdr)->hb_sz + (nexthdr)->hb_sz) & SIZET_SIGNB) != 0)
532
533 #ifdef USE_MUNMAP
534
/*
 * `GC_unmap_old` will avoid creating more than this many unmapped regions,
 * but an unmapped region may be split again so exceeding the limit.
 */
# ifdef COUNT_UNMAPPED_REGIONS

/*
 * Return the change in number of unmapped regions if the block `h` swaps
 * from its current state of mapped/unmapped to the opposite state.
 * The result is -1, 0 or +1 depending on whether the two neighboring
 * blocks (if contiguous) are themselves unmapped.
 */
static int
calc_num_unmapped_regions_delta(struct hblk *h, hdr *hhdr)
{
  struct hblk *prev = get_block_ending_at(h);
  struct hblk *next;
  GC_bool prev_unmapped = FALSE;
  GC_bool next_unmapped = FALSE;

  next = GC_next_block((struct hblk *)((ptr_t)h + hhdr->hb_sz), TRUE);
  /* Ensure next is contiguous with `h`. */
  if (next != HBLK_PAGE_ALIGNED((ptr_t)h + hhdr->hb_sz)) {
    next = NULL;
  }
  if (prev != NULL) {
    const hdr *prevhdr = HDR(prev);
    prev_unmapped = !IS_MAPPED(prevhdr);
  }
  if (next != NULL) {
    const hdr *nexthdr = HDR(next);
    next_unmapped = !IS_MAPPED(nexthdr);
  }

  if (prev_unmapped && next_unmapped) {
    /*
     * If `h` is unmapped, merge two unmapped regions into one.
     * If `h` is remapped, split one unmapped region into two.
     */
    return IS_MAPPED(hhdr) ? -1 : 1;
  }
  if (!prev_unmapped && !next_unmapped) {
    /*
     * If `h` is unmapped, create an isolated unmapped region.
     * If `h` is remapped, remove it.
     */
    return IS_MAPPED(hhdr) ? 1 : -1;
  }
  /*
   * If `h` is unmapped, merge it with previous or next unmapped region.
   * If `h` is remapped, reduce either previous or next unmapped region.
   * In either way, no change to the number of unmapped regions.
   */
  return 0;
}
# endif /* COUNT_UNMAPPED_REGIONS */
589
/*
 * Update `GC_num_unmapped_regions` assuming the block `h` changes from
 * its current state of mapped/unmapped to the opposite state.
 * A no-op unless `COUNT_UNMAPPED_REGIONS` is defined.
 */
GC_INLINE void
GC_adjust_num_unmapped(struct hblk *h, hdr *hhdr)
{
# ifdef COUNT_UNMAPPED_REGIONS
  GC_num_unmapped_regions += calc_num_unmapped_regions_delta(h, hhdr);
# else
  UNUSED_ARG(h);
  UNUSED_ARG(hhdr);
# endif
}
604
/*
 * Unmap free blocks that have not been reclaimed for at least
 * `threshold` collections, subject (when `COUNT_UNMAPPED_REGIONS`) to
 * the soft limit on the number of unmapped regions.
 */
GC_INNER void
GC_unmap_old(unsigned threshold)
{
  size_t i;

# ifdef COUNT_UNMAPPED_REGIONS
  /*
   * Skip unmapping if we have already exceeded the soft limit.
   * This forgoes any opportunities to merge unmapped regions though.
   */
  if (GC_num_unmapped_regions >= GC_UNMAPPED_REGIONS_SOFT_LIMIT)
    return;
# endif

  for (i = 0; i <= N_HBLK_FLS; ++i) {
    struct hblk *h;
    hdr *hhdr;

    for (h = GC_hblkfreelist[i]; h != NULL; h = hhdr->hb_next) {
      hhdr = HDR(h);
      if (!IS_MAPPED(hhdr))
        continue;

      /*
       * Check that the interval is not smaller than the `threshold`.
       * The truncated counter value wrapping is handled correctly.
       */
      if ((unsigned short)(GC_gc_no - hhdr->hb_last_reclaimed)
          >= (unsigned short)threshold) {
# ifdef COUNT_UNMAPPED_REGIONS
        /*
         * Continue with unmapping the block only if it will not create
         * too many unmapped regions, or if unmapping reduces the number
         * of regions.
         */
        int delta = calc_num_unmapped_regions_delta(h, hhdr);
        GC_signed_word regions = GC_num_unmapped_regions + delta;

        if (delta >= 0 && regions >= GC_UNMAPPED_REGIONS_SOFT_LIMIT) {
          GC_COND_LOG_PRINTF("Unmapped regions limit reached!\n");
          return;
        }
        GC_num_unmapped_regions = regions;
# endif
        GC_unmap((ptr_t)h, hhdr->hb_sz);
        hhdr->hb_flags |= WAS_UNMAPPED;
      }
    }
  }
}
655
656 GC_INNER GC_bool
657 GC_merge_unmapped(void)
658 {
659 size_t i;
660 GC_bool merged = FALSE;
661
662 for (i = 0; i <= N_HBLK_FLS; ++i) {
663 struct hblk *h = GC_hblkfreelist[i];
664
665 while (h != NULL) {
666 struct hblk *next;
667 hdr *hhdr, *nexthdr;
668 size_t size, next_size;
669
670 GET_HDR(h, hhdr);
671 size = hhdr->hb_sz;
672 next = (struct hblk *)((ptr_t)h + size);
673 GET_HDR(next, nexthdr);
674 /* Coalesce with successor, if possible. */
675 if (NULL == nexthdr || !HBLK_IS_FREE(nexthdr)
676 || BLOCKS_MERGE_OVERFLOW(hhdr, nexthdr)) {
677 /* Not mergeable with the successor. */
678 h = hhdr->hb_next;
679 continue;
680 }
681
682 next_size = nexthdr->hb_sz;
683 # ifdef CHERI_PURECAP
684 /* FIXME: Coalesce with super-capability. */
685 if (!CAPABILITY_COVERS_RANGE(h, ADDR(next), ADDR(next) + nextsize)) {
686 h = hhdr->hb_next;
687 continue;
688 }
689 # endif
690
691 /*
692 * Note that we usually try to avoid adjacent free blocks that are
693 * either both mapped or both unmapped. But that is not guaranteed
694 * to hold since we remap blocks when we split them, and do not merge
695 * at that point. It may also not hold if the merged block would be
696 * too big.
697 */
698 if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
699 /* Make both consistent, so that we can merge. */
700 if (size > next_size) {
701 GC_adjust_num_unmapped(next, nexthdr);
702 GC_remap((ptr_t)next, next_size);
703 } else {
704 GC_adjust_num_unmapped(h, hhdr);
705 GC_unmap((ptr_t)h, size);
706 GC_unmap_gap((ptr_t)h, size, (ptr_t)next, next_size);
707 hhdr->hb_flags |= WAS_UNMAPPED;
708 }
709 } else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) {
710 if (size > next_size) {
711 GC_adjust_num_unmapped(next, nexthdr);
712 GC_unmap((ptr_t)next, next_size);
713 GC_unmap_gap((ptr_t)h, size, (ptr_t)next, next_size);
714 } else {
715 GC_adjust_num_unmapped(h, hhdr);
716 GC_remap((ptr_t)h, size);
717 hhdr->hb_flags &= (unsigned char)~WAS_UNMAPPED;
718 hhdr->hb_last_reclaimed = nexthdr->hb_last_reclaimed;
719 }
720 } else if (!IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
721 /* Unmap any gap in the middle. */
722 GC_unmap_gap((ptr_t)h, size, (ptr_t)next, next_size);
723 }
724 /* If they are both unmapped, we merge, but leave unmapped. */
725 GC_remove_from_fl_at(hhdr, i);
726 GC_remove_from_fl(nexthdr);
727 hhdr->hb_sz += nexthdr->hb_sz;
728 GC_remove_header(next);
729 GC_add_to_fl(h, hhdr);
730 merged = TRUE;
731 /* Start over at the beginning of list. */
732 h = GC_hblkfreelist[i];
733 }
734 }
735 return merged;
736 }
737
738 #endif /* USE_MUNMAP */
739
/*
 * Return a pointer to a block starting at `h`. Memory for the block
 * is mapped. Remove the block from its free list, and return the
 * remainder (if any) to its appropriate free list. May fail by
 * returning `NULL`. The header for the returned block must be set up
 * by the caller. If the returned pointer is not `NULL`, then `hhdr`
 * is the header for it.
 */
STATIC struct hblk *
GC_get_first_part(struct hblk *h, hdr *hhdr, size_t size_needed, size_t index)
{
  size_t total_size;
  struct hblk *rest;
  hdr *rest_hdr;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(modHBLKSZ(size_needed) == 0);
  total_size = hhdr->hb_sz;
  GC_ASSERT(modHBLKSZ(total_size) == 0);
  GC_remove_from_fl_at(hhdr, index);
  if (total_size == size_needed)
    return h; /*< exact fit; no remainder to return */

  rest = (struct hblk *)((ptr_t)h + size_needed);
  rest_hdr = GC_install_header(rest);
  if (UNLIKELY(NULL == rest_hdr)) {
    /* FIXME: This is likely to be very bad news... */
    WARN("Header allocation failed: dropping block\n", 0);
    return NULL;
  }
  rest_hdr->hb_block = rest;
  rest_hdr->hb_sz = total_size - size_needed;
  rest_hdr->hb_flags = 0;
#ifdef GC_ASSERTIONS
  /* Mark `h` as non-free, to avoid assertion about adjacent free blocks. */
  hhdr->hb_flags &= (unsigned char)~FREE_BLK;
#endif
  GC_add_to_fl(rest, rest_hdr);
  return h;
}
780
/*
 * Split the block. `hbp` is a free block; `last_hbp` points at address
 * inside it; a new header for `last_hbp` is assumed to be already set up.
 * Fix up the header of `hbp` to reflect the fact that it is being split,
 * move it to the appropriate free list. `last_hbp` replaces `hbp` in the
 * original free list. `last_hdr` is not completely filled in, since it
 * is about to be allocated. It may, in fact, end up on the wrong free
 * list for its size. That is not a disaster, since `last_hbp` is to be
 * allocated by our caller. (Hence adding it to a free list is silly.
 * But this path is hopefully rare enough that it does not matter.
 * The code is cleaner this way.)
 */
STATIC void
GC_split_block(struct hblk *hbp, hdr *hhdr, struct hblk *last_hbp,
               hdr *last_hdr, size_t index /* of free list */)
{
  size_t h_size = (size_t)((ptr_t)last_hbp - (ptr_t)hbp);
  struct hblk *prev = hhdr->hb_prev;
  struct hblk *next = hhdr->hb_next;

  /* Replace `hbp` with `last_hbp` on its free list. */
  last_hdr->hb_prev = prev;
  last_hdr->hb_next = next;
  last_hdr->hb_block = last_hbp;
  last_hdr->hb_sz = hhdr->hb_sz - h_size;
  last_hdr->hb_flags = 0;
  if (prev /* `!= NULL` */) { /*< CPPCHECK */
    HDR(prev)->hb_next = last_hbp;
  } else {
    GC_hblkfreelist[index] = last_hbp;
  }
  if (next /* `!= NULL` */) {
    HDR(next)->hb_prev = last_hbp;
  }
  GC_ASSERT(GC_free_bytes[index] > h_size);
  GC_free_bytes[index] -= h_size;
#ifdef USE_MUNMAP
  hhdr->hb_last_reclaimed = (unsigned short)GC_gc_no;
#endif
  /* The leading `h_size` bytes go back to a size-appropriate free list. */
  hhdr->hb_sz = h_size;
  GC_add_to_fl(hbp, hhdr);
  last_hdr->hb_flags |= FREE_BLK;
}
824
/* Forward declaration; see the definition below for semantics. */
STATIC struct hblk *GC_allochblk_nth(size_t lb_adjusted, int kind,
                                     unsigned flags, size_t index,
                                     int may_split, size_t align_m1);

#ifdef USE_MUNMAP
/* Special `may_split` value: avoid remapping followed by splitting. */
# define AVOID_SPLIT_REMAPPED 2
#endif
832
/*
 * Allocate a heap block of at least `lb_adjusted` bytes (aligned to
 * `align_m1 + 1`). First try an exact-fit free list; then, depending
 * on heap-usage policy, scan larger free lists and split. Return
 * `NULL` on failure (including size/alignment overflow).
 */
GC_INNER struct hblk *
GC_allochblk(size_t lb_adjusted, int kind,
             unsigned flags /* `IGNORE_OFF_PAGE` or 0 */, size_t align_m1)
{
  size_t blocks, start_list;
  struct hblk *result;
  int may_split;
  size_t split_limit; /* highest index of free list whose blocks we split */

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT((lb_adjusted & (GC_GRANULE_BYTES - 1)) == 0);
  blocks = OBJ_SZ_TO_BLOCKS_CHECKED(lb_adjusted);
  if (UNLIKELY(SIZET_SAT_ADD(blocks * HBLKSIZE, align_m1)
               >= (GC_SIZE_MAX >> 1)))
    return NULL; /* overflow */

  start_list = GC_hblk_fl_from_blocks(blocks);
  /* Try for an exact match first. */
  result = GC_allochblk_nth(lb_adjusted, kind, flags, start_list, FALSE,
                            align_m1);
  if (result != NULL)
    return result;

  may_split = TRUE;
  if (GC_use_entire_heap || GC_dont_gc
      || GC_heapsize - GC_large_free_bytes < GC_requested_heapsize
      || GC_incremental || !GC_should_collect()) {
    /* Should use more of the heap, even if it requires splitting. */
    split_limit = N_HBLK_FLS;
  } else if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
    /*
     * If we are deallocating lots of memory from finalizers, then fail
     * and collect sooner rather than later.
     */
    split_limit = 0;
  } else {
    /*
     * If we have enough large blocks left to cover any previous request
     * for large blocks, we go ahead and split. Assuming a steady state,
     * that should be safe. It means that we can use the full heap
     * if we allocate only small objects.
     */
    split_limit = GC_enough_large_bytes_left();
#ifdef USE_MUNMAP
    if (split_limit > 0)
      may_split = AVOID_SPLIT_REMAPPED;
#endif
  }
  if (start_list < UNIQUE_THRESHOLD && 0 == align_m1) {
    /*
     * No reason to try `start_list` again, since all blocks are exact
     * matches.
     */
    ++start_list;
  }
  for (; start_list <= split_limit; ++start_list) {
    result = GC_allochblk_nth(lb_adjusted, kind, flags, start_list, may_split,
                              align_m1);
    if (result != NULL)
      break;
  }
  return result;
}
896
/* Number of padding bytes needed to round `p` up to `align_m1 + 1`. */
#define ALIGN_PAD_SZ(p, align_m1) \
  (((align_m1) + 1 - (size_t)ADDR(p)) & (align_m1))
899
/*
 * Return `TRUE` if the successor of the current block on its free list
 * is smaller (less wasteful) yet still big enough for the request after
 * alignment padding and, unless black-listing is disabled, not
 * black-listed at the aligned start.
 */
static GC_bool
next_hblk_fits_better(const hdr *hhdr, size_t size_avail, size_t size_needed,
                      size_t align_m1)
{
  const hdr *nexthdr;
  size_t next_size;
  size_t next_ofs;
  struct hblk *next_hbp = hhdr->hb_next;

  if (NULL == next_hbp)
    return FALSE; /*< no next block */
  GET_HDR(next_hbp, nexthdr);
  next_size = nexthdr->hb_sz;
  if (size_avail <= next_size)
    return FALSE; /*< not enough size */

  next_ofs = ALIGN_PAD_SZ(next_hbp, align_m1);
  return next_size >= size_needed + next_ofs
#ifndef NO_BLACK_LISTING
         && !GC_is_black_listed(next_hbp + divHBLKSZ(next_ofs), size_needed)
#endif
      ;
}
923
/*
 * Starting from `last_hbp` (within a free block with `size_remain` spare
 * bytes), return the first properly aligned block of `eff_size_needed`
 * bytes that is not black-listed, or `NULL` if none fits in the range.
 * With `NO_BLACK_LISTING`, just return the aligned start.
 */
static struct hblk *
find_nonbl_hblk(struct hblk *last_hbp, size_t size_remain,
                size_t eff_size_needed, size_t align_m1)
{
#ifdef NO_BLACK_LISTING
  UNUSED_ARG(size_remain);
  UNUSED_ARG(eff_size_needed);
  return last_hbp + divHBLKSZ(ALIGN_PAD_SZ(last_hbp, align_m1));
#else
  /* Highest aligned candidate start still leaving enough room. */
  ptr_t search_end
      = PTR_ALIGN_DOWN((ptr_t)last_hbp + size_remain, align_m1 + 1);

  do {
    struct hblk *next_hbp;

    last_hbp += divHBLKSZ(ALIGN_PAD_SZ(last_hbp, align_m1));
    next_hbp = GC_is_black_listed(last_hbp, eff_size_needed);
    if (NULL == next_hbp)
      return last_hbp; /*< not black-listed */
    last_hbp = next_hbp;
  } while (ADDR_GE(search_end, (ptr_t)last_hbp));
  return NULL;
#endif
}
948
#ifndef NO_BLACK_LISTING
/* Number of warnings suppressed so far. */
STATIC long GC_large_alloc_warn_suppressed = 0;

/*
 * Counter of the cases when found block by `GC_allochblk_nth` is
 * black-listed completely.
 */
STATIC unsigned GC_drop_blacklisted_count = 0;

/*
 * Allocate and drop the block in small chunks, to maximize the chance
 * that we will recover some later. `hhdr` should correspond to `hbp`;
 * `n` is the free-list index the block is currently on. Each `HBLKSIZE`
 * chunk is re-labeled as a pointer-free block.
 */
static void
drop_hblk_in_chunks(size_t n, struct hblk *hbp, hdr *hhdr)
{
  size_t total_size = hhdr->hb_sz;
  const struct hblk *limit = hbp + divHBLKSZ(total_size);

  GC_ASSERT(HDR(hbp) == hhdr);
  GC_ASSERT(modHBLKSZ(total_size) == 0 && total_size > 0);
  GC_large_free_bytes -= total_size;
  GC_bytes_dropped += total_size;
  GC_remove_from_fl_at(hhdr, n);
  do {
    (void)setup_header(hhdr, hbp, HBLKSIZE, PTRFREE, 0); /*< cannot fail */
    if (GC_debugging_started)
      BZERO(hbp, HBLKSIZE);
    hbp++;
    if (ADDR_GE(hbp, limit))
      break;

    hhdr = GC_install_header(hbp);
  } while (LIKELY(hhdr != NULL)); /*< no header allocation failure? */
}
#endif /* !NO_BLACK_LISTING */
986
#if defined(MPROTECT_VDB) && defined(DONT_PROTECT_PTRFREE)
/*
 * Return `TRUE` if allocating a block of the given pointer-freeness in
 * the page containing `hbp` would mix pointer-free and
 * pointer-containing blocks within that page.
 */
static GC_bool
is_hblks_mix_in_page(struct hblk *hbp, GC_bool is_ptrfree)
{
  struct hblk *h = HBLK_PAGE_ALIGNED(hbp);
  size_t i, cnt = divHBLKSZ(GC_page_size);

  /*
   * Iterate over blocks in the page to check if all the occupied blocks
   * are pointer-free if we are going to allocate a pointer-free one,
   * and vice versa.
   */
  for (i = 0; i < cnt; i++) {
    hdr *hhdr;

    GET_HDR(&h[i], hhdr);
    if (NULL == hhdr)
      continue;
    (void)GC_find_starting_hblk(&h[i], &hhdr);
    if (!HBLK_IS_FREE(hhdr)) {
      /* It is OK to check only the first found occupied block. */
      return IS_PTRFREE(hhdr) != is_ptrfree;
    }
  }
  /* All blocks are free. */
  return FALSE;
}
#endif /* MPROTECT_VDB && DONT_PROTECT_PTRFREE */
1015
/*
 * The same as `GC_allochblk`, but with search restricted to the
 * `index`-th free list. `flags` should be `IGNORE_OFF_PAGE` or zero;
 * `may_split` indicates whether it is OK to split larger blocks; size
 * `lb_adjusted` is in bytes. If `may_split` is set to
 * `AVOID_SPLIT_REMAPPED`, then memory remapping followed by splitting
 * should be generally avoided. Rounded-up `lb_adjusted` plus
 * `align_m1` value should be less than `GC_SIZE_MAX / 2`.
 * Returns the allocated block, or `NULL` if no suitable block is found
 * on this free list (or header/counts installation fails).
 */
STATIC struct hblk *
GC_allochblk_nth(size_t lb_adjusted, int kind, unsigned flags, size_t index,
                 int may_split, size_t align_m1)
{
  struct hblk *hbp, *last_hbp;
  /* The header corresponding to `hbp`. */
  hdr *hhdr;
  /* Number of bytes in requested objects, rounded up to a multiple of `HBLKSIZE`. */
  size_t size_needed = (lb_adjusted + HBLKSIZE - 1) & ~(HBLKSIZE - 1);

  GC_ASSERT(I_HOLD_LOCK());
  /* `align_m1 + 1` must be a power of two, and the request must be nonempty. */
  GC_ASSERT(((align_m1 + 1) & align_m1) == 0 && lb_adjusted > 0);
  /* A nontrivial alignment must be a multiple of the block size. */
  GC_ASSERT(0 == align_m1 || modHBLKSZ(align_m1 + 1) == 0);
#ifndef NO_BLACK_LISTING
retry:
#endif
  /*
   * Search for a big enough block in free list. Note: every exit from
   * this loop other than `return NULL` assigns `last_hbp` before `break`.
   */
  for (hbp = GC_hblkfreelist[index];; hbp = hhdr->hb_next) {
    size_t size_avail; /*< bytes available in this block */
    size_t align_ofs;  /*< bytes to skip at block start to satisfy alignment */

    if (hbp /* `!= NULL` */) {
      /* CPPCHECK */
    } else {
      /* Reached the end of the free list without a match. */
      return NULL;
    }
    GET_HDR(hbp, hhdr); /*< set `hhdr` value */
    size_avail = hhdr->hb_sz;
    /* Exact-fit only mode: skip blocks of any other size. */
    if (!may_split && size_avail != size_needed)
      continue;

    align_ofs = ALIGN_PAD_SZ(hbp, align_m1);
    if (size_avail < size_needed + align_ofs)
      continue; /*< the block is too small */

    if (size_avail != size_needed) {
      /*
       * If the next heap block is obviously better, go on.
       * This prevents us from disassembling a single large block to get
       * tiny blocks.
       */
      if (next_hblk_fits_better(hhdr, size_avail, size_needed, align_m1))
        continue;
    }

#if defined(MPROTECT_VDB) && defined(DONT_PROTECT_PTRFREE)
    /*
     * Avoid write-protecting pointer-free blocks (only the case
     * if page size is larger than the block size).
     */
    GC_ASSERT(GC_page_size != 0);
    if (GC_page_size != HBLKSIZE
        && (!GC_incremental /*< not enabled yet */
            || GC_incremental_protection_needs() != GC_PROTECTS_NONE)
        && is_hblks_mix_in_page(hbp, kind == PTRFREE))
      continue; /*< would mix pointer-free and pointerful blocks in one page */
#endif

    if (IS_UNCOLLECTABLE(kind)
        || (kind == PTRFREE && size_needed <= MAX_BLACK_LIST_ALLOC)) {
      /* Black-listing does not apply; take this block (aligned part). */
      last_hbp = hbp + divHBLKSZ(align_ofs);
      break;
    }

    /* Look for a sub-block that is not black-listed. */
    last_hbp = find_nonbl_hblk(
        hbp, size_avail - size_needed,
        (flags & IGNORE_OFF_PAGE) != 0 ? HBLKSIZE : size_needed, align_m1);
    /* Is non-black-listed part of enough size? */
    if (last_hbp != NULL) {
#ifdef USE_MUNMAP
      /* Avoid remapping followed by splitting. */
      if (may_split == AVOID_SPLIT_REMAPPED && last_hbp != hbp
          && !IS_MAPPED(hhdr))
        continue;
#endif
      break;
    }

#ifndef NO_BLACK_LISTING
    /*
     * The block is completely black-listed. If so, we need to
     * drop some such blocks, since otherwise we spend all our
     * time traversing them if pointer-free blocks are unpopular.
     * A dropped block will be reconsidered at next collection.
     */
    if (size_needed == HBLKSIZE && 0 == align_m1 && !GC_find_leak_inner
        && IS_MAPPED(hhdr) && (++GC_drop_blacklisted_count & 3) == 0) {
      /* Remember the predecessor so the list walk can resume after the drop. */
      const struct hblk *prev = hhdr->hb_prev;

      drop_hblk_in_chunks(index, hbp, hhdr);
      if (NULL == prev)
        goto retry; /*< dropped the list head; restart from the beginning */
      /* Restore `hhdr` to point at free block. */
      hhdr = HDR(prev);
      continue;
    }

    if (size_needed > BL_LIMIT && size_avail - size_needed > BL_LIMIT) {
      /* Punt, since anything else risks unreasonable heap growth. */
      if (++GC_large_alloc_warn_suppressed >= GC_large_alloc_warn_interval) {
        WARN("Repeated allocation of very large block"
             " (appr. size %" WARN_PRIuPTR " KiB):\n"
             "\tMay lead to memory leak and poor performance\n",
             size_needed >> 10);
        GC_large_alloc_warn_suppressed = 0;
      }
      last_hbp = hbp + divHBLKSZ(align_ofs);
      break;
    }
#endif
  }

  /* `last_hbp` satisfies the requested alignment by construction. */
  GC_ASSERT((ADDR(last_hbp) & align_m1) == 0);
  if (last_hbp != hbp) {
    /* The allocated part starts inside `hbp`; split off the leading part. */
    hdr *last_hdr = GC_install_header(last_hbp);

    if (UNLIKELY(NULL == last_hdr))
      return NULL;
#ifdef USE_MUNMAP
    /* Make sure it is mapped before we mangle it. */
    if (!IS_MAPPED(hhdr)) {
      GC_adjust_num_unmapped(hbp, hhdr);
      GC_remap((ptr_t)hbp, hhdr->hb_sz);
      hhdr->hb_flags &= (unsigned char)~WAS_UNMAPPED;
    }
#endif
    /* Split the block at `last_hbp`. */
    GC_split_block(hbp, hhdr, last_hbp, last_hdr, index);
    /*
     * We must now allocate `last_hbp`, since it may be on the wrong
     * free list.
     */
    hbp = last_hbp;
    hhdr = last_hdr;
  }
  GC_ASSERT(hhdr->hb_sz >= size_needed);

#ifdef USE_MUNMAP
  if (!IS_MAPPED(hhdr)) {
    GC_adjust_num_unmapped(hbp, hhdr);
    GC_remap((ptr_t)hbp, hhdr->hb_sz);
    hhdr->hb_flags &= (unsigned char)~WAS_UNMAPPED;
    /* Note: this may leave adjacent, mapped free blocks. */
  }
#endif
  /*
   * `hbp` may be on the wrong free list; the parameter `index` is
   * important. This also splits off any trailing excess.
   */
  hbp = GC_get_first_part(hbp, hhdr, size_needed, index);
  if (UNLIKELY(NULL == hbp))
    return NULL;

  /* Add it to map of valid blocks. */
  if (UNLIKELY(!GC_install_counts(hbp, size_needed)))
    return NULL; /*< note: this leaks memory under very rare conditions */

  /* Set up the header. */
  GC_ASSERT(HDR(hbp) == hhdr);
#ifdef MARK_BIT_PER_OBJ
  (void)setup_header(hhdr, hbp, lb_adjusted, kind, flags);
  /* Result is always `TRUE`, not checked to avoid a cppcheck warning. */
#else
  if (UNLIKELY(!setup_header(hhdr, hbp, lb_adjusted, kind, flags))) {
    /* Undo the counts installation before bailing out. */
    GC_remove_counts(hbp, size_needed);
    return NULL; /*< ditto */
  }
#endif

#ifndef GC_DISABLE_INCREMENTAL
  /*
   * Notify virtual dirty bit implementation that we are about to write.
   * Ensure that pointer-free objects are not protected if it is avoidable.
   * This also ensures that newly allocated blocks are treated as
   * dirty - it is necessary since we do not protect free blocks.
   */
  GC_ASSERT(modHBLKSZ(size_needed) == 0);
  GC_remove_protection(hbp, divHBLKSZ(size_needed), IS_PTRFREE(hhdr));
#endif
  /*
   * We just successfully allocated a block. Restart count of consecutive
   * failures.
   */
  GC_fail_count = 0;

  GC_large_free_bytes -= size_needed;
  GC_ASSERT(IS_MAPPED(hhdr));
  return hbp;
}
1214
#ifdef VALGRIND_TRACKING
/*
 * Profiler hook invoked when an object is freed, so external tools
 * (e.g. Valgrind) can intercept deallocation events by placing a
 * breakpoint or probe on this symbol. The body deliberately does
 * nothing beyond keeping the function from being optimized away.
 * Note: this is intentionally defined in a file other than `malloc.c`
 * and `reclaim.c` files.
 */
GC_ATTR_NOINLINE
GC_API void GC_CALLBACK
GC_free_profiler_hook(void *p)
{
  /*
   * With parallel marking, this may be called without the allocator
   * lock held, so only assert lock ownership otherwise.
   */
# ifndef PARALLEL_MARK
  GC_ASSERT(I_HOLD_LOCK());
# endif
  /* Prevent treating this function by the compiler as a no-op one. */
  GC_noop1_ptr(p);
}
#endif /* VALGRIND_TRACKING */
1231
/*
 * Return the heap block `hbp` to the appropriate free list, coalescing
 * it with adjacent free blocks where possible. Updates heap usage
 * accounting (`GC_large_free_bytes`) and aborts on duplicate
 * deallocation or an excessively large block size.
 */
GC_INNER void
GC_freehblk(struct hblk *hbp)
{
  struct hblk *next, *prev;
  hdr *hhdr, *prevhdr, *nexthdr;
  size_t size;

  GET_HDR(hbp, hhdr);
  /* Convert the object size to the full block-granular byte size. */
  size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr->hb_sz);
  if ((size & SIZET_SIGNB) != 0) {
    /*
     * Probably possible if we try to allocate more than half the address
     * space at once. If we do not catch it here, strange things happen
     * later.
     */
    ABORT("Deallocating excessively large block. Too large an allocation?");
  }
  GC_remove_counts(hbp, size);
  /* A free block's header stores the size in bytes, not objects. */
  hhdr->hb_sz = size;
#ifdef USE_MUNMAP
  /* Record the collection number for the unmapping-age heuristic. */
  hhdr->hb_last_reclaimed = (unsigned short)GC_gc_no;
#endif

  /* Check for duplicate deallocation in the easy case. */
  if (HBLK_IS_FREE(hhdr)) {
    ABORT_ARG1("Duplicate large block deallocation", " of %p", (void *)hbp);
  }

  GC_ASSERT(IS_MAPPED(hhdr));
  hhdr->hb_flags |= FREE_BLK;
  next = (struct hblk *)((ptr_t)hbp + size);
  GET_HDR(next, nexthdr);
  prev = GC_free_block_ending_at(hbp);
  /* Coalesce with successor, if possible. */
  if (nexthdr != NULL && HBLK_IS_FREE(nexthdr)
      && IS_MAPPED(nexthdr)
#ifdef CHERI_PURECAP
      /* FIXME: Coalesce with super-capability. */
      /*
       * Bounds of capability should span the entire coalesced memory;
       * bounds being larger than the block size is OK; bounded by the
       * imprecision of original capability obtained from system memory.
       */
      && CAPABILITY_COVERS_RANGE(hbp, ADDR(next), ADDR(next) + nexthdr->hb_sz)
#endif
      && !BLOCKS_MERGE_OVERFLOW(hhdr, nexthdr)) {
    /* Absorb the successor: unlink it, grow this block, drop its header. */
    GC_remove_from_fl(nexthdr);
    hhdr->hb_sz += nexthdr->hb_sz;
    GC_remove_header(next);
  }

  /* Coalesce with predecessor, if possible. */
  if (prev /* `!= NULL` */) { /*< CPPCHECK */
    prevhdr = HDR(prev);
    if (IS_MAPPED(prevhdr)
#ifdef CHERI_PURECAP
        /* FIXME: Coalesce with super-capability. */
        && cheri_base_get(hbp) <= ADDR(prev)
#endif
        && !BLOCKS_MERGE_OVERFLOW(prevhdr, hhdr)) {
      /* Absorb this block into the predecessor and continue with it. */
      GC_remove_from_fl(prevhdr);
      prevhdr->hb_sz += hhdr->hb_sz;
#ifdef USE_MUNMAP
      prevhdr->hb_last_reclaimed = (unsigned short)GC_gc_no;
#endif
      GC_remove_header(hbp);
      hbp = prev;
      hhdr = prevhdr;
    }
  }
  /*
   * FIXME: It is not clear if we really always want to do these merges
   * with `USE_MUNMAP`, since it updates ages and hence prevents unmapping.
   */

  /* Note: `size` is the size of the original block only, by design. */
  GC_large_free_bytes += size;
  GC_add_to_fl(hbp, hhdr);
}
1310