/* reclaim.c */
1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
6 * Copyright (c) 2009-2022 Ivan Maidanski
7 *
8 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 *
11 * Permission is hereby granted to use or copy this program
12 * for any purpose, provided the above notices are retained on all copies.
13 * Permission to modify the code and to distribute modified code is granted,
14 * provided the above notices are retained, and a notice that the code was
15 * modified is included with the above copyright notice.
16 */
17
18 #include "private/gc_priv.h"
19
20 #ifdef ENABLE_DISCLAIM
21 # include "gc/gc_disclaim.h"
22 #endif
23
/*
 * Total number of bytes of memory made available for reuse by the
 * reclaim routines in this file during the current collection cycle
 * (signed, as it is also adjusted downward elsewhere).
 */
GC_INNER GC_signed_word GC_bytes_found = 0;

#ifdef PARALLEL_MARK
/*
 * Count of in-flight parallel free-list builders; asserted to be zero
 * at the start and at the end of `GC_start_reclaim` below.
 */
GC_INNER GC_signed_word GC_fl_builder_count = 0;
#endif

/*
 * We defer printing of leaked objects until we are done with the
 * collection cycle, since the routine for printing objects needs
 * to run outside the collector, e.g. without the allocator lock.
 */

#ifndef NO_FIND_LEAK
# ifndef MAX_LEAKED
/* Maximum number of leaked-object addresses remembered per cycle. */
# define MAX_LEAKED 40
# endif
/* Deferred list of leaked objects; drained by `GC_print_all_errors`. */
STATIC ptr_t GC_leaked[MAX_LEAKED] = { NULL };
/* Number of valid entries in `GC_leaked`. */
STATIC unsigned GC_n_leaked = 0;
#endif

#if !defined(EAGER_SWEEP) && defined(ENABLE_DISCLAIM)
STATIC void GC_reclaim_unconditionally_marked(void);
#endif
47
48 #ifndef SHORT_DBG_HDRS
49
50 # include "private/dbg_mlc.h"
51
# ifndef MAX_SMASHED
/* Maximum number of smashed locations remembered per cycle. */
# define MAX_SMASHED 20
# endif

/*
 * List of smashed (clobbered) locations. We defer printing these,
 * since we cannot always print them nicely with the allocator lock held.
 * We put them here instead of in `GC_arrays`, since it may be useful to
 * be able to look at them with the debugger.
 */
STATIC ptr_t GC_smashed[MAX_SMASHED] = { 0 };
/* Number of valid entries in `GC_smashed`. */
STATIC unsigned GC_n_smashed = 0;
64
/*
 * Record `smashed` (a clobbered location inside a marked object) for
 * deferred reporting, and raise the global error flag.  Must be called
 * with the allocator lock held.
 */
GC_INNER void
GC_add_smashed(ptr_t smashed)
{
  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_is_marked(GC_base(smashed)));
  /* FIXME: Prevent adding an object while printing smashed list. */
  GC_smashed[GC_n_smashed] = smashed;
  /*
   * In case of overflow, we keep the first `MAX_SMASHED - 1` entries
   * plus the last one.
   */
  if (GC_n_smashed < MAX_SMASHED - 1)
    ++GC_n_smashed;
  GC_SET_HAVE_ERRORS();
}
80
/*
 * Print a one-line report about the smashed location `clobbered` in
 * (or near) the object at `p`, prefixed with `msg`.  The allocation
 * site is taken from the object's debug header when it survived the
 * smash.  Must be called without the allocator lock held.
 */
GC_INNER void
GC_print_smashed_obj(const char *msg, void *p, ptr_t clobbered)
{
  oh *ohdr = (oh *)GC_base(p);

  GC_ASSERT(I_DONT_HOLD_LOCK());
# ifdef LINT2
  if (!ohdr)
    ABORT("Invalid GC_print_smashed_obj argument");
# endif
  if (ADDR_GE((ptr_t)(&ohdr->oh_sz), clobbered) || NULL == ohdr->oh_string) {
    /* The debug header itself was overwritten; report approximate size only. */
    GC_err_printf("%s %p in or near object at %p(<smashed>, appr. sz= %lu)\n",
                  msg, (void *)clobbered, p,
                  (unsigned long)(GC_size(ohdr) - DEBUG_BYTES));
  } else {
    GC_err_printf("%s %p in or near object at %p (%s:%d, sz= %lu)\n", msg,
                  (void *)clobbered, p,
                  ADDR(ohdr->oh_string) < HBLKSIZE ? "(smashed string)"
                  : ohdr->oh_string[0] == '\0' ? "EMPTY(smashed?)"
                  : ohdr->oh_string,
                  GET_OH_LINENUM(ohdr), (unsigned long)ohdr->oh_sz);
    PRINT_CALL_CHAIN(ohdr);
  }
}
105
/*
 * Print all deferred smashed-object reports and reset the list.
 * Must be called without the allocator lock held; see the FIXME in
 * `GC_add_smashed` regarding a potential race while printing.
 */
GC_INNER void
GC_print_all_smashed_proc(void)
{
  unsigned i;

  GC_ASSERT(I_DONT_HOLD_LOCK());
  if (GC_n_smashed == 0)
    return;
  GC_err_printf("GC_check_heap_block: found %u smashed heap objects:\n",
                GC_n_smashed);
  for (i = 0; i < GC_n_smashed; ++i) {
    ptr_t base = (ptr_t)GC_base(GC_smashed[i]);

# ifdef LINT2
    if (!base)
      ABORT("Invalid GC_smashed element");
# endif
    /* Skip over the debug header to get the client-visible pointer. */
    GC_print_smashed_obj("", base + sizeof(oh), GC_smashed[i]);
    GC_smashed[i] = 0;
  }
  GC_n_smashed = 0;
}
128
/*
 * Determine whether the object at `base` carries a debug header.
 * Returns 1 if it has live debug info, -1 if it apparently had debug
 * info but has been deallocated (its stored size equals the full block
 * size), and 0 if no debug info is present.
 */
GC_INNER int
GC_has_other_debug_info(ptr_t base)
{
  ptr_t body = (ptr_t)((oh *)base + 1);
  size_t sz = GC_size(base);

  if (HBLKPTR(base) != HBLKPTR(body) || sz < DEBUG_BYTES + EXTRA_BYTES) {
    /* Too small to hold a debug header (or body would cross a block). */
    return 0;
  }
  if (((oh *)base)->oh_sf != (START_FLAG ^ (GC_uintptr_t)body)
      && ((GC_uintptr_t *)base)[BYTES_TO_PTRS(sz) - 1]
         != (END_FLAG ^ (GC_uintptr_t)body)) {
    /* Neither the start flag nor the end flag matches: no debug info. */
    return 0;
  }
  if (((oh *)base)->oh_sz == (GC_uintptr_t)sz) {
    /* Object may have had debug info, but has been deallocated. */
    return -1;
  }
  return 1;
}
149 #endif /* !SHORT_DBG_HDRS */
150
151 GC_INNER void
152 GC_default_print_heap_obj_proc(ptr_t p)
153 {
154 ptr_t base = (ptr_t)GC_base(p);
155 int kind = HDR(base)->hb_obj_kind;
156
157 GC_err_printf("object at %p of appr. %lu bytes (%s)\n", (void *)base,
158 (unsigned long)GC_size(base),
159 kind == PTRFREE ? "atomic"
160 : IS_UNCOLLECTABLE(kind) ? "uncollectable"
161 : "composite");
162 }
163
/* Hook used to print one heap object; defaults to the routine above. */
GC_INNER void (*GC_print_heap_obj)(ptr_t p) = GC_default_print_heap_obj_proc;

#if !defined(NO_FIND_LEAK) || !defined(SHORT_DBG_HDRS)
# ifdef AO_HAVE_store
/* Set when a leaked or smashed object is found; atomic when possible. */
GC_INNER volatile AO_t GC_have_errors = 0;
# else
GC_INNER GC_bool GC_have_errors = FALSE;
# endif

/* Set once heap-object debugging facilities are in use. */
GC_INNER GC_bool GC_debugging_started = FALSE;
174
/*
 * Print (and then discard) all deferred error reports: the smashed
 * object list and the leaked object list.  Leaked objects are freed
 * here after being reported.  Reentry is prevented via the static
 * `printing_errors` flag; the leaked list is copied out under the
 * allocator lock and printed outside it.  Aborts if configured to do
 * so (build-time `GC_ABORT_ON_LEAK` or same-named environment
 * variable) and errors were found.
 */
GC_INNER void
GC_print_all_errors(void)
{
  static GC_bool printing_errors = FALSE;
  GC_bool have_errors;
# ifndef NO_FIND_LEAK
  unsigned i, n_leaked;
  ptr_t leaked[MAX_LEAKED];
# endif

  LOCK();
  if (printing_errors) {
    /* Another thread is already printing; nothing to do. */
    UNLOCK();
    return;
  }
  have_errors = get_have_errors();
  printing_errors = TRUE;
# ifndef NO_FIND_LEAK
  /* Snapshot and reset the leaked list while holding the lock. */
  n_leaked = GC_n_leaked;
  if (n_leaked > 0) {
    GC_ASSERT(n_leaked <= MAX_LEAKED);
    BCOPY(GC_leaked, leaked, n_leaked * sizeof(ptr_t));
    GC_n_leaked = 0;
    BZERO(GC_leaked, n_leaked * sizeof(ptr_t));
  }
# endif
  UNLOCK();

  if (GC_debugging_started) {
    GC_print_all_smashed();
  } else {
    have_errors = FALSE;
  }

# ifndef NO_FIND_LEAK
  if (n_leaked > 0) {
    GC_err_printf("Found %u leaked objects:\n", n_leaked);
    have_errors = TRUE;
  }
  for (i = 0; i < n_leaked; i++) {
    ptr_t p = leaked[i];

#   ifndef SKIP_LEAKED_OBJECTS_PRINTING
    GC_print_heap_obj(p);
#   endif
    /* A reported leak is handed back to the allocator. */
    GC_free(p);
  }
# endif

  if (have_errors
# ifndef GC_ABORT_ON_LEAK
      && GETENV("GC_ABORT_ON_LEAK") != NULL
# endif
      ) {
    ABORT("Leaked or smashed objects encountered");
  }

  LOCK();
  printing_errors = FALSE;
  UNLOCK();
}
236 #endif
237
238 /* The reclaim phase. */
239
240 GC_INNER GC_bool
241 GC_block_empty(const hdr *hhdr)
242 {
243 return 0 == hhdr->hb_n_marks;
244 }
245
246 STATIC GC_bool
247 GC_block_nearly_full(const hdr *hhdr, size_t sz)
248 {
249 return hhdr->hb_n_marks > HBLK_OBJS(sz) * 7 / 8;
250 }
251
/*
 * TODO: This should perhaps again be specialized for `USE_MARK_BYTES`
 * and `USE_MARK_BITS` cases.
 */

/*
 * Clear the object of `sz` bytes at `q`, except for its first
 * pointer-sized word (the free-list link).  Adds `sz` to `*pcount`
 * and returns the address just past the object.
 */
GC_INLINE ptr_t
GC_clear_block(ptr_t q, size_t sz, word *pcount)
{
  ptr_t *p = (ptr_t *)q;
  ptr_t plim = q + sz;

  /* Clear object, advance `p` to next object in the process. */
#ifdef USE_MARK_BYTES
  /* Objects are two-pointer aligned here, so clear two words at a time. */
  GC_ASSERT((sz & 1) == 0);
  GC_ASSERT((ADDR(p) & (2 * sizeof(ptr_t) - 1)) == 0);
  p[1] = NULL; /*< but do not clear link field */
  for (p += 2; ADDR_LT((ptr_t)p, plim); p += 2) {
    CLEAR_DOUBLE(p);
  }
#else
  /* Skip link field. */
  p++;

  while (ADDR_LT((ptr_t)p, plim)) {
    *p++ = NULL;
  }
#endif
  *pcount += sz;
  return (ptr_t)p;
}
282
/*
 * Restore unmarked small objects in `h` of size `sz` (in bytes) to the
 * object free list. Returns the new list. Clears unmarked objects.
 * `*pcount` is incremented by the number of reclaimed bytes.
 */
STATIC ptr_t
GC_reclaim_clear(struct hblk *hbp, const hdr *hhdr, size_t sz, ptr_t list,
                 word *pcount)
{
  size_t bit_no;
  ptr_t p, plim;

  GC_ASSERT(hhdr == GC_find_header(hbp));
#ifndef THREADS
  GC_ASSERT(sz == hhdr->hb_sz);
#else
  /* Skip the assertion because of a potential race with `GC_realloc`. */
#endif
  GC_ASSERT((sz & (sizeof(ptr_t) - 1)) == 0);

  /* Go through all objects in the block. */
  p = hbp->hb_body;
  plim = p + HBLKSIZE - sz;
  for (bit_no = 0; ADDR_GE(plim, p); bit_no += MARK_BIT_OFFSET(sz)) {
    if (mark_bit_from_hdr(hhdr, bit_no)) {
      p += sz;
    } else {
      /* The object is available - put it on list. */
      obj_link(p) = list;
      list = p;
      FREE_PROFILER_HOOK(p);
      /* `GC_clear_block` also advances `p` past the object. */
      p = GC_clear_block(p, sz, pcount);
    }
  }
  return list;
}
318
/* The same thing as `GC_reclaim_clear`, but do not clear objects. */
STATIC ptr_t
GC_reclaim_uninit(struct hblk *hbp, const hdr *hhdr, size_t sz, ptr_t list,
                  word *pcount)
{
  size_t bit_no;
  word n_bytes_found = 0;
  ptr_t p, plim;

#ifndef THREADS
  GC_ASSERT(sz == hhdr->hb_sz);
#endif

  /* Go through all objects in the block. */
  p = hbp->hb_body;
  plim = (ptr_t)hbp + HBLKSIZE - sz;
  for (bit_no = 0; ADDR_GE(plim, p); bit_no += MARK_BIT_OFFSET(sz), p += sz) {
    if (!mark_bit_from_hdr(hhdr, bit_no)) {
      n_bytes_found += sz;
      /* The object is available - put it on list. */
      obj_link(p) = list;
      list = p;
      FREE_PROFILER_HOOK(p);
    }
  }
  /* Accumulate locally and add once, unlike `GC_reclaim_clear`. */
  *pcount += n_bytes_found;
  return list;
}
347
348 #ifdef ENABLE_DISCLAIM
/*
 * Call the reclaim notifier for the block's kind on each unmarked
 * object in the block; objects for which the notifier returns nonzero
 * are resurrected (re-marked), the rest are cleared and prepended to
 * `list`.  NOTE(review): the original comment mentioned enter/leave
 * callbacks, which are not visible in this routine - confirm.
 */
STATIC ptr_t
GC_disclaim_and_reclaim(struct hblk *hbp, hdr *hhdr, size_t sz, ptr_t list,
                        word *pcount)
{
  size_t bit_no;
  ptr_t p, plim;
  int(GC_CALLBACK * disclaim)(void *)
      = GC_obj_kinds[hhdr->hb_obj_kind].ok_disclaim_proc;

  GC_ASSERT(disclaim != 0);
# ifndef THREADS
  GC_ASSERT(sz == hhdr->hb_sz);
# endif
  p = hbp->hb_body;
  plim = p + HBLKSIZE - sz;

  for (bit_no = 0; ADDR_GE(plim, p); bit_no += MARK_BIT_OFFSET(sz)) {
    if (mark_bit_from_hdr(hhdr, bit_no)) {
      p += sz;
    } else if (disclaim(p)) {
      /* The notifier refused reclamation: resurrect the object. */
      set_mark_bit_from_hdr(hhdr, bit_no);
      INCR_MARKS(hhdr);
      p += sz;
    } else {
      obj_link(p) = list;
      list = p;
      FREE_PROFILER_HOOK(p);
      p = GC_clear_block(p, sz, pcount);
    }
  }
  return list;
}
385 #endif /* ENABLE_DISCLAIM */
386
387 #ifndef NO_FIND_LEAK
388
389 # ifndef SHORT_DBG_HDRS
/*
 * Decide whether the object at `base` has leaked: returns `TRUE` if it
 * still carries (live) debug info, i.e. was never explicitly freed.
 * Otherwise the object was deallocated via `GC_debug_free`; its body
 * is then validated against the freed-memory marker, and the first
 * altered-after-free location (if any) is recorded as smashed (and the
 * object is kept alive for this cycle so it can be reported).
 */
STATIC GC_bool
GC_check_leaked(ptr_t base)
{
  size_t i;
  size_t lpw;
  ptr_t *p;

  if (
# if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      (*(GC_uintptr_t *)base & 1) != 0 &&
# endif
      GC_has_other_debug_info(base) >= 0)
    return TRUE; /*< object has leaked */

  /* Validate freed object's content. */
  p = (ptr_t *)(base + sizeof(oh));
  lpw = BYTES_TO_PTRS(HDR(base)->hb_sz - sizeof(oh));
  for (i = 0; i < lpw; ++i)
    if ((GC_uintptr_t)p[i] != GC_FREED_MEM_MARKER) {
      /* Do not reclaim it in this cycle. */
      GC_set_mark_bit(base);
      /* Alter-after-free has been detected. */
      GC_add_smashed((ptr_t)(&p[i]));
      /* Do not report any other smashed locations in the object. */
      break;
    }

  return FALSE; /*< `GC_debug_free()` has been called */
}
419 # endif /* !SHORT_DBG_HDRS */
420
/*
 * Record `leaked` in the deferred leaked-objects list (if there is
 * room) and keep it alive for this cycle so that it can be printed
 * later.  Must be called with the allocator lock held.
 */
GC_INLINE void
GC_add_leaked(ptr_t leaked)
{
  GC_ASSERT(I_HOLD_LOCK());
# ifndef SHORT_DBG_HDRS
  /* In delayed-free mode, only genuinely unfreed objects are leaks. */
  if (GC_findleak_delay_free && !GC_check_leaked(leaked))
    return;
# endif

  GC_SET_HAVE_ERRORS();
  if (GC_n_leaked < MAX_LEAKED) {
    GC_leaked[GC_n_leaked++] = leaked;
    /* Make sure it is not reclaimed this cycle. */
    GC_set_mark_bit(leaked);
  }
}
437
/*
 * Do not really reclaim objects, just check for unmarked ones (and
 * report each such object of size `sz` in block `hbp` as leaked).
 */
STATIC void
GC_reclaim_check(struct hblk *hbp, const hdr *hhdr, size_t sz)
{
  size_t bit_no;
  ptr_t p, plim;

# ifndef THREADS
  GC_ASSERT(sz == hhdr->hb_sz);
# endif
  /* Go through all objects in the block. */
  p = hbp->hb_body;
  plim = p + HBLKSIZE - sz;
  for (bit_no = 0; ADDR_GE(plim, p); bit_no += MARK_BIT_OFFSET(sz), p += sz) {
    if (!mark_bit_from_hdr(hhdr, bit_no))
      GC_add_leaked(p);
  }
}
456
457 #endif /* !NO_FIND_LEAK */
458
459 /*
460 * Is a pointer-free block? Same as `IS_PTRFREE()` macro but uses
461 * unordered atomic access to avoid racing with `GC_realloc`.
462 */
463 #ifdef AO_HAVE_load
464 # define IS_PTRFREE_SAFE(hhdr) (AO_load((AO_t *)&(hhdr)->hb_descr) == 0)
465 #else
466 /*
467 * No race as `GC_realloc` holds the allocator lock when updating
468 * `hb_descr` field.
469 */
470 # define IS_PTRFREE_SAFE(hhdr) IS_PTRFREE(hhdr)
471 #endif
472
/*
 * Reclaim all unmarked objects of size `sz` in block `hbp`, prepending
 * them to free list `list`, and return the resulting list.  Objects
 * are cleared when `init` is set, debugging is on, or the block's kind
 * has a disclaim notifier; otherwise they are left uninitialized (the
 * block must then be pointer-free).  `*pcount` is incremented by the
 * number of reclaimed bytes.  Uncollectable blocks get all their mark
 * bits re-set afterwards.
 */
GC_INNER ptr_t
GC_reclaim_generic(struct hblk *hbp, hdr *hhdr, size_t sz, GC_bool init,
                   ptr_t list, word *pcount)
{
  ptr_t result;

#ifndef PARALLEL_MARK
  GC_ASSERT(I_HOLD_LOCK());
#endif
  GC_ASSERT(GC_find_header(hbp) == hhdr);
#ifndef GC_DISABLE_INCREMENTAL
  GC_remove_protection(hbp, 1, IS_PTRFREE_SAFE(hhdr));
#endif
#ifdef ENABLE_DISCLAIM
  if ((hhdr->hb_flags & HAS_DISCLAIM) != 0) {
    result = GC_disclaim_and_reclaim(hbp, hhdr, sz, list, pcount);
  } else
#endif
  /* else */ {
    if (init || GC_debugging_started) {
      result = GC_reclaim_clear(hbp, hhdr, sz, list, pcount);
    } else {
#ifndef AO_HAVE_load
      GC_ASSERT(IS_PTRFREE(hhdr));
#endif
      result = GC_reclaim_uninit(hbp, hhdr, sz, list, pcount);
    }
  }
  if (IS_UNCOLLECTABLE(hhdr->hb_obj_kind))
    GC_set_hdr_marks(hhdr);
  return result;
}
505
/*
 * Restore unmarked small objects in the block pointed to by `hbp` to
 * the appropriate object free list. If entirely empty blocks are to
 * be completely deallocated, then caller should perform that check.
 * In `report_if_found` mode, unmarked objects are only reported as
 * leaks, not reclaimed.
 */
STATIC void
GC_reclaim_small_nonempty_block(struct hblk *hbp, size_t sz,
                                GC_bool report_if_found)
{
  hdr *hhdr;

  GC_ASSERT(I_HOLD_LOCK());
  hhdr = HDR(hbp);
  /* Record the cycle number; consulted by `GC_reclaim_all` for aging. */
  hhdr->hb_last_reclaimed = (unsigned short)GC_gc_no;
  if (report_if_found) {
#ifndef NO_FIND_LEAK
    GC_reclaim_check(hbp, hhdr, sz);
#endif
  } else {
    struct obj_kind *ok = &GC_obj_kinds[hhdr->hb_obj_kind];
    void **flh = &ok->ok_freelist[BYTES_TO_GRANULES(sz)];

    /* Prepend reclaimed objects to the kind's free list for this size. */
    *flh = GC_reclaim_generic(hbp, hhdr, sz, ok->ok_init, (ptr_t)(*flh),
                              (/* unsigned */ word *)&GC_bytes_found);
  }
}
532
533 #ifdef ENABLE_DISCLAIM
/*
 * Reclaim (with disclaim notification) the objects of the small-object
 * block `hbp`.  If the notifier resurrects at least one object, the
 * reclaimed objects go on the free list; otherwise the whole block is
 * returned to the heap block free list (discarding the just-built
 * object list, which lived inside the block anyway).
 */
STATIC void
GC_disclaim_and_reclaim_or_free_small_block(struct hblk *hbp)
{
  hdr *hhdr;
  size_t sz;
  struct obj_kind *ok;
  void **flh;
  void *flh_next;

  GC_ASSERT(I_HOLD_LOCK());
  hhdr = HDR(hbp);
  sz = hhdr->hb_sz;
  ok = &GC_obj_kinds[hhdr->hb_obj_kind];
  flh = &ok->ok_freelist[BYTES_TO_GRANULES(sz)];

  hhdr->hb_last_reclaimed = (unsigned short)GC_gc_no;
  flh_next = GC_reclaim_generic(hbp, hhdr, sz, ok->ok_init, (ptr_t)(*flh),
                                (/* unsigned */ word *)&GC_bytes_found);
  if (hhdr->hb_n_marks) {
    /* Some objects were resurrected by the disclaim notifier. */
    *flh = flh_next;
  } else {
    GC_ASSERT(hbp == hhdr->hb_block);
    GC_bytes_found += (GC_signed_word)HBLKSIZE;
    GC_freehblk(hbp);
  }
}
560 #endif /* ENABLE_DISCLAIM */
561
/*
 * Restore an unmarked large object or an entirely empty block of
 * small objects to the heap block free list. Otherwise enqueue the
 * block for later processing by `GC_reclaim_small_nonempty_block()`.
 * If `report_if_found` is `TRUE`, then process any block immediately,
 * and simply report free objects; do not actually reclaim them.
 * Also recomputes `GC_atomic_in_use`/`GC_composite_in_use` as a side
 * effect (they are reset by `GC_start_reclaim` beforehand).
 */
STATIC void GC_CALLBACK
GC_reclaim_block(struct hblk *hbp, void *report_if_found)
{
  hdr *hhdr;
  size_t sz; /*< size of objects in current block */
  struct obj_kind *ok;

  GC_ASSERT(I_HOLD_LOCK());
#if defined(CPPCHECK)
  GC_noop1_ptr(report_if_found);
#endif
  hhdr = HDR(hbp);
  ok = &GC_obj_kinds[hhdr->hb_obj_kind];
#ifdef AO_HAVE_load
  /* Atomic access is used to avoid racing with `GC_realloc`. */
  sz = AO_load((volatile AO_t *)&hhdr->hb_sz);
#else
  /*
   * No race as `GC_realloc` holds the allocator lock while
   * updating `hb_sz`.
   */
  sz = hhdr->hb_sz;
#endif
  if (sz > MAXOBJBYTES) {
    /* The case of 1 big object. */
    if (!mark_bit_from_hdr(hhdr, 0)) {
      if (report_if_found) {
        GC_ASSERT(hbp == hhdr->hb_block);
#ifndef NO_FIND_LEAK
        GC_add_leaked((ptr_t)hbp);
#endif
      } else {
#ifdef ENABLE_DISCLAIM
        if (UNLIKELY((hhdr->hb_flags & HAS_DISCLAIM) != 0)) {
          if (ok->ok_disclaim_proc(hbp)) {
            /* Not disclaimed, thus resurrect the object. */
            set_mark_bit_from_hdr(hhdr, 0);
            goto in_use;
          }
        }
#endif
        GC_ASSERT(hbp == hhdr->hb_block);
        if (sz > HBLKSIZE) {
          GC_large_allocd_bytes -= HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
        }
        GC_bytes_found += (GC_signed_word)sz;
        GC_freehblk(hbp);
        FREE_PROFILER_HOOK(hbp);
      }
    } else {
#ifdef ENABLE_DISCLAIM
    in_use:
#endif
      if (IS_PTRFREE_SAFE(hhdr)) {
        GC_atomic_in_use += sz;
      } else {
        GC_composite_in_use += sz;
      }
    }
  } else {
    GC_bool empty = GC_block_empty(hhdr);

#ifdef PARALLEL_MARK
    /*
     * Count can be low or one too high because we sometimes have to
     * ignore decrements. Objects can also potentially be repeatedly
     * marked by each marker. Here we assume 3 markers at most, but
     * this is extremely unlikely to fail spuriously with more.
     * And if it does, it should be looked at.
     */
    GC_ASSERT(sz != 0
              && (GC_markers_m1 > 1 ? 3 : GC_markers_m1 + 1)
                         * (HBLKSIZE / sz + 1)
                     + 16
                 >= hhdr->hb_n_marks);
#else
    GC_ASSERT(sz * hhdr->hb_n_marks <= HBLKSIZE);
#endif
#ifdef VALGRIND_TRACKING
    /*
     * Call `GC_free_profiler_hook()` on freed objects so that
     * a profiling tool could track the allocations.
     */
    {
      ptr_t p = hbp->hb_body;
      ptr_t plim = p + HBLKSIZE - sz;
      size_t bit_no;

      for (bit_no = 0; ADDR_GE(plim, p);
           bit_no += MARK_BIT_OFFSET(sz), p += sz) {
        if (!mark_bit_from_hdr(hhdr, bit_no))
          FREE_PROFILER_HOOK(p);
      }
    }
#endif
    GC_ASSERT(hbp == hhdr->hb_block);
    if (report_if_found) {
      GC_reclaim_small_nonempty_block(hbp, sz, TRUE /* `report_if_found` */);
    } else if (empty) {
#ifdef ENABLE_DISCLAIM
      if ((hhdr->hb_flags & HAS_DISCLAIM) != 0) {
        GC_disclaim_and_reclaim_or_free_small_block(hbp);
      } else
#endif
      /* else */ {
        GC_bytes_found += (GC_signed_word)HBLKSIZE;
        GC_freehblk(hbp);
        FREE_PROFILER_HOOK(hbp);
      }
    } else if (GC_find_leak_inner || !GC_block_nearly_full(hhdr, sz)) {
      /* Group of smaller objects, enqueue the real work. */
      struct hblk **rlh = ok->ok_reclaim_list;

      if (rlh != NULL) {
        rlh += BYTES_TO_GRANULES(sz);
        hhdr->hb_next = *rlh;
        *rlh = hbp;
      }
    } else {
      /* Not worth salvaging. */
    }
    /*
     * We used to do the `GC_block_nearly_full` check later, but we
     * already have the right cache context here. Also doing it here
     * avoids some silly lock contention in `GC_malloc_many()`.
     */
    if (IS_PTRFREE_SAFE(hhdr)) {
      GC_atomic_in_use += (word)sz * hhdr->hb_n_marks;
    } else {
      GC_composite_in_use += (word)sz * hhdr->hb_n_marks;
    }
  }
}
702
703 #if !defined(NO_DEBUGGING)
704 /*
705 * Routines to gather and print heap block info intended for debugging.
706 * Otherwise should be called with the allocator lock held.
707 */
708
/* Accumulator passed through `GC_apply_to_all_blocks` when printing. */
struct Print_stats {
  size_t number_of_blocks; /*< heap blocks visited so far */
  size_t total_bytes;      /*< their total size, rounded up to block size */
};

EXTERN_C_BEGIN /*< to avoid "no previous prototype" clang warning */
unsigned
GC_n_set_marks(const hdr *);
EXTERN_C_END
718
719 # ifdef USE_MARK_BYTES
/*
 * Return the number of set mark bits in the given header.
 * Remains externally visible as used by GNU `gcj` currently.
 * There could be a race between `GC_clear_hdr_marks` and this
 * function but the latter is for a debug purpose.
 */
GC_ATTR_NO_SANITIZE_THREAD
unsigned
GC_n_set_marks(const hdr *hhdr)
{
  unsigned result = 0;
  size_t i;
  size_t offset = MARK_BIT_OFFSET(hhdr->hb_sz);
  size_t limit = FINAL_MARK_BIT(hhdr->hb_sz);

  /* One mark byte per object; sum them at object granularity. */
  for (i = 0; i < limit; i += offset) {
    result += (unsigned)hhdr->hb_marks[i];
  }

  /* The one should be set past the end. */
  GC_ASSERT(hhdr->hb_marks[limit]);
  return result;
}
743
744 # else
745 /* Number of set bits in a word. Not performance critical. */
746 static unsigned
747 count_ones(word v)
748 {
749 unsigned result = 0;
750
751 for (; v > 0; v >>= 1) {
752 if (v & 1)
753 result++;
754 }
755 return result;
756 }
757
/*
 * Return the number of set mark bits in the given header, excluding
 * the sentinel bit past the end.  For uncollectable kinds every bit
 * of an object is set (not just the first), so the count is divided
 * by the object size in granules.
 */
unsigned
GC_n_set_marks(const hdr *hhdr)
{
  unsigned result = 0;
  size_t i;
# ifdef MARK_BIT_PER_OBJ
  size_t n_objs = HBLK_OBJS(hhdr->hb_sz);
  size_t n_mark_words = divWORDSZ(n_objs > 0 ? n_objs : 1); /*< round down */

  for (i = 0; i <= n_mark_words; i++) {
    result += count_ones(hhdr->hb_marks[i]);
  }
# else

  for (i = 0; i < HB_MARKS_SZ; i++) {
    result += count_ones(hhdr->hb_marks[i]);
  }
# endif
  GC_ASSERT(result > 0);
  /* Exclude the one bit set past the end. */
  result--;

# ifndef MARK_BIT_PER_OBJ
  if (IS_UNCOLLECTABLE(hhdr->hb_obj_kind)) {
    size_t lg = BYTES_TO_GRANULES(hhdr->hb_sz);

    /*
     * As mentioned in `GC_set_hdr_marks`, all the bits are set instead of
     * every `n`-th, thus the result should be adjusted.
     */
    GC_ASSERT((unsigned)lg != 0 && result % lg == 0);
    result /= (unsigned)lg;
  }
# endif
  return result;
}
794 # endif /* !USE_MARK_BYTES */
795
796 GC_API unsigned GC_CALL
797 GC_count_set_marks_in_hblk(const void *p)
798 {
799 return GC_n_set_marks(HDR(p));
800 }
801
/*
 * Print a CSV-like line (kind, object size, marks set, objects per
 * block) describing heap block `h`, and accumulate the block count and
 * rounded-up size into the `struct Print_stats` passed via `raw_ps`.
 */
STATIC void GC_CALLBACK
GC_print_block_descr(struct hblk *h, void *raw_ps)
{
  const hdr *hhdr = HDR(h);
  size_t sz = hhdr->hb_sz;
  struct Print_stats *ps = (struct Print_stats *)raw_ps;
  size_t n_marks = (size_t)GC_n_set_marks(hhdr);
  size_t n_objs = HBLK_OBJS(sz);

# ifndef PARALLEL_MARK
  GC_ASSERT(hhdr->hb_n_marks == n_marks);
# endif
# if defined(CPPCHECK)
  GC_noop1_ptr(h);
# endif
  GC_ASSERT((n_objs > 0 ? n_objs : 1) >= n_marks);
  GC_printf("%u,%u,%u,%u\n", hhdr->hb_obj_kind, (unsigned)sz,
            (unsigned)n_marks, (unsigned)n_objs);
  ps->number_of_blocks++;
  ps->total_bytes += (sz + HBLKSIZE - 1) & ~(HBLKSIZE - 1); /*< round up */
}
823
824 void
825 GC_print_block_list(void)
826 {
827 struct Print_stats pstats;
828
829 GC_printf("kind(0=ptrfree/1=normal/2=unc.),"
830 "obj_sz,#marks_set,#objs_in_block\n");
831 BZERO(&pstats, sizeof(pstats));
832 GC_apply_to_all_blocks(GC_print_block_descr, &pstats);
833 GC_printf("blocks= %lu, total_bytes= %lu\n",
834 (unsigned long)pstats.number_of_blocks,
835 (unsigned long)pstats.total_bytes);
836 if (pstats.total_bytes + GC_large_free_bytes != GC_heapsize)
837 GC_err_printf("LOST SOME BLOCKS!! Total bytes should be: %lu\n",
838 (unsigned long)(GC_heapsize - GC_large_free_bytes));
839 }
840
841 GC_API void GC_CALL
842 GC_print_free_list(int kind, size_t lg)
843 {
844 void *flh_next;
845 int n;
846
847 GC_ASSERT(kind < MAXOBJKINDS);
848 GC_ASSERT(lg <= MAXOBJGRANULES);
849 flh_next = GC_obj_kinds[kind].ok_freelist[lg];
850 for (n = 0; flh_next != NULL; n++) {
851 GC_printf("Free object in heap block %p [%d]: %p\n",
852 (void *)HBLKPTR(flh_next), n, flh_next);
853 flh_next = obj_link(flh_next);
854 }
855 }
856 #endif /* !NO_DEBUGGING */
857
858 /*
859 * Clear all `obj_link` pointers in the list of free objects `*flp`.
860 * Clear `*flp`. This must be done before dropping a list of free
861 * `gcj`-style objects, since may otherwise end up with dangling
862 * "descriptor" pointers. It may help for other pointer-containing
863 * objects.
864 */
865 STATIC void
866 GC_clear_fl_links(void **flp)
867 {
868 void *next;
869
870 for (next = *flp; next != NULL; next = *flp) {
871 *flp = NULL;
872 flp = &obj_link(next);
873 }
874 }
875
/*
 * Begin the reclaim phase: reset the in-use counters, empty all
 * reclaim lists and free lists, and sweep every heap block via
 * `GC_reclaim_block` (which frees empty blocks and enqueues the rest).
 * In `report_if_found` (leak-detection) mode, free-list objects are
 * left intact since they are marked.  Must be called with the
 * allocator lock held.
 */
GC_INNER void
GC_start_reclaim(GC_bool report_if_found)
{
  int kind;

  GC_ASSERT(I_HOLD_LOCK());
#if defined(PARALLEL_MARK)
  GC_ASSERT(0 == GC_fl_builder_count);
#endif
  /* Reset in-use counters. `GC_reclaim_block` recomputes them. */
  GC_composite_in_use = 0;
  GC_atomic_in_use = 0;

  /* Clear reclaim- and free-lists. */
  for (kind = 0; kind < (int)GC_n_kinds; kind++) {
    struct hblk **rlist = GC_obj_kinds[kind].ok_reclaim_list;
    GC_bool should_clobber = GC_obj_kinds[kind].ok_descriptor != 0;

    if (NULL == rlist) {
      /* Means this object kind is not used. */
      continue;
    }

    if (!report_if_found) {
      void **fop;
      void **lim = &GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES + 1];

      for (fop = GC_obj_kinds[kind].ok_freelist;
           ADDR_LT((ptr_t)fop, (ptr_t)lim); fop++) {
        if (*fop != NULL) {
          if (should_clobber) {
            /* Kind carries per-object descriptors; see GC_clear_fl_links. */
            GC_clear_fl_links(fop);
          } else {
            *fop = NULL;
          }
        }
      }
    } else {
      /* Free-list objects are marked, and it is safe to leave them. */
    }
    BZERO(rlist, (MAXOBJGRANULES + 1) * sizeof(void *));
  }

  /*
   * Go through all heap blocks, and reclaim unmarked objects or enqueue
   * the block for later processing.
   */
  GC_apply_to_all_blocks(GC_reclaim_block, NUMERIC_TO_VPTR(report_if_found));

#ifdef EAGER_SWEEP
  /*
   * This is a very stupid thing to do. We make it possible anyway.
   */
  GC_reclaim_all((GC_stop_func)0, FALSE);
#elif defined(ENABLE_DISCLAIM)
  /*
   * However, make sure to clear reclaimable objects of kinds with
   * unconditional marking enabled before we do any significant
   * marking work.
   */
  GC_reclaim_unconditionally_marked();
#endif
#if defined(PARALLEL_MARK)
  GC_ASSERT(0 == GC_fl_builder_count);
#endif
}
942
/*
 * Sweep blocks of the given `kind` holding objects of `lg` granules
 * from the corresponding reclaim list, stopping as soon as the matching
 * free list becomes nonempty.  Must be called with the allocator lock
 * held.
 */
GC_INNER void
GC_continue_reclaim(size_t lg, int kind)
{
  struct hblk *hbp;
  struct obj_kind *ok = &GC_obj_kinds[kind];
  struct hblk **rlh = ok->ok_reclaim_list;
  void **flh;

  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == rlh) {
    /* No blocks of this kind. */
    return;
  }

  flh = &ok->ok_freelist[lg];
  for (rlh += lg; (hbp = *rlh) != NULL;) {
    const hdr *hhdr = HDR(hbp);

    /* Dequeue the block before sweeping it. */
    *rlh = hhdr->hb_next;
    GC_reclaim_small_nonempty_block(hbp, hhdr->hb_sz, FALSE);
    if (*flh != NULL) {
      /* The appropriate free list is nonempty. */
      break;
    }
  }
}
969
/*
 * Sweep all blocks on all reclaim lists of all kinds.  If `ignore_old`
 * is set, only blocks touched during the previous collection cycle are
 * processed.  Returns `FALSE` (leaving the remaining lists intact) if
 * `stop_func` is provided and requests an abort.  Must be called with
 * the allocator lock held.
 */
GC_INNER GC_bool
GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
{
  size_t lg;
  int kind;
  const hdr *hhdr;
  struct hblk *hbp;
  struct hblk **rlp;
  struct hblk **rlh;
#ifndef NO_CLOCK
  CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;

  if (GC_print_stats == VERBOSE)
    GET_TIME(start_time);
#endif
  GC_ASSERT(I_HOLD_LOCK());

  for (kind = 0; kind < (int)GC_n_kinds; kind++) {
    rlp = GC_obj_kinds[kind].ok_reclaim_list;
    if (NULL == rlp)
      continue;

    for (lg = 1; lg <= MAXOBJGRANULES; lg++) {
      for (rlh = rlp + lg; (hbp = *rlh) != NULL;) {
        if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
          return FALSE;
        }
        hhdr = HDR(hbp);
        *rlh = hhdr->hb_next;
        if (!ignore_old || (word)hhdr->hb_last_reclaimed == GC_gc_no - 1) {
          /*
           * It is likely we will need it this time, too. It has been
           * touched recently, so this should not trigger paging.
           */
          GC_reclaim_small_nonempty_block(hbp, hhdr->hb_sz, FALSE);
        }
      }
    }
  }
#ifndef NO_CLOCK
  if (GC_print_stats == VERBOSE) {
    CLOCK_TYPE done_time;

    GET_TIME(done_time);
    GC_verbose_log_printf("Disposing of reclaim lists took %lu ms %lu ns\n",
                          MS_TIME_DIFF(done_time, start_time),
                          NS_FRAC_TIME_DIFF(done_time, start_time));
  }
#endif
  return TRUE;
}
1021
1022 #if !defined(EAGER_SWEEP) && defined(ENABLE_DISCLAIM)
/*
 * We do an eager sweep on heap blocks where unconditional marking has
 * been enabled, so that any reclaimable objects have been reclaimed
 * before we start marking. This is a simplified `GC_reclaim_all`
 * restricted to kinds where `ok_mark_unconditionally` is `TRUE`.
 */
STATIC void
GC_reclaim_unconditionally_marked(void)
{
  int kind;

  GC_ASSERT(I_HOLD_LOCK());
  for (kind = 0; kind < (int)GC_n_kinds; kind++) {
    size_t lg;
    struct obj_kind *ok = &GC_obj_kinds[kind];
    struct hblk **rlp = ok->ok_reclaim_list;

    if (NULL == rlp || !ok->ok_mark_unconditionally)
      continue;

    for (lg = 1; lg <= MAXOBJGRANULES; lg++) {
      struct hblk **rlh = rlp + lg;
      struct hblk *hbp;

      while ((hbp = *rlh) != NULL) {
        const hdr *hhdr = HDR(hbp);

        *rlh = hhdr->hb_next;
        GC_reclaim_small_nonempty_block(hbp, hhdr->hb_sz, FALSE);
      }
    }
  }
}
1056 #endif /* !EAGER_SWEEP && ENABLE_DISCLAIM */
1057
/* Closure forwarded through `GC_apply_to_all_blocks` below. */
struct enumerate_reachable_s {
  GC_reachable_object_proc proc; /*< invoked once per marked object */
  void *client_data;             /*< opaque pointer passed to `proc` */
};

/*
 * Invoke the client callback on every marked object of block `hbp`
 * (skipping entirely empty blocks).
 */
STATIC void GC_CALLBACK
GC_do_enumerate_reachable_objects(struct hblk *hbp, void *ed_ptr)
{
  const hdr *hhdr = HDR(hbp);
  ptr_t p, plim;
  const struct enumerate_reachable_s *ped
      = (struct enumerate_reachable_s *)ed_ptr;
  size_t sz = hhdr->hb_sz;
  size_t bit_no;

  if (GC_block_empty(hhdr))
    return;

  p = hbp->hb_body;
  if (sz > MAXOBJBYTES) {
    /* The case of 1 big object. */
    plim = p;
  } else {
    plim = p + HBLKSIZE - sz;
  }
  /* Go through all objects in the block. */
  for (bit_no = 0; ADDR_GE(plim, p); bit_no += MARK_BIT_OFFSET(sz), p += sz) {
    if (mark_bit_from_hdr(hhdr, bit_no)) {
      ped->proc(p, sz, ped->client_data);
    }
  }
}
1090
/*
 * Call `proc` with `client_data` for every currently reachable
 * (marked) object in the heap.  Must be called with at least the
 * reader allocator lock held.
 */
GC_API void GC_CALL
GC_enumerate_reachable_objects_inner(GC_reachable_object_proc proc,
                                     void *client_data)
{
  struct enumerate_reachable_s ed;

  GC_ASSERT(I_HOLD_READER_LOCK());
  ed.proc = proc;
  ed.client_data = client_data;
  GC_apply_to_all_blocks(GC_do_enumerate_reachable_objects, &ed);
}
1102