1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
6 * Copyright (c) 2008-2022 Ivan Maidanski
7 *
8 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 *
11 * Permission is hereby granted to use or copy this program
12 * for any purpose, provided the above notices are retained on all copies.
13 * Permission to modify the code and to distribute modified code is granted,
14 * provided the above notices are retained, and a notice that the code was
15 * modified is included with the above copyright notice.
16 */
17 18 #ifndef GC_PRIVATE_H
19 #define GC_PRIVATE_H
20 21 #ifdef HAVE_CONFIG_H
22 # include "config.h"
23 #endif
24 25 #if !defined(GC_BUILD) && !defined(NOT_GCBUILD)
26 # define GC_BUILD
27 #endif
28 29 #if (defined(__linux__) || defined(__GLIBC__) || defined(__GNU__) \
30 || defined(__CYGWIN__) || defined(HAVE_DLADDR) \
31 || (defined(__COSMOPOLITAN__) && defined(USE_MUNMAP)) \
32 || defined(GC_HAVE_PTHREAD_SIGMASK) \
33 || defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID) \
34 || defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG) \
35 || defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)) \
36 && !defined(_GNU_SOURCE)
37 /* Cannot test `LINUX`, since this must be defined before other includes. */
38 # define _GNU_SOURCE 1
39 #endif
40 41 #if defined(__INTERIX) && !defined(_ALL_SOURCE)
42 # define _ALL_SOURCE 1
43 #endif
44 45 #if (defined(DGUX) && defined(GC_THREADS) || defined(DGUX386_THREADS) \
46 || defined(GC_DGUX386_THREADS)) \
47 && !defined(_USING_POSIX4A_DRAFT10)
48 # define _USING_POSIX4A_DRAFT10 1
49 #endif
50 51 #if defined(__MINGW32__) && !defined(__MINGW_EXCPT_DEFINE_PSDK) \
52 && defined(__i386__) \
53 && defined(GC_EXTERN) /*< defined in `extra/gc.c` file */
54 /* See the description in `mark.c` file. */
55 # define __MINGW_EXCPT_DEFINE_PSDK 1
56 #endif
57 58 #if defined(NO_DEBUGGING) && !defined(GC_ASSERTIONS) && !defined(NDEBUG)
59 /* To turn off assertion checking (in `atomic_ops.h` file). */
60 # define NDEBUG 1
61 #endif
62 63 #ifndef GC_H
64 # include "gc/gc.h"
65 #endif
66 67 #include <stdlib.h>
68 #if !defined(sony_news)
69 # include <stddef.h>
70 #endif
71 72 #ifdef DGUX
73 # include <sys/resource.h>
74 # include <sys/time.h>
75 #endif
76 77 #ifdef BSD_TIME
78 # include <sys/resource.h>
79 # include <sys/time.h>
80 #endif
81 82 #ifdef PARALLEL_MARK
83 # define AO_REQUIRE_CAS
84 # if !defined(__GNUC__) && !defined(AO_ASSUME_WINDOWS98)
85 # define AO_ASSUME_WINDOWS98
86 # endif
87 #endif
88 89 #include "gc/gc_mark.h"
90 #include "gc/gc_tiny_fl.h"
91 92 typedef GC_word word;
93 94 #ifndef PTR_T_DEFINED
95 /*
96 * A generic pointer to which we can add byte displacements and which
97 * can be used for address comparisons.
98 */
99 typedef char *ptr_t;
100 # define PTR_T_DEFINED
101 #endif
102 103 #ifndef SIZE_MAX
104 # include <limits.h>
105 #endif
106 #if defined(SIZE_MAX) && !defined(CPPCHECK)
107 /*
108 * A constant representing maximum value for `size_t` type. Note: an extra
109 * cast is used to workaround some buggy `SIZE_MAX` definitions.
110 */
111 # define GC_SIZE_MAX ((size_t)SIZE_MAX)
112 #else
113 # define GC_SIZE_MAX (~(size_t)0)
114 #endif
115 116 #if (GC_GNUC_PREREQ(3, 0) || defined(__clang__)) && !defined(LINT2)
117 /* Equivalent to `e`, but predict that usually `e` is true (false). */
118 # define LIKELY(e) __builtin_expect(e, 1 /* `TRUE` */)
119 # define UNLIKELY(e) __builtin_expect(e, 0 /* `FALSE` */)
120 #else
121 # define LIKELY(e) (e)
122 # define UNLIKELY(e) (e)
123 #endif /* __GNUC__ */
124 125 /*
126 * Saturated addition of `size_t` values. Used to avoid value wrap around
127 * on overflow. The arguments should have no side effects.
128 */
129 #define SIZET_SAT_ADD(a, b) \
130 (LIKELY((a) < GC_SIZE_MAX - (b)) ? (a) + (b) : GC_SIZE_MAX)
131 132 #include "gcconfig.h"
/*
 * The Boolean type used throughout the collector; values are `TRUE` and
 * `FALSE` (defined below).
 */
#ifdef __cplusplus
typedef bool GC_bool;
#elif defined(__BORLANDC__) || defined(__WATCOMC__)
/* NOTE(review): presumably `char` misbehaves with these compilers here
   for a historical reason — confirm before unifying the branches. */
typedef int GC_bool;
#else
typedef char GC_bool;
#endif
141 142 #if defined(__cplusplus) && !defined(ANY_MSWIN)
143 /* Avoid macro redefinition on a Windows platform. */
144 # define TRUE true
145 # define FALSE false
146 #else
147 # define TRUE 1
148 # define FALSE 0
149 #endif
150 151 #if !defined(GC_ATOMIC_UNCOLLECTABLE) && defined(ATOMIC_UNCOLLECTABLE)
152 /* For compatibility with old-style naming. */
153 # define GC_ATOMIC_UNCOLLECTABLE
154 #endif
155 156 #ifndef GC_INNER
157 /*
158 * This tagging macro must be used at the start of every variable definition
159 * which is declared with `GC_EXTERN`. Should be also used for the GC-scope
160 * function definitions and prototypes. Must not be used in `gcconfig.h`
161 * file. Should not be used for the debugging-only functions.
162 */
163 # if defined(GC_DLL) && defined(__GNUC__) && !defined(ANY_MSWIN)
164 # if GC_GNUC_PREREQ(4, 0) && !defined(GC_NO_VISIBILITY)
165 /* See the corresponding `GC_API` definition. */
166 # define GC_INNER __attribute__((__visibility__("hidden")))
167 # else
168 /* The attribute is unsupported. */
169 # define GC_INNER /*< empty */
170 # endif
171 # else
172 # define GC_INNER /*< empty */
173 # endif
174 175 # define GC_EXTERN extern GC_INNER
176 /*
177 * Used only for the GC-scope variables (prefixed with `GC_`) declared
178 * in the private header files. Must not be used for thread-local
179 * variables. Must not be used in `gcconfig.h` file.
180 * The corresponding variable definition must start with `GC_INNER`.
181 * Should not be used for the debugging- or profiling-only variables.
182 * As of now, there are some other exceptions, e.g. for the variables
183 * that are known to be used by some popular clients.
184 */
185 #endif /* !GC_INNER */
186 187 #ifdef __cplusplus
188 /* `register` storage specifier is deprecated in C++11. */
189 # define REGISTER /*< empty */
190 #else
191 /*
192 * Used only for several local variables in the performance-critical
193 * functions. Should not be used for new code.
194 */
195 # define REGISTER register
196 #endif
197 198 #if defined(CPPCHECK)
199 # define MACRO_BLKSTMT_BEGIN {
200 # define MACRO_BLKSTMT_END }
201 # define LOCAL_VAR_INIT_OK = 0 /*< to avoid "uninit var" false positive */
202 #else
203 # define MACRO_BLKSTMT_BEGIN do {
204 # define MACRO_BLKSTMT_END \
205 } \
206 while (0)
207 # define LOCAL_VAR_INIT_OK /*< empty */
208 #endif
209 210 #if defined(M68K) && defined(__GNUC__)
211 /*
212 * By default, `__alignof__(void *)` is 2 on m68k architecture.
213 * Use this attribute to have the machine-word alignment (i.e. 4-byte one
214 * on the given 32-bit architecture).
215 */
216 # define GC_ATTR_PTRT_ALIGNED __attribute__((__aligned__(sizeof(ptr_t))))
217 #else
218 # define GC_ATTR_PTRT_ALIGNED /*< empty */
219 #endif
220 221 #ifdef CHERI_PURECAP
222 # include <cheriintrin.h>
223 #endif
224 225 EXTERN_C_BEGIN
226 227 typedef GC_uintptr_t GC_funcptr_uint;
228 #define FUNCPTR_IS_DATAPTR
229 230 typedef unsigned int unsigned32;
231 232 #define hblk GC_hblk_s
233 struct hblk;
234 235 typedef struct hblkhdr hdr;
236 237 EXTERN_C_END
238 239 #include "gc_hdrs.h"
240 241 #ifndef GC_ATTR_NO_SANITIZE_ADDR
242 # ifndef ADDRESS_SANITIZER
243 # define GC_ATTR_NO_SANITIZE_ADDR /*< empty */
244 # elif GC_CLANG_PREREQ(3, 8)
245 # define GC_ATTR_NO_SANITIZE_ADDR \
246 __attribute__((__no_sanitize__("address")))
247 # else
248 # define GC_ATTR_NO_SANITIZE_ADDR __attribute__((__no_sanitize_address__))
249 # endif
250 #endif /* !GC_ATTR_NO_SANITIZE_ADDR */
251 252 #ifndef GC_ATTR_NO_SANITIZE_MEMORY
253 # ifndef MEMORY_SANITIZER
254 # define GC_ATTR_NO_SANITIZE_MEMORY /*< empty */
255 # elif GC_CLANG_PREREQ(3, 8)
256 # define GC_ATTR_NO_SANITIZE_MEMORY \
257 __attribute__((__no_sanitize__("memory")))
258 # else
259 # define GC_ATTR_NO_SANITIZE_MEMORY __attribute__((__no_sanitize_memory__))
260 # endif
261 #endif /* !GC_ATTR_NO_SANITIZE_MEMORY */
262 263 #ifndef GC_ATTR_NO_SANITIZE_THREAD
264 # ifndef THREAD_SANITIZER
265 # define GC_ATTR_NO_SANITIZE_THREAD /*< empty */
266 # elif GC_CLANG_PREREQ(3, 8)
267 # define GC_ATTR_NO_SANITIZE_THREAD \
268 __attribute__((__no_sanitize__("thread")))
269 # else
270 /*
271 * It seems that `no_sanitize_thread` attribute has no effect if the
272 * function is inlined (as of gcc-11.1.0, at least).
273 */
274 # define GC_ATTR_NO_SANITIZE_THREAD \
275 GC_ATTR_NOINLINE __attribute__((__no_sanitize_thread__))
276 # endif
277 #endif /* !GC_ATTR_NO_SANITIZE_THREAD */
278 279 #define GC_ATTR_NO_SANITIZE_ADDR_MEM_THREAD \
280 GC_ATTR_NO_SANITIZE_ADDR GC_ATTR_NO_SANITIZE_MEMORY \
281 GC_ATTR_NO_SANITIZE_THREAD
282 283 #ifndef UNUSED_ARG
284 # define UNUSED_ARG(arg) ((void)(arg))
285 #endif
286 287 #ifdef HAVE_CONFIG_H
288 /* The `inline` keyword is determined by `AC_C_INLINE` of `autoconf`. */
289 # define GC_INLINE static inline
290 #elif defined(_MSC_VER) || defined(__INTEL_COMPILER) || defined(__DMC__) \
291 || (GC_GNUC_PREREQ(3, 0) && defined(__STRICT_ANSI__)) \
292 || defined(__BORLANDC__) || defined(__WATCOMC__)
293 # define GC_INLINE static __inline
294 #elif GC_GNUC_PREREQ(3, 0) || defined(__sun)
295 # define GC_INLINE static inline
296 #else
297 # define GC_INLINE static
298 #endif
299 300 #ifndef GC_ATTR_NOINLINE
301 # if GC_GNUC_PREREQ(4, 0)
302 # define GC_ATTR_NOINLINE __attribute__((__noinline__))
303 # elif _MSC_VER >= 1400
304 # define GC_ATTR_NOINLINE __declspec(noinline)
305 # else
306 # define GC_ATTR_NOINLINE /*< empty */
307 # endif
308 #endif
309 310 #ifndef GC_API_OSCALL
311 /* This is used to identify GC routines called by name from OS. */
312 # if defined(__GNUC__)
313 # if GC_GNUC_PREREQ(4, 0) && !defined(GC_NO_VISIBILITY)
314 /* Same as `GC_API` macro if `GC_DLL` one is defined. */
315 # define GC_API_OSCALL extern __attribute__((__visibility__("default")))
316 # else
317 /* The attribute is unsupported. */
318 # define GC_API_OSCALL extern
319 # endif
320 # else
321 # define GC_API_OSCALL GC_API
322 # endif
323 #endif
324 325 #ifndef GC_API_PRIV
326 # define GC_API_PRIV GC_API
327 #endif
328 329 #ifndef GC_API_PATCHABLE
330 # define GC_API_PATCHABLE GC_ATTR_NOINLINE GC_API
331 #endif
332 333 #if defined(THREADS) && !defined(NN_PLATFORM_CTR)
334 # include "gc_atomic_ops.h"
335 # ifndef AO_HAVE_compiler_barrier
336 # define AO_HAVE_compiler_barrier 1
337 # endif
338 #endif
339 340 #ifdef ANY_MSWIN
341 # ifndef WIN32_LEAN_AND_MEAN
342 # define WIN32_LEAN_AND_MEAN 1
343 # endif
344 # define NOSERVICE
345 # include <windows.h>
346 347 /* This is included strictly after the platform `windows.h` file. */
348 # include <winbase.h>
349 #endif /* ANY_MSWIN */
350 351 #include "gc_locks.h"
#ifdef GC_ASSERTIONS
/*
 * Check invariant `e` (assertion-enabled builds only): if it does not
 * hold, print the source location to the error stream and abort.
 * Note: `e` must not have side effects the surrounding code relies on,
 * since the macro expands to nothing unless `GC_ASSERTIONS` is defined.
 */
# define GC_ASSERT(e) \
    do { \
      if (UNLIKELY(!(e))) { \
        GC_err_printf("Assertion failure: %s:%d\n", __FILE__, __LINE__); \
        ABORT("assertion failure"); \
      } \
    } while (0)
#else
# define GC_ASSERT(e)
#endif
364 365 #include "gc/gc_inline.h"
366 367 /*
368 * Prevent certain compiler warnings by making a pointer-related cast
369 * through a "pointer-sized" numeric type.
370 */
371 #define CAST_THRU_UINTPTR(t, x) ((t)(GC_uintptr_t)(x))
372 373 #define CAST_AWAY_VOLATILE_PVOID(p) \
374 CAST_THRU_UINTPTR(/* no volatile */ void *, p)
375 376 /*
377 * Convert an `unsigned` value to a `void` pointer. Typically used to
378 * print a numeric value using "%p" format specifier. The pointer is not
379 * supposed to be dereferenced.
380 */
381 #define NUMERIC_TO_VPTR(v) ((void *)(GC_uintptr_t)(v))
382 383 /* Create a `ptr_t` pointer from a number (of `word` type). */
384 #define MAKE_CPTR(w) ((ptr_t)(GC_uintptr_t)(word)(w))
385 386 #define GC_WORD_MAX (~(word)0)
387 388 /* Convert given pointer to its address. Result is of `word` type. */
389 #ifdef CHERI_PURECAP
390 # define ADDR(p) cheri_address_get(p)
391 #else
392 # define ADDR(p) ((word)(GC_uintptr_t)(p))
393 #endif
394 395 #define ADDR_LT(p, q) GC_ADDR_LT(p, q)
396 #define ADDR_GE(p, q) (!ADDR_LT(p, q))
397 398 /*
399 * Check whether pointer `p` is in range [`s`, `e_p1`).
400 * `p` should not have side effects.
401 */
402 #define ADDR_INSIDE(p, s, e_p1) (ADDR_GE(p, s) && ADDR_LT(p, e_p1))
403 404 /* Handy definitions to compare and adjust pointers in a stack. */
/*
 * "Hotter" means closer to the most recently pushed frame (i.e. later
 * in push order); "cooler" means toward the stack base.  `MAKE_COOLER`
 * moves pointer `p` by `d` elements toward the base, clamped so the
 * address computation cannot wrap around; `MAKE_HOTTER` moves `p` by
 * `d` elements toward the stack top, unclamped.
 */
#ifdef STACK_GROWS_UP
# define HOTTER_THAN(p, q) ADDR_LT(q, p) /*< inverse */
  /* Decrement `p` by `d` elements unless that would underflow zero. */
# define MAKE_COOLER(p, d) \
    (void)((p) -= ADDR(p) > (word)((d) * sizeof(*(p))) ? (d) : 0)
# define MAKE_HOTTER(p, d) (void)((p) += (d))
#else
# define HOTTER_THAN(p, q) ADDR_LT(p, q)
  /* Increment `p` by `d` elements unless that would overflow the
     address range. */
# define MAKE_COOLER(p, d) \
    (void)((p) \
           += ADDR(p) <= (word)(GC_WORD_MAX - (d) * sizeof(*(p))) ? (d) : 0)
# define MAKE_HOTTER(p, d) (void)((p) -= (d))
#endif /* !STACK_GROWS_UP */
417 418 /* Clear/set flags (given by a mask) in a pointer. */
419 #define CPTR_CLEAR_FLAGS(p, mask) \
420 (ptr_t)((GC_uintptr_t)(p) & ~(GC_uintptr_t)(word)(mask))
421 #define CPTR_SET_FLAGS(p, mask) (ptr_t)((GC_uintptr_t)(p) | (word)(mask))
422 423 /* Easily changeable parameters are below. */
424 425 #ifdef ALL_INTERIOR_POINTERS
426 /*
427 * Forces all pointers into the interior of an object to be considered valid.
428 * Also causes the sizes of all objects to be inflated by at least one byte.
429 * This should suffice to guarantee that in the presence of a compiler that
430 * does not perform garbage-collector-unsafe optimizations, all portable,
431 * strictly ANSI conforming C programs should be safely usable with `malloc`
432 * replaced by `GC_malloc` and `free` calls removed. There are several
433 * disadvantages:
434 * 1. There are probably no interesting, portable, strictly ANSI-conforming
435 * C programs;
436 * 2. This option makes it hard for the collector to allocate space that is
437 * not "pointed to" by integers, etc. (Under SunOS 4.x with a statically
438 * linked `libc`, we empirically observed that it would be difficult to
439 * allocate individual objects larger than 100 KB; even if only smaller
440 * objects are allocated, more swap space is likely to be needed;
441 * fortunately, much of this will never be touched.)
442 *
443 * If you can easily avoid using this option, do. If not, try to keep
444 * individual objects small. This is really controlled at startup, through
445 * `GC_all_interior_pointers` variable.
446 */
447 #endif
448 449 EXTERN_C_BEGIN
450 451 #ifndef GC_NO_FINALIZATION
452 /*
453 * If `GC_finalize_on_demand` is not set, invoke eligible finalizers.
454 * Otherwise: call `(*GC_finalizer_notifier)()` if there are finalizers to
455 * be run, and we have not called this procedure yet this collection cycle.
456 */
457 GC_INNER void GC_notify_or_invoke_finalizers(void);
458 459 /*
460 * Perform all indicated finalization actions on unmarked objects.
461 * Unreachable finalizable objects are enqueued for processing by
462 * `GC_invoke_finalizers()`. Cause disappearing links to disappear
463 * and unreachable objects to be enqueued for finalization.
464 * Invoked with the allocator lock held but the world is running.
465 */
466 GC_INNER void GC_finalize(void);
467 468 # ifndef GC_TOGGLE_REFS_NOT_NEEDED
469 /* Process the "toggle-refs" before GC starts. */
470 GC_INNER void GC_process_togglerefs(void);
471 # endif
472 # ifndef SMALL_CONFIG
473 GC_INNER void GC_print_finalization_stats(void);
474 # endif
475 #else
476 # define GC_notify_or_invoke_finalizers() (void)0
477 #endif /* GC_NO_FINALIZATION */
478 479 #if !defined(DONT_ADD_BYTE_AT_END)
480 # ifdef LINT2
481 /*
482 * Explicitly instruct the code analysis tool that `GC_all_interior_pointers`
483 * is assumed to have only value of 0 or 1.
484 */
485 # define EXTRA_BYTES ((size_t)(GC_all_interior_pointers ? 1 : 0))
486 # else
487 # define EXTRA_BYTES ((size_t)GC_all_interior_pointers)
488 # endif
489 # define MAX_EXTRA_BYTES 1
490 #else
491 # define EXTRA_BYTES 0
492 # define MAX_EXTRA_BYTES 0
493 #endif
494 495 #ifdef LARGE_CONFIG
496 # define MINHINCR 64
497 # define MAXHINCR 4096
498 #else
499 /*
500 * Minimum heap increment, in blocks of `HBLKSIZE`. Note: must be multiple
501 * of largest page size.
502 */
503 # define MINHINCR 16
504 505 /* Maximum heap increment, in blocks. */
506 # define MAXHINCR 2048
507 #endif /* !LARGE_CONFIG */
508 509 /* Stack saving for debugging. */
510 511 #ifdef NEED_CALLINFO
/*
 * One saved stack-frame record for the debugging call-chain support;
 * arrays of `NFRAMES` of these are filled by `GC_save_callers()` and
 * printed by `GC_print_callers()` (declared below).
 */
struct callinfo {
  GC_return_addr_t ci_pc; /*< `pc` of caller, not callee */
# if NARGS > 0
  GC_hidden_pointer ci_arg[NARGS]; /*< hide to avoid retention */
# endif
# if (NFRAMES * (NARGS + 1)) % 2 == 1
  /* Likely alignment problem: pad the structure to an even number of
     pointer-sized fields. */
  ptr_t ci_dummy;
# endif
};
522 523 # ifdef SAVE_CALL_CHAIN
524 /*
525 * Fill in the `pc` and argument information for up to `NFRAMES` of
526 * my callers. Ignore my frame and my callers frame.
527 */
528 GC_INNER void GC_save_callers(struct callinfo info[NFRAMES]);
529 # endif
530 531 /* Print `info` to `stderr`. We do not hold the allocator lock. */
532 GC_INNER void GC_print_callers(struct callinfo info[NFRAMES]);
533 #endif /* NEED_CALLINFO */
534 535 EXTERN_C_END
536 537 /*
538 * Macros to ensure same formatting of C array/struct/union initializer
539 * across multiple versions of clang-format.
540 */
541 #define C_INITIALIZER_BEGIN {
542 #define C_INITIALIZER_END }
543 544 /* OS interface routines. */
545 546 #ifndef NO_CLOCK
547 # ifdef BSD_TIME
548 # undef CLOCK_TYPE
549 # undef GET_TIME
550 # undef MS_TIME_DIFF
551 # define CLOCK_TYPE struct timeval
552 # define CLOCK_TYPE_INITIALIZER C_INITIALIZER_BEGIN 0, 0 C_INITIALIZER_END
553 # define GET_TIME(x) \
554 do { \
555 struct rusage rusage; \
556 getrusage(RUSAGE_SELF, &rusage); \
557 x = rusage.ru_utime; \
558 } while (0)
559 560 /*
561 * Compute time difference. `a` time is expected to be not earlier
562 * than `b` one; the result has `unsigned long` type.
563 */
564 # define MS_TIME_DIFF(a, b) \
565 ((unsigned long)((long)(a.tv_sec - b.tv_sec) * 1000 \
566 + (long)(a.tv_usec - b.tv_usec) / 1000 \
567 - (a.tv_usec < b.tv_usec \
568 && (long)(a.tv_usec - b.tv_usec) % 1000 \
569 != 0 \
570 ? 1 \
571 : 0)))
572 573 /*
574 * The nanosecond part of the time difference. The total time difference
575 * could be computed as:
576 * `MS_TIME_DIFF(a, b) * 1000000 + NS_FRAC_TIME_DIFF(a, b)`.
577 */
578 # define NS_FRAC_TIME_DIFF(a, b) \
579 ((unsigned long)((a.tv_usec < b.tv_usec \
580 && (long)(a.tv_usec - b.tv_usec) % 1000 != 0 \
581 ? 1000L \
582 : 0) \
583 + (long)(a.tv_usec - b.tv_usec) % 1000) \
584 * 1000)
585 586 # elif defined(MSWIN32) || defined(MSWINCE) || defined(WINXP_USE_PERF_COUNTER)
587 # if defined(MSWINRT_FLAVOR) || defined(WINXP_USE_PERF_COUNTER)
588 # define CLOCK_TYPE ULONGLONG
589 /*
590 * Note: two standalone `if` statements below are used to avoid MS VC
591 * false warning (FP) about potentially uninitialized `tc` variable.
592 */
593 # define GET_TIME(x) \
594 do { \
595 LARGE_INTEGER freq, tc; \
596 if (!QueryPerformanceFrequency(&freq)) \
597 ABORT("QueryPerformanceFrequency requires WinXP+"); \
598 if (!QueryPerformanceCounter(&tc)) \
599 ABORT("QueryPerformanceCounter failed"); \
600 x = (CLOCK_TYPE)((double)tc.QuadPart / freq.QuadPart * 1e9); \
601 /* TODO: Call QueryPerformanceFrequency once at GC init. */ \
602 } while (0)
603 # define MS_TIME_DIFF(a, b) ((unsigned long)(((a) - (b)) / 1000000UL))
604 # define NS_FRAC_TIME_DIFF(a, b) \
605 ((unsigned long)(((a) - (b)) % 1000000UL))
606 # else
607 # define CLOCK_TYPE DWORD
608 # define GET_TIME(x) (void)(x = GetTickCount())
609 # define MS_TIME_DIFF(a, b) ((unsigned long)((a) - (b)))
610 # define NS_FRAC_TIME_DIFF(a, b) 0UL
611 # endif /* !WINXP_USE_PERF_COUNTER */
612 613 # elif defined(NN_PLATFORM_CTR)
614 # define CLOCK_TYPE long long
615 EXTERN_C_BEGIN
616 CLOCK_TYPE n3ds_get_system_tick(void);
617 CLOCK_TYPE n3ds_convert_tick_to_ms(CLOCK_TYPE tick);
618 EXTERN_C_END
619 # define GET_TIME(x) (void)(x = n3ds_get_system_tick())
620 # define MS_TIME_DIFF(a, b) \
621 ((unsigned long)n3ds_convert_tick_to_ms((a) - (b)))
622 /* TODO: Implement NS_FRAC_TIME_DIFF(). */
623 # define NS_FRAC_TIME_DIFF(a, b) 0UL
624 625 # elif defined(HAVE_CLOCK_GETTIME)
626 # include <time.h>
627 # define CLOCK_TYPE struct timespec
628 # define CLOCK_TYPE_INITIALIZER C_INITIALIZER_BEGIN 0, 0 C_INITIALIZER_END
629 # if defined(_POSIX_MONOTONIC_CLOCK) && !defined(NINTENDO_SWITCH)
630 # define GET_TIME(x) \
631 do { \
632 if (clock_gettime(CLOCK_MONOTONIC, &x) == -1) \
633 ABORT("clock_gettime failed"); \
634 } while (0)
635 # else
636 # define GET_TIME(x) \
637 do { \
638 if (clock_gettime(CLOCK_REALTIME, &x) == -1) \
639 ABORT("clock_gettime failed"); \
640 } while (0)
641 # endif
642 # define MS_TIME_DIFF(a, b) \
643 /* `a.tv_nsec - b.tv_nsec` is in range -1e9 to 1e9, exclusively. */ \
644 ((unsigned long)((a).tv_nsec + (1000000L * 1000 - (b).tv_nsec)) \
645 / 1000000UL \
646 + ((unsigned long)((a).tv_sec - (b).tv_sec) * 1000UL) - 1000UL)
647 # define NS_FRAC_TIME_DIFF(a, b) \
648 ((unsigned long)((a).tv_nsec + (1000000L * 1000 - (b).tv_nsec)) \
649 % 1000000UL)
650 651 # else /* !BSD_TIME && !LINUX && !NN_PLATFORM_CTR && !MSWIN32 */
652 # include <time.h>
653 # if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
654 # include <machine/limits.h>
655 # define CLOCKS_PER_SEC CLK_TCK
656 # endif
657 # if !defined(CLOCKS_PER_SEC)
658 /*
659 * This is technically a bug in the implementation.
660 * ANSI requires that `CLOCKS_PER_SEC` be defined. But at least under
661 * SunOS 4.1.1, it is not. Also note that the combination of ANSI C
662 * and POSIX is incredibly gross here. The type `clock_t` is used by
663 * both `clock()` and `times()`. But on some machines these use
664 * different notions of a clock tick, `CLOCKS_PER_SEC` seems to apply
665 * only to `clock()`. Hence we use it here. On many machines,
666 * including SunOS, `clock()` actually uses units of microseconds (that
667 * are not really clock ticks).
668 */
669 # define CLOCKS_PER_SEC 1000000
670 # endif
671 # define CLOCK_TYPE clock_t
672 # define GET_TIME(x) (void)(x = clock())
673 # define MS_TIME_DIFF(a, b) \
674 (CLOCKS_PER_SEC % 1000 == 0 \
675 ? (unsigned long)((a) - (b)) \
676 / (unsigned long)(CLOCKS_PER_SEC / 1000) \
677 : ((unsigned long)((a) - (b)) * 1000) \
678 / (unsigned long)CLOCKS_PER_SEC)
679 /*
680 * Avoid using `double` type since some targets (like ARM) might
681 * require `-lm` option for `double`-to-`long` conversion.
682 */
683 # define NS_FRAC_TIME_DIFF(a, b) \
684 (CLOCKS_PER_SEC <= 1000 \
685 ? 0UL \
686 : (unsigned long)(CLOCKS_PER_SEC <= (clock_t)1000000UL \
687 ? (((a) - (b)) \
688 * ((clock_t)1000000UL / CLOCKS_PER_SEC) \
689 % 1000) \
690 * 1000 \
691 : (CLOCKS_PER_SEC \
692 <= (clock_t)1000000UL * 1000 \
693 ? ((a) - (b)) \
694 * ((clock_t)1000000UL * 1000 \
695 / CLOCKS_PER_SEC) \
696 : (((a) - (b)) * (clock_t)1000000UL \
697 * 1000) \
698 / CLOCKS_PER_SEC) \
699 % (clock_t)1000000UL))
700 # endif /* !BSD_TIME && !MSWIN32 */
701 # ifndef CLOCK_TYPE_INITIALIZER
702 /*
703 * This is used to initialize `CLOCK_TYPE` variables (to some value)
704 * to avoid "variable might be uninitialized" compiler warnings.
705 */
706 # define CLOCK_TYPE_INITIALIZER 0
707 # endif
708 #endif /* !NO_CLOCK */
709 710 /* We use `bzero()` and `bcopy()` internally. They may not be available. */
711 #if defined(M68K) && defined(NEXT) || defined(VAX)
712 # define BCOPY_EXISTS
713 #elif defined(DARWIN)
714 # include <string.h>
715 # define BCOPY_EXISTS
716 #endif
#if !defined(BCOPY_EXISTS) || defined(CPPCHECK)
# include <string.h>
  /* Note the BSD-style argument order: source first, destination
     second (i.e. the reverse of `memcpy`). */
# define BCOPY(x, y, n) memcpy(y, x, (size_t)(n))
# define BZERO(x, n) memset(x, 0, (size_t)(n))
#else
# define BCOPY(x, y, n) bcopy((void *)(x), (void *)(y), (size_t)(n))
# define BZERO(x, n) bzero((void *)(x), (size_t)(n))
#endif
726 727 EXTERN_C_BEGIN
728 729 #if defined(CPPCHECK) && defined(ANY_MSWIN)
730 # undef TEXT
731 # ifdef UNICODE
732 # define TEXT(s) L##s
733 # else
734 # define TEXT(s) s
735 # endif
736 #endif /* CPPCHECK && ANY_MSWIN */
737 738 /* Stop and restart mutator threads. */
739 #if defined(NN_PLATFORM_CTR) || defined(NINTENDO_SWITCH) \
740 || defined(GC_WIN32_THREADS) || defined(GC_PTHREADS)
741 GC_INNER void GC_stop_world(void);
742 GC_INNER void GC_start_world(void);
743 # define STOP_WORLD() GC_stop_world()
744 # define START_WORLD() GC_start_world()
745 #else
746 /* Just do a sanity check: we are not inside `GC_do_blocking()`. */
747 # define STOP_WORLD() GC_ASSERT(GC_blocked_sp == NULL)
748 # define START_WORLD()
749 #endif
750 751 /* Abandon ship. */
752 #ifdef SMALL_CONFIG
753 # define GC_on_abort(msg) (void)0 /*< be silent on abort */
754 #else
755 GC_API_PRIV GC_abort_func GC_on_abort;
756 #endif
757 #if defined(CPPCHECK)
758 # define ABORT(msg) \
759 { \
760 GC_on_abort(msg); \
761 abort(); \
762 }
763 #else
764 # if defined(MSWIN_XBOX1) && !defined(DebugBreak)
765 # define DebugBreak() __debugbreak()
766 # elif defined(MSWINCE) && !defined(DebugBreak) \
767 && (!defined(UNDER_CE) || (defined(__MINGW32CE__) && !defined(ARM32)))
768 /*
769 * This simplifies linking for WinCE (and, probably, does not
770 * hurt debugging much); use `-D DebugBreak=DebugBreak` to override
771 * this behavior if really needed. This is also a workaround for
772 * x86mingw32ce toolchain (if it is still declaring `DebugBreak()`
773 * instead of defining it as a macro).
774 */
775 # define DebugBreak() _exit(-1) /*< there is no `abort()` in WinCE */
776 # endif
777 # if defined(MSWIN32) && (defined(NO_DEBUGGING) || defined(LINT2))
778 /*
779 * A more user-friendly abort after showing fatal message.
780 * Exit on error without running "at-exit" callbacks.
781 */
782 # define ABORT(msg) (GC_on_abort(msg), _exit(-1))
783 # elif defined(MSWINCE) && defined(NO_DEBUGGING)
784 # define ABORT(msg) (GC_on_abort(msg), ExitProcess(-1))
785 # elif defined(MSWIN32) || defined(MSWINCE)
786 # if defined(_CrtDbgBreak) && defined(_DEBUG) && defined(_MSC_VER)
787 # define ABORT(msg) \
788 { \
789 GC_on_abort(msg); \
790 _CrtDbgBreak() /*< `__debugbreak()` */; \
791 }
792 # else
793 # define ABORT(msg) \
794 { \
795 GC_on_abort(msg); \
796 DebugBreak(); \
797 }
798 /*
799 * Note: on a WinCE box, this could be silently ignored (i.e., the program
800 * is not aborted); `DebugBreak()` is a statement in some toolchains.
801 */
802 # endif
803 # else /* !MSWIN32 */
804 # define ABORT(msg) (GC_on_abort(msg), abort())
805 # endif
806 #endif /* !CPPCHECK */
807 808 /*
809 * For the abort message with 1 .. 3 arguments. `C_msg` and `C_fmt`
810 * should be literals. `C_msg` should not contain format specifiers.
811 * Arguments should match their format specifiers.
812 */
813 #define ABORT_ARG1(C_msg, C_fmt, arg1) \
814 MACRO_BLKSTMT_BEGIN \
815 GC_ERRINFO_PRINTF(C_msg /* + */ C_fmt "\n", arg1); \
816 ABORT(C_msg); \
817 MACRO_BLKSTMT_END
818 #define ABORT_ARG2(C_msg, C_fmt, arg1, arg2) \
819 MACRO_BLKSTMT_BEGIN \
820 GC_ERRINFO_PRINTF(C_msg /* + */ C_fmt "\n", arg1, arg2); \
821 ABORT(C_msg); \
822 MACRO_BLKSTMT_END
823 #define ABORT_ARG3(C_msg, C_fmt, arg1, arg2, arg3) \
824 MACRO_BLKSTMT_BEGIN \
825 GC_ERRINFO_PRINTF(C_msg /* + */ C_fmt "\n", arg1, arg2, arg3); \
826 ABORT(C_msg); \
827 MACRO_BLKSTMT_END
828 829 /*
830 * Same as `ABORT` but does not have a `noreturn` attribute.
831 * I.e. `ABORT` on a dummy condition (which is always true).
832 */
833 #define ABORT_RET(msg) \
834 if ((GC_funcptr_uint)GC_current_warn_proc == ~(GC_funcptr_uint)0) { \
835 } else \
836 ABORT(msg)
837 838 /* Exit process abnormally, but without making a mess (e.g. out of memory). */
839 #define EXIT() (GC_on_abort(NULL), exit(1 /* `EXIT_FAILURE` */))
840 841 /*
842 * Print warning message, e.g. almost out of memory. The argument (if any)
843 * format specifier should be: "%s", "%p", "%"`WARN_PRIdPTR` or
844 * "%"`WARN_PRIuPTR`.
845 */
846 #define WARN(msg, arg) \
847 GC_current_warn_proc("GC Warning: " msg, (GC_uintptr_t)(arg))
848 GC_EXTERN GC_warn_proc GC_current_warn_proc;
849 850 /*
851 * Print format type macro for decimal `GC_signed_word` value passed to
852 * `WARN()`. This could be redefined for Win64 or LLP64, but typically
853 * should not be done as the `WARN` format string is, possibly,
854 * processed on the client side, so non-standard print type modifiers
855 * (like MS "I64d") should be avoided here if possible.
856 * TODO: Assuming `sizeof(void *)` is equal to `sizeof(long)` or this
857 * is a little-endian machine.
858 */
859 #ifndef WARN_PRIdPTR
860 # define WARN_PRIdPTR "ld"
861 # define WARN_PRIuPTR "lu"
862 #endif
863 864 /*
865 * A tagging macro (for a code static analyzer) to indicate that the
866 * string obtained from an untrusted source (e.g., `argv[]`, `getenv`)
867 * is safe to use in a vulnerable operation (e.g., `open`, `exec`).
868 */
869 #define TRUSTED_STRING(s) COVERT_DATAFLOW_P(s)
870 871 #ifdef GC_READ_ENV_FILE
872 /*
873 * This routine scans `GC_envfile_content` for the specified environment
874 * variable (and returns its value if found).
875 */
876 GC_INNER char *GC_envfile_getenv(const char *name);
877 878 /* Get the process environment entry. */
879 # define GETENV(name) GC_envfile_getenv(name)
880 #elif defined(NO_GETENV) && !defined(CPPCHECK)
881 # define GETENV(name) NULL
882 #elif defined(EMPTY_GETENV_RESULTS) && !defined(CPPCHECK)
883 /* Workaround for a reputed Wine bug. */
884 GC_INLINE char *
885 fixed_getenv(const char *name)
886 {
887 char *value = getenv(name);
888 return value != NULL && *value != '\0' ? value : NULL;
889 }
890 # define GETENV(name) fixed_getenv(name)
891 #else
892 # define GETENV(name) getenv(name)
893 #endif
894 895 EXTERN_C_END
896 897 #if defined(DARWIN)
898 # include <mach/thread_status.h>
899 # ifndef MAC_OS_X_VERSION_MAX_ALLOWED
900 /* Include this header just to import the above macro. */
901 # include <AvailabilityMacros.h>
902 # endif
903 # if defined(POWERPC)
904 # if CPP_WORDSZ == 32
905 # define GC_THREAD_STATE_T ppc_thread_state_t
906 # else
907 # define GC_THREAD_STATE_T ppc_thread_state64_t
908 # define GC_MACH_THREAD_STATE PPC_THREAD_STATE64
909 # define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE64_COUNT
910 # endif
911 # elif defined(I386) || defined(X86_64)
912 # if CPP_WORDSZ == 32
913 # if defined(i386_THREAD_STATE_COUNT) \
914 && !defined(x86_THREAD_STATE32_COUNT)
915 /* Use old naming convention for i686. */
916 # define GC_THREAD_STATE_T i386_thread_state_t
917 # define GC_MACH_THREAD_STATE i386_THREAD_STATE
918 # define GC_MACH_THREAD_STATE_COUNT i386_THREAD_STATE_COUNT
919 # else
920 # define GC_THREAD_STATE_T x86_thread_state32_t
921 # define GC_MACH_THREAD_STATE x86_THREAD_STATE32
922 # define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE32_COUNT
923 # endif
924 # else
925 # define GC_THREAD_STATE_T x86_thread_state64_t
926 # define GC_MACH_THREAD_STATE x86_THREAD_STATE64
927 # define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE64_COUNT
928 # endif
929 # elif defined(ARM32) && defined(ARM_UNIFIED_THREAD_STATE) \
930 && !defined(CPPCHECK)
931 # define GC_THREAD_STATE_T arm_unified_thread_state_t
932 # define GC_MACH_THREAD_STATE ARM_UNIFIED_THREAD_STATE
933 # define GC_MACH_THREAD_STATE_COUNT ARM_UNIFIED_THREAD_STATE_COUNT
934 # elif defined(ARM32)
935 # define GC_THREAD_STATE_T arm_thread_state_t
936 # ifdef ARM_MACHINE_THREAD_STATE_COUNT
937 # define GC_MACH_THREAD_STATE ARM_MACHINE_THREAD_STATE
938 # define GC_MACH_THREAD_STATE_COUNT ARM_MACHINE_THREAD_STATE_COUNT
939 # endif
940 # elif defined(AARCH64)
941 # define GC_THREAD_STATE_T arm_thread_state64_t
942 # define GC_MACH_THREAD_STATE ARM_THREAD_STATE64
943 # define GC_MACH_THREAD_STATE_COUNT ARM_THREAD_STATE64_COUNT
944 # elif !defined(CPPCHECK)
945 # error define GC_THREAD_STATE_T
946 # endif
947 # ifndef GC_MACH_THREAD_STATE
948 # define GC_MACH_THREAD_STATE MACHINE_THREAD_STATE
949 # define GC_MACH_THREAD_STATE_COUNT MACHINE_THREAD_STATE_COUNT
950 # endif
951 952 /*
953 * Try to work out the right way to access thread state structure members.
954 * The structure has different definition in different Darwin versions.
955 * This now defaults to the (older) names without `__`, thus hopefully
956 * not breaking any existing `Makefile.direct` builds.
957 */
958 # if __DARWIN_UNIX03
959 # define THREAD_FLD_NAME(x) __##x
960 # else
961 # define THREAD_FLD_NAME(x) x
962 # endif
963 # if defined(ARM32) && defined(ARM_UNIFIED_THREAD_STATE)
964 # define THREAD_FLD(x) ts_32.THREAD_FLD_NAME(x)
965 # else
966 # define THREAD_FLD(x) THREAD_FLD_NAME(x)
967 # endif
968 #endif /* DARWIN */
969 970 #ifndef WASI
971 # include <setjmp.h>
972 #endif
973 974 #include <stdio.h>
975 976 #if defined(CAN_HANDLE_FORK) && defined(GC_PTHREADS)
977 # include <pthread.h> /*< for `pthread_t` */
978 #endif
979 980 #if __STDC_VERSION__ >= 201112L
981 # include <assert.h> /*< for `static_assert` */
982 #endif
983 984 EXTERN_C_BEGIN
/* Definitions depending on `word` size. */

/* `n mod size_of_word`; the mask trick relies on `CPP_WORDSZ` being a power of two. */
#define modWORDSZ(n) ((n) & (CPP_WORDSZ - 1)) /*< `n mod size_of_word` */
#define divWORDSZ(n) ((n) / CPP_WORDSZ)

/* A `word` with only the most-significant (sign) bit set. */
#define SIGNB ((word)1 << (CPP_WORDSZ - 1))
/* A `size_t` value with only the most-significant bit set. */
#define SIZET_SIGNB (GC_SIZE_MAX ^ (GC_SIZE_MAX >> 1))

/* Defined if pointer size (in bytes) differs from `ALIGNMENT`. */
#if CPP_PTRSZ / 8 != ALIGNMENT
# define UNALIGNED_PTRS
#endif

/* Conversions between counts of bytes, allocation granules and pointers. */
#define BYTES_TO_GRANULES(lb) ((lb) / GC_GRANULE_BYTES)
#define GRANULES_TO_BYTES(lg) (GC_GRANULE_BYTES * (lg))
#define BYTES_TO_PTRS(lb) ((lb) / sizeof(ptr_t))
#define PTRS_TO_BYTES(lpw) ((lpw) * sizeof(ptr_t))
#define GRANULES_TO_PTRS(lg) (GC_GRANULE_PTRS * (lg))

/*
 * Convert size in bytes to that in pointers rounding up (but not adding
 * extra byte at end).
 */
#define BYTES_TO_PTRS_ROUNDUP(lb) BYTES_TO_PTRS((lb) + sizeof(ptr_t) - 1)
/* Size parameters. */

/*
 * Heap block size, in bytes. Should be a power of two.
 * Incremental collection with `MPROTECT_VDB` currently requires the
 * page size to be a multiple of `HBLKSIZE`. Since most modern
 * architectures support variable page sizes down to 4 KB, and i686 and
 * x86_64 are generally 4 KB, we now default to 4 KB, except for:
 * - Alpha: seems to be used with 8 KB pages;
 * - `SMALL_CONFIG`: want less block-level fragmentation.
 */
#ifndef HBLKSIZE
  /* No client override: choose the default log2 of the block size. */
# if defined(SMALL_CONFIG) && !defined(LARGE_CONFIG)
#   define CPP_LOG_HBLKSIZE 10
# elif defined(ALPHA)
#   define CPP_LOG_HBLKSIZE 13
# else
#   define CPP_LOG_HBLKSIZE 12
# endif
#else
  /* Client supplied `HBLKSIZE`: map it to its log2; only powers of two in [512, 64K] are accepted. */
# if HBLKSIZE == 512
#   define CPP_LOG_HBLKSIZE 9
# elif HBLKSIZE == 1024
#   define CPP_LOG_HBLKSIZE 10
# elif HBLKSIZE == 2048
#   define CPP_LOG_HBLKSIZE 11
# elif HBLKSIZE == 4096
#   define CPP_LOG_HBLKSIZE 12
# elif HBLKSIZE == 8192
#   define CPP_LOG_HBLKSIZE 13
# elif HBLKSIZE == 16384
#   define CPP_LOG_HBLKSIZE 14
# elif HBLKSIZE == 32768
#   define CPP_LOG_HBLKSIZE 15
# elif HBLKSIZE == 65536
#   define CPP_LOG_HBLKSIZE 16
# elif !defined(CPPCHECK)
#   error Bad HBLKSIZE value
# endif
  /* Undefine so that `HBLKSIZE` can be redefined below as a typed (`size_t`) expression. */
# undef HBLKSIZE
#endif

#define LOG_HBLKSIZE ((size_t)CPP_LOG_HBLKSIZE)
#define HBLKSIZE ((size_t)1 << CPP_LOG_HBLKSIZE)
/* The largest value whose square still fits in `size_t`, i.e. `2**(bit_width/2) - 1`. */
#define GC_SQRT_SIZE_MAX ((((size_t)1) << (sizeof(size_t) * 8 / 2)) - 1)

/*
 * Maximum size of objects supported by free list (larger objects are
 * allocated directly with `GC_alloc_large()`, by rounding to the next
 * multiple of `HBLKSIZE`). Equals half of the heap block size.
 */
#define MAXOBJBYTES (HBLKSIZE >> 1)
#define MAXOBJGRANULES BYTES_TO_GRANULES(MAXOBJBYTES)

/* `n / HBLKSIZE`, as a shift (block size is a power of two). */
#define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)
/*
 * Equivalent to subtracting one `hblk` pointer from another. We do it
 * this way because a compiler should find it hard to use an integer
 * division instead of a shift. The bundled SunOS 4.1 otherwise sometimes
 * pessimizes the subtraction to involve a call to `.div`.
 * Both arguments are parenthesized so that the casts apply to the whole
 * argument expressions (a cast binds tighter than `-`, so an unwrapped
 * argument such as `a + 1` would otherwise expand incorrectly).
 */
#define HBLK_PTR_DIFF(p, q) divHBLKSZ((ptr_t)(p) - (ptr_t)(q))

/* `n mod HBLKSIZE`; the mask trick relies on `HBLKSIZE` being a power of two. */
#define modHBLKSZ(n) ((n) & (HBLKSIZE - 1))
/* Address of the heap block containing `objptr` (aligns down to `HBLKSIZE`). */
#define HBLKPTR(objptr) \
  ((struct hblk *)PTR_ALIGN_DOWN((ptr_t)(objptr), HBLKSIZE))
/* Byte offset of `objptr` within its heap block. */
#define HBLKDISPL(objptr) modHBLKSZ((size_t)ADDR(objptr))

/* Same as `HBLKPTR` but points to the first block in the page. */
#define HBLK_PAGE_ALIGNED(objptr) \
  ((struct hblk *)PTR_ALIGN_DOWN((ptr_t)(objptr), GC_page_size))

/* Round up allocation size (in bytes) to a multiple of a granule. */
#define ROUNDUP_GRANULE_SIZE(lb) /*< `lb` should have no side-effect */ \
  (SIZET_SAT_ADD(lb, GC_GRANULE_BYTES - 1) & ~(size_t)(GC_GRANULE_BYTES - 1))

/*
 * Round up byte allocation request (after adding `EXTRA_BYTES`) to
 * a multiple of a granule, then convert it to granules.
 * Saturating addition prevents overflow for huge requests.
 */
#define ALLOC_REQUEST_GRANS(lb) /*< `lb` should have no side-effect */ \
  BYTES_TO_GRANULES(SIZET_SAT_ADD(lb, GC_GRANULE_BYTES - 1 + EXTRA_BYTES))

#if MAX_EXTRA_BYTES == 0
  /* No debug/extra header bytes possible: the checks simplify. */
# define ADD_EXTRA_BYTES(lb) (lb)
# define SMALL_OBJ(lb) LIKELY((lb) <= MAXOBJBYTES)
#else
# define ADD_EXTRA_BYTES(lb) /*< `lb` should have no side-effect */ \
    SIZET_SAT_ADD(lb, EXTRA_BYTES)

  /*
   * This really just tests that `lb` is not greater than
   * `MAXOBJBYTES - EXTRA_BYTES`, but we try to avoid looking up `EXTRA_BYTES`
   * (a runtime value) on the likely fast path by first comparing against the
   * compile-time `MAX_EXTRA_BYTES` bound.
   */
# define SMALL_OBJ(lb) /*< `lb` should have no side-effect */ \
    (LIKELY((lb) <= MAXOBJBYTES - MAX_EXTRA_BYTES) \
     || (lb) <= MAXOBJBYTES - EXTRA_BYTES)
#endif
/*
 * Hash table representation of sets of pages. Implements a map from
 * `HBLKSIZE`-aligned chunks of the address space to one bit each.
 * This assumes it is OK to spuriously set bits, e.g. because multiple
 * addresses are represented by a single location. Used by
 * black-listing code, and perhaps by dirty bit maintenance code.
 */
#ifndef LOG_PHT_ENTRIES
# ifdef LARGE_CONFIG
#   if CPP_WORDSZ == 32
      /*
       * Collisions are impossible (because of a 4 GB space limit).
       * Each table takes 128 KB, some of which may never be touched.
       */
#     define LOG_PHT_ENTRIES 20
#   else
      /*
       * Collisions likely at 2M blocks, which is greater than 8 GB.
       * Each table takes 256 KB, some of which may never be touched.
       */
#     define LOG_PHT_ENTRIES 21
#   endif
# elif !defined(SMALL_CONFIG)
    /*
     * Collisions are likely if heap grows to more than 256K blocks,
     * which is greater than 1 GB. Each hash table occupies 32 KB.
     * Even for somewhat smaller heaps, say half of that, collisions may
     * be an issue because we blacklist addresses outside the heap.
     */
#   define LOG_PHT_ENTRIES 18
# else
    /*
     * Collisions are likely if heap grows to more than 32K blocks,
     * which is 128 MB. Each hash table occupies 4 KB.
     */
#   define LOG_PHT_ENTRIES 15
# endif
#endif /* !LOG_PHT_ENTRIES */

#define PHT_ENTRIES (1 << LOG_PHT_ENTRIES)
/* Number of `word`s needed to hold `PHT_ENTRIES` bits (at least one). */
#define PHT_SIZE (PHT_ENTRIES > CPP_WORDSZ ? PHT_ENTRIES / CPP_WORDSZ : 1)
typedef word page_hash_table[PHT_SIZE];

/* Map an address to its bit index: the block number modulo the table size. */
#define PHT_HASH(p) ((size_t)((ADDR(p) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1)))

/* Test/set a single bit of a `page_hash_table` given its bit index. */
#define get_pht_entry_from_index(bl, index) \
  (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
#define set_pht_entry_from_index(bl, index) \
  (void)((bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index))
#if defined(THREADS) && defined(AO_HAVE_or)
  /*
   * And, one more variant for `GC_add_to_black_list_normal` and
   * `GC_add_to_black_list_stack` (invoked indirectly by `GC_do_local_mark()`)
   * and `async_set_pht_entry_from_index()` (invoked by `GC_dirty()` or the
   * write fault handler). Implemented with an atomic "or", so concurrent
   * setters cannot lose each other's bits.
   */
# define set_pht_entry_from_index_concurrent(bl, index) \
    AO_or((volatile AO_t *)&(bl)[divWORDSZ(index)], \
          (AO_t)1 << modWORDSZ(index))
# ifdef MPROTECT_VDB
    /* The atomic variant already tolerates a `volatile` destination. */
#   define set_pht_entry_from_index_concurrent_volatile(bl, index) \
      set_pht_entry_from_index_concurrent(bl, index)
# endif
#else
  /* Single-threaded (or no atomic "or" primitive): plain update suffices. */
# define set_pht_entry_from_index_concurrent(bl, index) \
    set_pht_entry_from_index(bl, index)
# ifdef MPROTECT_VDB
    /*
     * Same as `set_pht_entry_from_index` but avoiding the compound assignment
     * for a `volatile` array.
     */
#   define set_pht_entry_from_index_concurrent_volatile(bl, index) \
      (void)((bl)[divWORDSZ(index)] \
             = (bl)[divWORDSZ(index)] | ((word)1 << modWORDSZ(index)))
# endif
#endif
/* Heap blocks. */

/*
 * The upper bound. We allocate 1 bit per allocation granule.
 * If `MARK_BIT_PER_OBJ` is not defined, we use every `n`-th bit, where
 * `n` is the number of allocation granules per object. Otherwise, we only
 * use the initial group of mark bits, and it is safe to allocate smaller
 * header for large objects.
 */
#define MARK_BITS_PER_HBLK (HBLKSIZE / GC_GRANULE_BYTES)

#ifndef MARK_BIT_PER_OBJ
  /*
   * We maintain layout maps for heap blocks containing objects of
   * a given size. Each entry in this map describes a byte offset
   * (displacement) and has the following type.
   * A byte suffices when the maximum granule displacement fits in 8 bits.
   */
# if (1 << (CPP_LOG_HBLKSIZE - 1)) / GC_GRANULE_BYTES <= 0x100
    typedef unsigned char hb_map_entry_t;
# else
    typedef unsigned short hb_map_entry_t;
# endif
#endif /* !MARK_BIT_PER_OBJ */
/* The (separately allocated) header describing a single heap block. */
struct hblkhdr {
  /*
   * Link field for `hblk` free list and for lists of chunks waiting to
   * be reclaimed.
   */
  struct hblk *hb_next;

  struct hblk *hb_prev; /*< backwards link for free list */

  struct hblk *hb_block; /*< the corresponding block */

  /*
   * Kind of objects in the block. Each kind identifies a mark
   * procedure and a set of list headers. Sometimes called regions.
   */
  unsigned char hb_obj_kind;

  /* Bitwise "or" of the flag constants defined just below. */
  unsigned char hb_flags;

  /* Ignore pointers that do not point to the first `hblk` of this object. */
#define IGNORE_OFF_PAGE 1

  /*
   * This is a free block, which has been unmapped from the address space.
   * `GC_remap()` must be invoked on it before it can be reallocated.
   * Set only if `USE_MUNMAP` macro is defined.
   */
#define WAS_UNMAPPED 2

  /* Block is free, i.e. not in use. */
#define FREE_BLK 4

#ifdef ENABLE_DISCLAIM
  /* This kind has a callback on reclaim. */
# define HAS_DISCLAIM 8

  /*
   * Mark from all objects, marked or not. Used to mark objects needed
   * by reclaim notifier.
   */
# define MARK_UNCONDITIONALLY 0x10
#endif

#ifndef MARK_BIT_PER_OBJ
  /* NOTE(review): presumably set on blocks holding a large object — confirm at setter. */
# define LARGE_BLOCK 0x20
#endif

  /*
   * Value of `GC_gc_no` when block was last allocated or swept.
   * May wrap. For a free block, this is maintained only for `USE_MUNMAP`,
   * and indicates when the header was allocated, or when the size of the
   * block last changed.
   */
  unsigned short hb_last_reclaimed;

#ifdef MARK_BIT_PER_OBJ
  /* Sentinel `hb_inv_sz` value used for large objects. */
# define LARGE_INV_SZ ((unsigned32)1 << 16)

  /*
   * A good upper bound for `2**32 / hb_sz`.
   * For large objects, we use `LARGE_INV_SZ`.
   */
  unsigned32 hb_inv_sz;
#endif

  /*
   * If in use, size in bytes, of objects in the block.
   * Otherwise, the size of the whole free block. We assume that this is
   * convertible to `GC_signed_word` without generating a negative result.
   * We avoid generating free blocks larger than that.
   */
  size_t hb_sz;

  /* Object descriptor for marking. See `gc_mark.h` file. */
  word hb_descr;

#ifndef MARK_BIT_PER_OBJ
  /*
   * A table of remainders `mod BYTES_TO_GRANULES(hb_sz)` essentially,
   * except for large blocks. See `GC_obj_map`.
   */
  hb_map_entry_t *hb_map;
#endif

#ifdef PARALLEL_MARK
  /*
   * Number of set mark bits, excluding the one always set at the end.
   * Currently it is updated concurrently and hence only approximate.
   * But a zero value does guarantee that the block contains
   * no marked objects. Ensuring this property means that we never
   * decrement it to zero during a collection, and hence the count may
   * be one too high. Due to concurrent updates, an arbitrary number
   * of increments, but not all of them (!) may be lost, hence it may,
   * in theory, be much too low. The count may also be too high
   * if multiple mark threads mark the same object due to a race.
   */
  volatile AO_t hb_n_marks;
#else
  /* Without parallel marking, the count is accurate. */
  size_t hb_n_marks;
#endif

#ifdef USE_MARK_BYTES
  /*
   * Unlike the other case, this is in units of bytes. Since we force
   * certain alignment, we need at most one mark bit per a granule.
   * But we do allocate and set one extra mark bit to avoid
   * an explicit check for the partial object at the end of each block.
   */
# define HB_MARKS_SZ (MARK_BITS_PER_HBLK + 1)
  union {
    /*
     * The `i`-th byte is 1 if the object starting at granule `i`
     * or object `i` is marked, 0 otherwise. The mark bit for the
     * "one past the end" object is always set to avoid a special case
     * test in the marker.
     */
    char _hb_marks[HB_MARKS_SZ];
    word dummy; /*< force word alignment of mark bytes */
  } _mark_byte_union;
# define hb_marks _mark_byte_union._hb_marks
#else
  /* Packed mark bits: `MARK_BITS_PER_HBLK` bits rounded up to whole words. */
# define HB_MARKS_SZ (MARK_BITS_PER_HBLK / CPP_WORDSZ + 1)

# if defined(PARALLEL_MARK) || (defined(THREAD_SANITIZER) && defined(THREADS))
    volatile AO_t hb_marks[HB_MARKS_SZ];
# else
    word hb_marks[HB_MARKS_SZ];
# endif
#endif /* !USE_MARK_BYTES */
};
/* A "random" mark bit index for assertions. */
#define ANY_INDEX 23

/* Heap block body. */

/* Capacity of a block body, in `word`s and in allocation granules. */
#define HBLK_WORDS (HBLKSIZE / sizeof(word))
#define HBLK_GRANULES (HBLKSIZE / GC_GRANULE_BYTES)

/*
 * The number of objects in a block dedicated to a certain size.
 * May erroneously yield zero (instead of one) for large objects.
 */
#define HBLK_OBJS(sz_in_bytes) (HBLKSIZE / (sz_in_bytes))
/* A heap block body: exactly `HBLKSIZE` bytes of object storage. */
struct hblk {
  char hb_body[HBLKSIZE];
};
/* Is the block described by `hhdr` free (not in use)? */
#define HBLK_IS_FREE(hhdr) (((hhdr)->hb_flags & FREE_BLK) != 0)

/* Blocks needed to hold `lb` bytes, rounding up; unchecked, may wrap for huge `lb`. */
#define OBJ_SZ_TO_BLOCKS(lb) divHBLKSZ((lb) + HBLKSIZE - 1)

/*
 * Size of block (in units of `HBLKSIZE`) needed to hold objects of
 * given `lb` (in bytes). The checked variant prevents wrap around.
 */
#define OBJ_SZ_TO_BLOCKS_CHECKED(lb) /*< `lb` should have no side-effect */ \
  divHBLKSZ(SIZET_SAT_ADD(lb, HBLKSIZE - 1))

/* The object free-list link (stored in the first pointer-sized word of the object). */
#define obj_link(p) (*(void **)(p))
/*
 * Root sets. Logically private to `mark_rts.c` file. But we do not
 * want the tables scanned, so we put them here.
 */

/* The maximum number of ranges that can be registered as static roots. */
#ifdef LARGE_CONFIG
# define MAX_ROOT_SETS 8192
#elif !defined(SMALL_CONFIG)
# define MAX_ROOT_SETS 2048
#else
# define MAX_ROOT_SETS 512
#endif

/* Maximum number of segments that can be excluded from root sets. */
#define MAX_EXCLUSIONS (MAX_ROOT_SETS / 4)
/* A data structure for excluded static roots. */
struct exclusion {
  ptr_t e_start; /*< start of the excluded address range */
  ptr_t e_end; /*< end of the range; presumably exclusive — confirm in `mark_rts.c` */
};
/*
 * A data structure for list of root sets. We keep a hash table, so that
 * we can filter out duplicate additions. Under Win32, we need to do
 * a better job of filtering overlaps, so we resort to sequential search,
 * and pay the price.
 */
struct roots {
  ptr_t r_start; /*< multiple of pointer size */
  ptr_t r_end; /*< multiple of pointer size and greater than `r_start` */
#ifndef ANY_MSWIN
  struct roots *r_next; /*< hash-chain link (absent where sequential search is used) */
#endif
  /* Delete before registering new dynamic libraries if set. */
  GC_bool r_tmp;
};
#ifndef ANY_MSWIN
  /* Size of hash table index to roots. */
# define LOG_RT_SIZE 6

  /* `RT_SIZE` should be a power of 2, may be not equal to `MAX_ROOT_SETS`. */
# define RT_SIZE (1 << LOG_RT_SIZE)
#endif

/*
 * Upper bound on the number of separately registered heap sections.
 * Defined only for the Windows-like targets or `USE_PROC_FOR_LIBRARIES`.
 */
#if (!defined(MAX_HEAP_SECTS) || defined(CPPCHECK)) \
    && (defined(ANY_MSWIN) || defined(USE_PROC_FOR_LIBRARIES))
# ifdef LARGE_CONFIG
#   if CPP_WORDSZ > 32
#     define MAX_HEAP_SECTS 81920
#   else
#     define MAX_HEAP_SECTS 7680
#   endif
# elif defined(SMALL_CONFIG) && !defined(USE_PROC_FOR_LIBRARIES)
#   if defined(PARALLEL_MARK) && (defined(MSWIN32) || defined(CYGWIN32))
#     define MAX_HEAP_SECTS 384
#   else
#     define MAX_HEAP_SECTS 128 /*< roughly 256 MB (`128 * 2048 * 1024`) */
#   endif
# elif CPP_WORDSZ > 32
#   define MAX_HEAP_SECTS 1024 /*< roughly 8 GB */
# else
#   define MAX_HEAP_SECTS 512 /*< roughly 4 GB */
# endif
#endif /* !MAX_HEAP_SECTS */
/* A mark stack entry: the start of an object plus its mark descriptor. */
typedef struct GC_ms_entry {
  ptr_t mse_start; /*< beginning of object, pointer-aligned one */
#ifdef PARALLEL_MARK
  /* Atomic, since mark threads may access entries concurrently. */
  volatile AO_t mse_descr;
#else
  /*
   * The descriptor; the low-order two bits are tags, as described
   * in `gc_mark.h` file.
   */
  word mse_descr;
#endif
} mse;
/*
 * Current state of marking. Used to remember where we are during the
 * concurrent marking.
 */
typedef int mark_state_t;

/* Opaque here; defined by the finalization code. */
struct disappearing_link;
struct finalizable_object;

/* A hash table of disappearing links. */
struct dl_hashtbl_s {
  struct disappearing_link **head; /*< bucket array; presumably `2**log_size` buckets — confirm in `finalize.c` */
  size_t entries; /*< number of registered links */
  unsigned log_size; /*< log2 of the table size */
};
/* Roots of the finalization data structures. */
struct fnlz_roots_s {
  struct finalizable_object **fo_head; /*< hash table of registered finalizable objects */
  /* List of objects that should be finalized now. */
  struct finalizable_object *finalize_now;
};

/* A toggle-ref slot: holds either a strong or a hidden (weak) pointer. */
union toggle_ref_u {
  /* The least significant bit is used to distinguish between choices. */
  void *strong_ref;
  GC_hidden_pointer weak_ref;
};
/*
 * Extended descriptors. `GC_typed_mark_proc` understands these.
 * These are used for simple objects that are larger than what can
 * be described by a `BITMAP_BITS`-sized bitmap.
 */
typedef struct {
  /*
   * The least significant bit corresponds to the first "pointer-sized"
   * word.
   */
  word ed_bitmap;
  GC_bool ed_continued; /*< next entry is continuation */
} typed_ext_descr_t;

/* One contiguous section of the heap. */
struct HeapSect {
  ptr_t hs_start; /*< starting address of the section */
  size_t hs_bytes; /*< length of the section, in bytes */
};
1504 1505 /*
1506 * Lists of all heap blocks and free lists as well as other random data
1507 * structures that should not be scanned by the collector. These are
1508 * grouped together in a structure so that they can be easily skipped by
1509 * `GC_push_conditional_with_exclusions()`. The ordering is weird to
1510 * make `GC_malloc` faster by keeping the important fields sufficiently
1511 * close together that a single load of a base register will do.
1512 * Scalars that could easily appear to be pointers are also put here.
1513 * The main fields should precede any conditionally included fields, where
1514 * possible.
1515 */
1516 struct _GC_arrays {
1517 word _heapsize; /*< heap size in bytes (value never goes down) */
1518 1519 word _requested_heapsize; /*< heap size due to explicit expansion */
1520 1521 #define GC_heapsize_on_gc_disable GC_arrays._heapsize_on_gc_disable
1522 word _heapsize_on_gc_disable;
1523 1524 word _last_heap_addr;
1525 1526 /*
1527 * Total bytes contained in blocks on the free list of large objects.
1528 * (A large object is the one that occupies a block of at least
1529 * two `HBLKSIZE`.)
1530 */
1531 word _large_free_bytes;
1532 1533 /* Total number of bytes in allocated large objects blocks. */
1534 word _large_allocd_bytes;
1535 1536 /*
1537 * Maximum number of bytes that were ever allocated in large object blocks.
1538 * This is used to help decide when it is safe to split up a large block.
1539 */
1540 word _max_large_allocd_bytes;
1541 1542 /* Number of bytes allocated before this collection cycle. */
1543 word _bytes_allocd_before_gc;
1544 1545 #define GC_our_mem_bytes GC_arrays._our_mem_bytes
1546 word _our_mem_bytes;
1547 1548 #ifndef SEPARATE_GLOBALS
1549 /* Number of bytes allocated during this collection cycle. */
1550 # define GC_bytes_allocd GC_arrays._bytes_allocd
1551 word _bytes_allocd;
1552 #endif
1553 1554 /*
1555 * Number of black-listed bytes dropped during GC cycle as a result
1556 * of repeated scanning during allocation attempts. These are treated
1557 * largely as allocated, even though they are not useful to the client.
1558 */
1559 word _bytes_dropped;
1560 1561 /*
1562 * Approximate number of bytes in objects (and headers) that became
1563 * ready for finalization in the last collection.
1564 */
1565 word _bytes_finalized;
1566 1567 /*
1568 * Number of explicitly deallocated bytes of memory since last
1569 * collection.
1570 */
1571 word _bytes_freed;
1572 1573 /*
1574 * Bytes of memory explicitly deallocated while finalizers were running.
1575 * Used to approximate size of memory explicitly deallocated by finalizers.
1576 */
1577 word _finalizer_bytes_freed;
1578 1579 /*
1580 * Pointer to the first (lowest address) `bottom_index` entity;
1581 * assumes the allocator lock is held.
1582 */
1583 bottom_index *_all_bottom_indices;
1584 1585 /*
1586 * Pointer to the last (highest address) `bottom_index` entity;
1587 * assumes the allocator lock is held.
1588 */
1589 bottom_index *_all_bottom_indices_end;
1590 1591 ptr_t _scratch_free_ptr;
1592 1593 hdr *_hdr_free_list;
1594 1595 #define GC_scratch_end_addr GC_arrays._scratch_end_addr
1596 word _scratch_end_addr; /*< the end point of the current scratch area */
1597 1598 #if defined(IRIX5) || (defined(USE_PROC_FOR_LIBRARIES) && !defined(LINUX))
1599 # define USE_SCRATCH_LAST_END_PTR
1600 /*
1601 * The address of the end point of the last obtained scratch area.
1602 * Used by `GC_register_dynamic_libraries()`.
1603 */
1604 # define GC_scratch_last_end_addr GC_arrays._scratch_last_end_addr
1605 word _scratch_last_end_addr;
1606 #endif
1607 1608 #if defined(GC_ASSERTIONS) || defined(MAKE_BACK_GRAPH) \
1609 || defined(INCLUDE_LINUX_THREAD_DESCR) \
1610 || (defined(KEEP_BACK_PTRS) && ALIGNMENT == 1)
1611 # define SET_REAL_HEAP_BOUNDS
1612 1613 /*
1614 * Similar to `GC_least_plausible_heap_addr` and
1615 * `GC_greatest_plausible_heap_addr` but do not include future
1616 * (potential) heap expansion. Both variables are zero initially.
1617 */
1618 # define GC_least_real_heap_addr GC_arrays._least_real_heap_addr
1619 # define GC_greatest_real_heap_addr GC_arrays._greatest_real_heap_addr
1620 word _least_real_heap_addr;
1621 word _greatest_real_heap_addr;
1622 #endif
1623 1624 /* The limits of stack for `GC_mark_some()` and friends. */
1625 mse *_mark_stack;
1626 mse *_mark_stack_limit;
1627 1628 /*
1629 * All ranges between `GC_mark_stack` (incl.) and `GC_mark_stack_top`
1630 * (incl.) still need to be marked from.
1631 */
1632 #ifdef PARALLEL_MARK
1633 /* Updated only with the mark lock held, but read asynchronously. */
1634 mse *volatile _mark_stack_top;
1635 #else
1636 mse *_mark_stack_top;
1637 #endif
1638 1639 #ifdef DYNAMIC_POINTER_MASK
1640 /*
1641 * Both mask and shift are zeros by default; if mask is zero, then
1642 * correct it to ~0 at the collector initialization.
1643 */
1644 # define GC_pointer_mask GC_arrays._pointer_mask
1645 # define GC_pointer_shift GC_arrays._pointer_shift
1646 word _pointer_mask;
1647 unsigned char _pointer_shift;
1648 #endif
1649 1650 #ifdef THREADS
1651 # ifdef USE_SPIN_LOCK
1652 # define GC_allocate_lock GC_arrays._allocate_lock
1653 volatile AO_TS_t _allocate_lock;
1654 # endif
1655 # if !defined(HAVE_LOCKFREE_AO_OR) && defined(AO_HAVE_test_and_set_acquire) \
1656 && (!defined(NO_MANUAL_VDB) || defined(MPROTECT_VDB))
1657 # define NEED_FAULT_HANDLER_LOCK
1658 # define GC_fault_handler_lock GC_arrays._fault_handler_lock
1659 volatile AO_TS_t _fault_handler_lock;
1660 # endif
1661 1662 # define GC_roots_were_cleared GC_arrays._roots_were_cleared
1663 GC_bool _roots_were_cleared;
1664 #else
1665 # ifndef GC_NO_FINALIZATION
1666 /*
1667 * The variables to minimize the level of recursion when a client
1668 * finalizer allocates memory.
1669 */
1670 # define GC_finalizer_nested GC_arrays._finalizer_nested
1671 # define GC_finalizer_skipped GC_arrays._finalizer_skipped
1672 unsigned char _finalizer_nested;
1673 unsigned short _finalizer_skipped;
1674 # endif
1675 #endif
1676 1677 /*
1678 * Do we need a larger mark stack? May be set by client-supplied
1679 * mark routines.
1680 */
1681 #define GC_mark_stack_too_small GC_arrays._mark_stack_too_small
1682 GC_bool _mark_stack_too_small;
1683 1684 /* Are there collectible marked objects in the heap? */
1685 #define GC_objects_are_marked GC_arrays._objects_are_marked
1686 GC_bool _objects_are_marked;
1687 1688 #define GC_explicit_typing_initialized GC_arrays._explicit_typing_initialized
1689 #ifdef AO_HAVE_load_acquire
1690 volatile AO_t _explicit_typing_initialized;
1691 #else
1692 GC_bool _explicit_typing_initialized;
1693 #endif
1694 1695 /* Number of bytes in the accessible composite objects. */
1696 word _composite_in_use;
1697 1698 /* Number of bytes in the accessible atomic objects. */
1699 word _atomic_in_use;
1700 1701 /* GC number of latest successful `GC_expand_hp_inner()` call. */
1702 #define GC_last_heap_growth_gc_no GC_arrays._last_heap_growth_gc_no
1703 word _last_heap_growth_gc_no;
1704 1705 #ifdef USE_MUNMAP
1706 # define GC_unmapped_bytes GC_arrays._unmapped_bytes
1707 word _unmapped_bytes;
1708 #else
1709 # define GC_unmapped_bytes 0
1710 #endif
1711 1712 #if defined(COUNT_UNMAPPED_REGIONS) && defined(USE_MUNMAP)
1713 # define GC_num_unmapped_regions GC_arrays._num_unmapped_regions
1714 GC_signed_word _num_unmapped_regions;
1715 #else
1716 # define GC_num_unmapped_regions 0
1717 #endif
1718 1719 bottom_index *_all_nils;
1720 1721 #define GC_scan_ptr GC_arrays._scan_ptr
1722 struct hblk *_scan_ptr;
1723 1724 #ifdef PARALLEL_MARK
1725 # define GC_main_local_mark_stack GC_arrays._main_local_mark_stack
1726 mse *_main_local_mark_stack;
1727 1728 /*
1729 * The lowest entry on mark stack that may not be empty.
1730 * Updated only by the initiating thread.
1731 */
1732 # define GC_first_nonempty GC_arrays._first_nonempty
1733 volatile ptr_t _first_nonempty;
1734 #endif
1735 1736 #ifdef ENABLE_TRACE
1737 # define GC_trace_ptr GC_arrays._trace_ptr
1738 ptr_t _trace_ptr;
1739 #endif
1740 1741 #if CPP_PTRSZ > CPP_WORDSZ
1742 # define GC_noop_sink_ptr GC_arrays._noop_sink_ptr
1743 volatile ptr_t _noop_sink_ptr;
1744 #endif
1745 1746 #define GC_noop_sink GC_arrays._noop_sink
1747 #if defined(AO_HAVE_store) && defined(THREAD_SANITIZER)
1748 volatile AO_t _noop_sink;
1749 #else
1750 volatile word _noop_sink;
1751 #endif
1752 1753 #define GC_mark_stack_size GC_arrays._mark_stack_size
1754 size_t _mark_stack_size;
1755 1756 #define GC_mark_state GC_arrays._mark_state
1757 mark_state_t _mark_state; /*< initialized to `MS_NONE` (0) */
1758 1759 #define GC_capacity_heap_sects GC_arrays._capacity_heap_sects
1760 size_t _capacity_heap_sects;
1761 1762 #define GC_n_heap_sects GC_arrays._n_heap_sects
1763 size_t _n_heap_sects; /*< number of separately added heap sections */
1764 1765 #ifdef ANY_MSWIN
1766 # define GC_n_heap_bases GC_arrays._n_heap_bases
1767 size_t _n_heap_bases; /*< see `GC_heap_bases[]` */
1768 #endif
1769 1770 #ifdef USE_PROC_FOR_LIBRARIES
1771 /* Number of `GET_MEM`-allocated memory sections. */
1772 # define GC_n_memory GC_arrays._n_memory
1773 word _n_memory;
1774 #endif
1775 1776 #ifdef GC_GCJ_SUPPORT
1777 # define GC_last_finalized_no GC_arrays._last_finalized_no
1778 word _last_finalized_no;
1779 # define GC_gcjobjfreelist GC_arrays._gcjobjfreelist
1780 ptr_t *_gcjobjfreelist;
1781 #endif
1782 1783 #define GC_fo_entries GC_arrays._fo_entries
1784 size_t _fo_entries;
1785 1786 #ifndef GC_NO_FINALIZATION
1787 # define GC_dl_hashtbl GC_arrays._dl_hashtbl
1788 # define GC_fnlz_roots GC_arrays._fnlz_roots
1789 # define GC_log_fo_table_size GC_arrays._log_fo_table_size
1790 # ifndef GC_LONG_REFS_NOT_NEEDED
1791 # define GC_ll_hashtbl GC_arrays._ll_hashtbl
1792 struct dl_hashtbl_s _ll_hashtbl;
1793 # endif
1794 struct dl_hashtbl_s _dl_hashtbl;
1795 struct fnlz_roots_s _fnlz_roots;
1796 unsigned _log_fo_table_size;
1797 1798 # ifndef GC_TOGGLE_REFS_NOT_NEEDED
1799 # define GC_toggleref_arr GC_arrays._toggleref_arr
1800 # define GC_toggleref_array_size GC_arrays._toggleref_array_size
1801 # define GC_toggleref_array_capacity GC_arrays._toggleref_array_capacity
1802 union toggle_ref_u *_toggleref_arr;
1803 size_t _toggleref_array_size;
1804 size_t _toggleref_array_capacity;
1805 # endif
1806 #endif
1807 1808 #ifdef TRACE_BUF
1809 # define GC_trace_buf_pos GC_arrays._trace_buf_pos
1810 size_t _trace_buf_pos; /*< an index in the circular buffer */
1811 #endif
1812 1813 #ifdef ENABLE_DISCLAIM
1814 # define GC_finalized_kind GC_arrays._finalized_kind
1815 unsigned _finalized_kind;
1816 #endif
1817 1818 /* `GC_static_roots[0..n_root_sets-1]` contains the valid root sets. */
1819 #define n_root_sets GC_arrays._n_root_sets
1820 size_t _n_root_sets;
1821 1822 #define GC_excl_table_entries GC_arrays._excl_table_entries
1823 size_t _excl_table_entries; /*< number of entries in use */
1824 1825 #define GC_ed_size GC_arrays._ed_size
1826 size_t _ed_size; /*< current size of above arrays */
1827 1828 #define GC_avail_descr GC_arrays._avail_descr
1829 size_t _avail_descr; /*< next available slot */
1830 1831 #if defined(CAN_HANDLE_FORK) && defined(GC_PTHREADS)
1832 /* Value of `pthread_self()` of the thread which called `fork()`. */
1833 # define GC_parent_pthread_self GC_arrays._parent_pthread_self
1834 pthread_t _parent_pthread_self;
1835 #endif
1836 1837 /* Points to array of extended descriptors. */
1838 #define GC_ext_descriptors GC_arrays._ext_descriptors
1839 typed_ext_descr_t *_ext_descriptors;
1840 1841 /*
1842 * Table of user-defined mark procedures. There is a small number
1843 * of these, which can be referenced by `DS_PROC` mark descriptors.
1844 * See `gc_mark.h` file.
1845 */
1846 GC_mark_proc _mark_procs[GC_MAX_MARK_PROCS];
1847 1848 /*
1849 * `GC_valid_offsets[i]` implies
1850 * `GC_modws_valid_offsets[i % sizeof(ptr_t)]`.
1851 */
1852 char _modws_valid_offsets[sizeof(ptr_t)];
1853 1854 #ifndef ANY_MSWIN
1855 /*
1856 * The hash table header. Used only to check whether a range
1857 * is already present.
1858 */
1859 # define GC_root_index GC_arrays._root_index
1860 struct roots *_root_index[RT_SIZE];
1861 #endif
1862 1863 #if defined(SAVE_CALL_CHAIN) && !defined(DONT_SAVE_TO_LAST_STACK) \
1864 && (!defined(REDIRECT_MALLOC) || !defined(GC_HAVE_BUILTIN_BACKTRACE))
1865 /*
1866 * Stack at last garbage collection. Useful for debugging mysterious
1867 * object disappearances. In the multi-threaded case, we currently only
1868 * save the calling stack. Not supported in case of `malloc` redirection
1869 * because `backtrace()` may call `malloc()`.
1870 */
1871 struct callinfo _last_stack[NFRAMES];
1872 # define SAVE_CALLERS_TO_LAST_STACK() GC_save_callers(GC_arrays._last_stack)
1873 #else
1874 # define SAVE_CALLERS_TO_LAST_STACK() (void)0
1875 #endif
1876 1877 #ifndef SEPARATE_GLOBALS
1878 /* Free list for objects. */
1879 # define GC_objfreelist GC_arrays._objfreelist
1880 void *_objfreelist[MAXOBJGRANULES + 1];
1881 1882 /* Free list for atomic objects. */
1883 # define GC_aobjfreelist GC_arrays._aobjfreelist
1884 void *_aobjfreelist[MAXOBJGRANULES + 1];
1885 #endif
1886 1887 /*
1888 * Uncollectible but traced objects. Objects on this and `_auobjfreelist`
1889 * are always marked, except during garbage collections.
1890 */
1891 void *_uobjfreelist[MAXOBJGRANULES + 1];
1892 1893 #ifdef GC_ATOMIC_UNCOLLECTABLE
1894 /* Atomic uncollectible but traced objects. */
1895 # define GC_auobjfreelist GC_arrays._auobjfreelist
1896 void *_auobjfreelist[MAXOBJGRANULES + 1];
1897 #endif
1898 1899 /*
1900 * Number of granules to allocate when asked for a certain number of bytes
1901 * (plus `EXTRA_BYTES`). Should be accessed with the allocator lock held.
1902 */
1903 size_t _size_map[MAXOBJBYTES + 1];
1904 1905 #ifndef MARK_BIT_PER_OBJ
1906 /*
1907 * If the element is not `NULL`, then it points to a map of valid object
1908 * addresses. `GC_obj_map[lg][i]` is `i % lg`. This is now used purely
1909 * to replace a division in the marker by a table lookup.
1910 * `GC_obj_map[0]` is used for large objects and contains all nonzero
1911 * entries. This gets us out of the marker fast path without an extra test.
1912 */
1913 # define GC_obj_map GC_arrays._obj_map
1914 hb_map_entry_t *_obj_map[MAXOBJGRANULES + 1];
1915 1916 # define OBJ_MAP_LEN BYTES_TO_GRANULES(HBLKSIZE)
1917 #endif
1918 1919 #define VALID_OFFSET_SZ HBLKSIZE
1920 /*
1921 * A nonzero `GC_valid_offsets[i]` means `i` is registered as
1922 * a displacement.
1923 */
1924 char _valid_offsets[VALID_OFFSET_SZ];
1925 1926 #ifndef GC_DISABLE_INCREMENTAL
1927 /* Pages that were dirty at last `GC_read_dirty()` call. */
1928 # define GC_grungy_pages GC_arrays._grungy_pages
1929 page_hash_table _grungy_pages;
1930 1931 /* Pages dirtied since last `GC_read_dirty()` call. */
1932 # define GC_dirty_pages GC_arrays._dirty_pages
1933 # ifdef MPROTECT_VDB
1934 volatile
1935 # endif
1936 page_hash_table _dirty_pages;
1937 #endif
1938 1939 #if (defined(CHECKSUMS) && (defined(GWW_VDB) || defined(SOFT_VDB))) \
1940 || defined(PROC_VDB)
1941 /* A table to indicate the pages ever dirtied. */
1942 # define GC_written_pages GC_arrays._written_pages
1943 page_hash_table _written_pages;
1944 #endif
1945 1946 /* Heap segments potentially containing client objects. */
1947 #define GC_heap_sects GC_arrays._heap_sects
1948 struct HeapSect *_heap_sects;
1949 1950 #if defined(USE_PROC_FOR_LIBRARIES)
1951 /* All `GET_MEM`-allocated memory. Includes block headers and the like. */
1952 # define GC_our_memory GC_arrays._our_memory
1953 struct HeapSect _our_memory[MAX_HEAP_SECTS];
1954 #endif
1955 1956 #ifdef ANY_MSWIN
1957 /* Start address of memory regions obtained from OS. */
1958 # define GC_heap_bases GC_arrays._heap_bases
1959 ptr_t _heap_bases[MAX_HEAP_SECTS];
1960 #endif
1961 1962 #ifdef MSWINCE
1963 /* Committed lengths of memory regions obtained from OS. */
1964 # define GC_heap_lengths GC_arrays._heap_lengths
1965 word _heap_lengths[MAX_HEAP_SECTS];
1966 #endif
1967 1968 struct roots _static_roots[MAX_ROOT_SETS];
1969 1970 /* Array of exclusions, ascending address order. */
1971 struct exclusion _excl_table[MAX_EXCLUSIONS];
1972 1973 /*
1974 * The block header index. Each entry points to a `bottom_index` entity.
1975 * On a 32-bit machine, it points to the index for a set of the high-order
1976 * bits equal to the index. For longer addresses, we hash the high-order
1977 * bits to compute the index in `GC_top_index`, and each entry points to
1978 * a hash chain. The last entry in each chain is `GC_all_nils`.
1979 */
1980 bottom_index *_top_index[TOP_SZ];
1981 };
/*
 * The single instance of the collector's central data structure.
 * Keeping (almost) all global collector state in one `struct` lets the
 * whole state be excluded from scanning via one address range
 * (`beginGC_arrays` .. `endGC_arrays`).
 */
GC_API_PRIV struct _GC_arrays GC_arrays;

/* Short-name aliases for the remaining `GC_arrays` fields. */
#define GC_all_nils GC_arrays._all_nils
#define GC_atomic_in_use GC_arrays._atomic_in_use
#define GC_bytes_allocd_before_gc GC_arrays._bytes_allocd_before_gc
#define GC_bytes_dropped GC_arrays._bytes_dropped
#define GC_bytes_finalized GC_arrays._bytes_finalized
#define GC_bytes_freed GC_arrays._bytes_freed
#define GC_composite_in_use GC_arrays._composite_in_use
#define GC_excl_table GC_arrays._excl_table
#define GC_finalizer_bytes_freed GC_arrays._finalizer_bytes_freed
#define GC_heapsize GC_arrays._heapsize
#define GC_large_allocd_bytes GC_arrays._large_allocd_bytes
#define GC_large_free_bytes GC_arrays._large_free_bytes
#define GC_last_heap_addr GC_arrays._last_heap_addr
#define GC_mark_stack GC_arrays._mark_stack
#define GC_mark_stack_limit GC_arrays._mark_stack_limit
#define GC_mark_stack_top GC_arrays._mark_stack_top
#define GC_mark_procs GC_arrays._mark_procs
#define GC_max_large_allocd_bytes GC_arrays._max_large_allocd_bytes
#define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
#define GC_requested_heapsize GC_arrays._requested_heapsize
#define GC_all_bottom_indices GC_arrays._all_bottom_indices
#define GC_all_bottom_indices_end GC_arrays._all_bottom_indices_end
#define GC_scratch_free_ptr GC_arrays._scratch_free_ptr
#define GC_hdr_free_list GC_arrays._hdr_free_list
#define GC_size_map GC_arrays._size_map
#define GC_static_roots GC_arrays._static_roots
#define GC_top_index GC_arrays._top_index
#define GC_uobjfreelist GC_arrays._uobjfreelist
#define GC_valid_offsets GC_arrays._valid_offsets

/* Address range occupied by `GC_arrays` (excluded from root scanning). */
#define beginGC_arrays ((ptr_t)(&GC_arrays))
#define endGC_arrays (beginGC_arrays + sizeof(GC_arrays))
/* Object kinds. */
#ifndef MAXOBJKINDS
  /* Maximum number of object kinds (predefined plus client-registered). */
# ifdef SMALL_CONFIG
#   define MAXOBJKINDS 16
# else
#   define MAXOBJKINDS 24
# endif
#endif
GC_EXTERN struct obj_kind {
  /*
   * Array of free-list headers for this kind of object. Point either
   * to `GC_arrays` or to storage allocated with `GC_scratch_alloc()`.
   */
  void **ok_freelist;

  /*
   * List headers for lists of blocks waiting to be swept.
   * Indexed by object size in granules.
   */
  struct hblk **ok_reclaim_list;

  /* Descriptor template for objects in this block. */
  word ok_descriptor;

  /*
   * Add object size in bytes to descriptor template to obtain descriptor.
   * Otherwise the template is used as is.
   */
  GC_bool ok_relocate_descr;

  /* Clear objects before putting them on the free list. */
  GC_bool ok_init;

#ifdef ENABLE_DISCLAIM
  /*
   * Mark from all, including unmarked, objects in block.
   * Used to protect objects reachable from reclaim notifiers.
   */
  GC_bool ok_mark_unconditionally;

  /*
   * The disclaim procedure is called before `obj` is reclaimed, but
   * must also tolerate being called with object from free list.
   * A nonzero exit prevents object from being reclaimed.
   */
  int(GC_CALLBACK *ok_disclaim_proc)(void * /* `obj` */);

  /* Trailing initializer for the two disclaim-related fields above. */
# define OK_DISCLAIM_INITZ /* comma */ , FALSE, 0
#else
# define OK_DISCLAIM_INITZ /*< empty */
#endif
} GC_obj_kinds[MAXOBJKINDS];

/* Address range occupied by the kinds table (excluded from root scanning). */
#define beginGC_obj_kinds ((ptr_t)(&GC_obj_kinds[0]))
#define endGC_obj_kinds (beginGC_obj_kinds + sizeof(GC_obj_kinds))
/*
 * Variables `GC_bytes_allocd`, `GC_objfreelist` and `GC_aobjfreelist`
 * are located inside `GC_arrays` by default.
 * With `SEPARATE_GLOBALS` they become ordinary globals (so each needs
 * its own begin/end pair to be excluded from root scanning).
 */
#ifdef SEPARATE_GLOBALS

/* Number of bytes allocated during this collection cycle. */
extern word GC_bytes_allocd;

/* The free list for `NORMAL` objects. */
extern ptr_t GC_objfreelist[MAXOBJGRANULES + 1];
# define beginGC_objfreelist ((ptr_t)(&GC_objfreelist[0]))
# define endGC_objfreelist (beginGC_objfreelist + sizeof(GC_objfreelist))

/* The free list for atomic (`PTRFREE`) objects. */
extern ptr_t GC_aobjfreelist[MAXOBJGRANULES + 1];
# define beginGC_aobjfreelist ((ptr_t)(&GC_aobjfreelist[0]))
# define endGC_aobjfreelist (beginGC_aobjfreelist + sizeof(GC_aobjfreelist))
#endif /* SEPARATE_GLOBALS */
/* The predefined kinds. */
#define PTRFREE GC_I_PTRFREE
#define NORMAL GC_I_NORMAL
#define UNCOLLECTABLE 2
#ifdef GC_ATOMIC_UNCOLLECTABLE
# define AUNCOLLECTABLE 3
  /*
   * Since `UNCOLLECTABLE` is 2 and `AUNCOLLECTABLE` is 3, clearing the
   * lowest bit matches both uncollectible kinds at once.
   */
# define IS_UNCOLLECTABLE(k) (((k) & ~1) == UNCOLLECTABLE)
# define GC_N_KINDS_INITIAL_VALUE 4
#else
# define IS_UNCOLLECTABLE(k) ((k) == UNCOLLECTABLE)
# define GC_N_KINDS_INITIAL_VALUE 3
#endif

/* Number of currently registered object kinds (predefined plus clients'). */
GC_EXTERN unsigned GC_n_kinds;

/* May mean the allocation granularity size, not page size. */
GC_EXTERN size_t GC_page_size;

#ifdef REAL_PAGESIZE_NEEDED
  GC_EXTERN size_t GC_real_page_size;
#else
# define GC_real_page_size GC_page_size
#endif
/*
 * Get heap memory from the OS.
 * Note that `sbrk`-like allocation is preferred, since it usually
 * makes it possible to merge consecutively allocated chunks.
 * It also avoids unintended recursion with `REDIRECT_MALLOC` macro
 * defined. `GET_MEM()` argument should be of `size_t` type and
 * have no side-effect. `GET_MEM()` returns `HBLKSIZE`-aligned chunk
 * (`NULL` means a failure). In case of `MMAP_SUPPORTED`, the argument
 * must also be a multiple of a physical page size.
 * `GET_MEM` is currently not assumed to retrieve zero-filled space.
 */
/* TODO: Take advantage of `GET_MEM()` returning a zero-filled space. */
#if defined(ANY_MSWIN) || defined(MSWIN_XBOX1) || defined(OS2)
  GC_INNER void *GC_get_mem(size_t lb);
# define GET_MEM(lb) GC_get_mem(lb)
# if defined(CYGWIN32) && !defined(USE_WINALLOC)
#   define NEED_UNIX_GET_MEM
# endif
#elif defined(DOS4GW) || defined(EMBOX) || defined(KOS) || defined(NEXT) \
    || defined(NONSTOP) || defined(RTEMS) || defined(__CC_ARM) \
    || (defined(SOLARIS) && !defined(USE_MMAP))
  /* TODO: Use `page_alloc()` directly on Embox. */
# if defined(REDIRECT_MALLOC) && !defined(CPPCHECK)
#   error Malloc redirection is unsupported
# endif
  /*
   * Over-allocate by `GC_page_size` bytes so that `HBLKPTR` can round
   * the `calloc` result up to an `HBLKSIZE`-aligned address.
   */
# define GET_MEM(lb) \
    ((void *)HBLKPTR((ptr_t)calloc(1, SIZET_SAT_ADD(lb, GC_page_size)) \
                     + GC_page_size - 1))
#elif !defined(GET_MEM)
  GC_INNER void *GC_unix_get_mem(size_t lb);
# define GET_MEM(lb) GC_unix_get_mem(lb)
# define NEED_UNIX_GET_MEM
#endif

/*
 * Round up allocation size to a multiple of a page size.
 * `GC_setpagesize()` is assumed to be already invoked.
 * Note: `GC_page_size` is assumed to be a power of two here.
 */
#define ROUNDUP_PAGESIZE(lb) /*< `lb` should have no side-effect */ \
  (SIZET_SAT_ADD(lb, GC_page_size - 1) & ~(GC_page_size - 1))

/*
 * Same as `ROUNDUP_PAGESIZE` but is used to make `GET_MEM()` argument
 * safe.
 */
#ifdef MMAP_SUPPORTED
# define ROUNDUP_PAGESIZE_IF_MMAP(lb) ROUNDUP_PAGESIZE(lb)
#else
# define ROUNDUP_PAGESIZE_IF_MMAP(lb) (lb)
#endif
#ifdef ANY_MSWIN
  /* System information (page size, etc.) obtained at initialization. */
  GC_EXTERN SYSTEM_INFO GC_sysinfo;

/*
 * Is `p` the start of either the `malloc` heap, or of one of the collector
 * heap sections?
 */
GC_INNER GC_bool GC_is_heap_base(const void *p);
#endif

#ifdef GC_GCJ_SUPPORT
/* Note: `GC_hblkfreelist` and `GC_free_bytes` remain visible to GNU `gcj`. */
extern struct hblk *GC_hblkfreelist[];
extern word GC_free_bytes[];
#endif

/* Total size of registered root sections. */
GC_EXTERN word GC_root_size;

/* This is used by `GC_do_blocking()`. */
struct blocking_data {
  GC_fn_type fn;
  void *client_data; /*< and result */
};

/* This is used by `GC_call_with_gc_active`, `GC_push_all_stack_sections`. */
struct GC_traced_stack_sect_s {
  /* Saved stack pointer at the point the section was entered. */
  ptr_t saved_stack_ptr;
#ifdef IA64
  /* IA-64 keeps a second (register backing store) stack; save it too. */
  ptr_t saved_backing_store_ptr;
  ptr_t backing_store_end;
#endif
  /* Link to the enclosing (outer) section; `NULL` for the outermost one. */
  struct GC_traced_stack_sect_s *prev;
};
#ifdef THREADS
/*
 * Process all "traced stack sections" - scan entire stack except for
 * frames belonging to the user functions invoked by `GC_do_blocking`.
 */
GC_INNER void
GC_push_all_stack_sections(ptr_t lo, ptr_t hi,
                           struct GC_traced_stack_sect_s *traced_stack_sect);

/*
 * The total size, in bytes, of all stacks.
 * Updated on every `GC_push_all_stacks()` call.
 */
GC_EXTERN word GC_total_stacksize;

#else
/* Note: `NULL` value means we are not inside `GC_do_blocking()` call. */
GC_EXTERN ptr_t GC_blocked_sp;

/*
 * Points to the "frame" data held in stack by the innermost
 * `GC_call_with_gc_active()`. `NULL` if no such "frame" active.
 */
GC_EXTERN struct GC_traced_stack_sect_s *GC_traced_stack_sect;
#endif /* !THREADS */

#if defined(E2K) && defined(THREADS) || defined(IA64)
/*
 * The bottom of the register stack of the primordial thread.
 * E2K: holds the offset (`ps_ofs`) instead of a pointer.
 */
GC_EXTERN ptr_t GC_register_stackbottom;
#endif

#ifdef IA64
/* Similar to `GC_push_all_stack_sections` but for IA-64 registers store. */
GC_INNER void GC_push_all_register_sections(
    ptr_t bs_lo, ptr_t bs_hi, GC_bool eager,
    struct GC_traced_stack_sect_s *traced_stack_sect);
#endif
2244 2245 /*
2246 * Mark bit operations.
2247 *
2248 * The marks are in a reserved area of each heap block.
2249 * Each object or granule has one mark bit associated with it.
2250 * Only those corresponding to the beginning of an object are used.
2251 */
2252 2253 /*
2254 * Retrieve, set, clear the `n`-th mark bit in a given heap block.
2255 * (Recall that bit `n` corresponds to `n`-th object or allocation granule
2256 * relative to the beginning of the block, including unused space.)
2257 */
2258 2259 #ifdef USE_MARK_BYTES
2260 # define mark_bit_from_hdr(hhdr, n) ((hhdr)->hb_marks[n])
2261 # define set_mark_bit_from_hdr(hhdr, n) (void)((hhdr)->hb_marks[n] = 1)
2262 # define clear_mark_bit_from_hdr(hhdr, n) (void)((hhdr)->hb_marks[n] = 0)
2263 #else
2264 /* Set mark bit correctly, even if mark bits may be concurrently accessed. */
2265 # if defined(PARALLEL_MARK) || (defined(THREAD_SANITIZER) && defined(THREADS))
2266 /*
2267 * Workaround TSan false positive: there is no race between
2268 * `mark_bit_from_hdr` and `set_mark_bit_from_hdr` when `n` is different
2269 * (alternatively, `USE_MARK_BYTES` could be used). If TSan is off, then
2270 * `AO_or()` is used only if we define `USE_MARK_BITS` macro explicitly.
2271 */
2272 # define OR_WORD(addr, bits) AO_or(addr, bits)
2273 # else
2274 # define OR_WORD(addr, bits) (void)(*(addr) |= (bits))
2275 # endif
2276 # define mark_bit_from_hdr(hhdr, n) \
2277 (((hhdr)->hb_marks[divWORDSZ(n)] >> modWORDSZ(n)) & (word)1)
2278 # define set_mark_bit_from_hdr(hhdr, n) \
2279 OR_WORD((hhdr)->hb_marks + divWORDSZ(n), (word)1 << modWORDSZ(n))
2280 # define clear_mark_bit_from_hdr(hhdr, n) \
2281 (void)(((word *)CAST_AWAY_VOLATILE_PVOID((hhdr)->hb_marks))[divWORDSZ(n)] \
2282 &= ~((word)1 << modWORDSZ(n)))
2283 #endif /* !USE_MARK_BYTES */
2284 2285 #ifdef MARK_BIT_PER_OBJ
2286 /*
2287 * Get the mark bit index corresponding to the given byte offset and
2288 * size (in bytes).
2289 */
2290 # define MARK_BIT_NO(offset, sz) ((offset) / (sz))
2291 2292 /* Spacing between useful mark bits. */
2293 # define MARK_BIT_OFFSET(sz) 1
2294 2295 /* Position of final, always set, mark bit. */
2296 # define FINAL_MARK_BIT(sz) ((sz) > MAXOBJBYTES ? 1 : HBLK_OBJS(sz))
2297 #else
2298 # define MARK_BIT_NO(offset, sz) BYTES_TO_GRANULES(offset)
2299 # define MARK_BIT_OFFSET(sz) BYTES_TO_GRANULES(sz)
2300 # define FINAL_MARK_BIT(sz) \
2301 ((sz) > MAXOBJBYTES ? MARK_BITS_PER_HBLK \
2302 : BYTES_TO_GRANULES(HBLK_OBJS(sz) * (sz)))
2303 #endif /* !MARK_BIT_PER_OBJ */
/* Important internal collector routines. */

/* Return the current stack pointer, approximately. */
GC_INNER ptr_t GC_approx_sp(void);

/*
 * Same as `GC_approx_sp` but a macro. `sp` should be a local variable
 * of `volatile` `ptr_t` type.
 */
#if (defined(E2K) && defined(__clang__) \
     || (defined(S390) && __clang_major__ < 8)) \
    && !defined(CPPCHECK)
/*
 * Workaround some bugs in clang:
 * - "undefined reference to llvm.frameaddress" error (clang-9/e2k);
 * - a crash in SystemZTargetLowering of libLLVM-3.8 (s390).
 */
# define STORE_APPROX_SP_TO(sp) (void)(sp = (ptr_t)(&sp))
#elif defined(CPPCHECK) \
    || ((__GNUC__ >= 4 /* `GC_GNUC_PREREQ(4, 0)` */) \
        && !defined(STACK_NOT_SCANNED))
/* TODO: Use `GC_GNUC_PREREQ` after fixing a bug in cppcheck. */
/* Note: l-value is passed instead of pointer to `sp` (because of cppcheck). */
# define STORE_APPROX_SP_TO(sp) (void)(sp = (ptr_t)__builtin_frame_address(0))
#else
  /* Fallback: the address of a local approximates the stack pointer. */
# define STORE_APPROX_SP_TO(sp) (void)(sp = (ptr_t)(&sp))
#endif
/* Have we allocated enough to amortize a collection? */
GC_INNER GC_bool GC_should_collect(void);

/*
 * Get the next block whose address is at least `h`. Returned block
 * is managed by the collector. The block must be in use unless
 * `allow_free` is TRUE. Return `NULL` if there is no such block.
 */
GC_INNER struct hblk *GC_next_block(struct hblk *h, GC_bool allow_free);

/*
 * Get the last (highest address) block whose address is at most `h`.
 * Returned block is managed by the collector, but may or may not be in use.
 * Return `NULL` if there is no such block.
 */
GC_INNER struct hblk *GC_prev_block(struct hblk *h);

/* Initialize the marker state (called once, at collector startup). */
GC_INNER void GC_mark_init(void);

/*
 * Clear mark bits in all allocated heap blocks (i.e. for all heap objects).
 * This invalidates the marker invariant, and sets `GC_mark_state` to
 * reflect this. (This implicitly starts marking to reestablish the
 * invariant.)
 */
GC_INNER void GC_clear_marks(void);

/*
 * Tell the marker that marked objects may point to unmarked ones, and
 * roots may point to unmarked objects. Reset mark stack.
 */
GC_INNER void GC_invalidate_mark_state(void);

/*
 * Perform a small amount of marking. We try to touch roughly a page
 * of memory. Returns quickly if no collection is in progress.
 * Returns `TRUE` if we just finished a mark phase.
 * `cold_gc_frame` argument is an address inside a frame of the
 * collector that remains valid until all marking is complete;
 * `NULL` value indicates that it is OK to miss some register values.
 * In the case of an incremental collection, the world may be running.
 */
GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame);

/*
 * Initiate a garbage collection. Initiates a full collection if the
 * mark state is invalid; otherwise it is a partial one.
 */
GC_INNER void GC_initiate_gc(void);

/*
 * Is a collection in progress? Note that this can return `TRUE` in
 * the non-incremental case, if a collection has been abandoned and
 * the mark state is now `MS_INVALID`.
 */
GC_INNER GC_bool GC_collection_in_progress(void);

/*
 * Push contents of the symbol residing in the static roots area excluded
 * from scanning by the collector for a reason. Note: it should be used only
 * for symbols of relatively small size (containing one or several pointers).
 */
#define GC_PUSH_ALL_SYM(sym) GC_push_all_eager(&(sym), &(sym) + 1)
/* Same as `GC_push_all` but consider interior pointers as valid. */
GC_INNER void GC_push_all_stack(ptr_t b, ptr_t t);

#ifdef NO_VDB_FOR_STATIC_ROOTS
  /* No virtual-dirty-bit tracking for static roots: push unconditionally. */
# define GC_push_conditional_static(b, t, all) \
    ((void)(all), GC_push_all(b, t))
#else
/*
 * Same as `GC_push_conditional` (does either of `GC_push_all` or
 * `GC_push_selected` depending on the third argument) but the caller
 * guarantees the region belongs to the registered static roots.
 */
GC_INNER void GC_push_conditional_static(void *b, void *t, GC_bool all);
#endif

#if defined(WRAP_MARK_SOME) && defined(PARALLEL_MARK)
/*
 * Similar to `GC_push_conditional` but scans the whole region immediately.
 * `GC_mark_local` does not handle memory protection faults yet.
 * So, the static data regions are scanned immediately by `GC_push_roots`.
 */
GC_INNER void GC_push_conditional_eager(void *bottom, void *top, GC_bool all);
#endif

/*
 * In the multi-threaded case, we push part of the current thread stack
 * with `GC_push_all_eager` when we push the registers. This gets the
 * callee-save registers that may disappear. The remainder of the stacks
 * are scheduled for scanning in `(*GC_push_other_roots)()`, which is
 * thread-package-specific.
 */

/*
 * Push all or dirty roots. Call the mark routine (`GC_push_one` for
 * a single pointer, `GC_push_conditional` on groups of pointers) on every
 * top level accessible pointer. If not `all`, then arrange to push only
 * possibly altered values. `cold_gc_frame` is an address inside
 * a collector frame that remains valid until all marking is complete;
 * a `NULL` pointer indicates that it is OK to miss some register values.
 */
GC_INNER void GC_push_roots(GC_bool all, ptr_t cold_gc_frame);

/*
 * Push system or application specific roots onto the mark stack.
 * In some environments (e.g. a multi-threaded one) this is predefined
 * to be nonzero. A client-supplied replacement should also call the
 * original function. Remains externally visible as used by some
 * well-known 3rd-party software (e.g., ECL) currently.
 */
GC_API_PRIV GC_push_other_roots_proc GC_push_other_roots;

#ifdef THREADS
  /* Push the thread-registry structures maintained by the collector. */
  GC_INNER void GC_push_thread_structures(void);
#endif

/*
 * A pointer set to `GC_push_typed_structures_proc` lazily so that we can
 * avoid linking in the typed allocation support if the latter is unused.
 */
GC_EXTERN void (*GC_push_typed_structures)(void);
/* Type of the callback invoked with callee-save registers captured. */
typedef void (*GC_with_callee_saves_func)(ptr_t arg, void *context);

/*
 * Ensure that either registers are pushed, or callee-save registers are
 * somewhere on the stack, and then call `fn(arg, ctxt)`. `ctxt` is either
 * a pointer to a `ucontext_t` entity we generated, or `NULL`. Could be
 * called with or w/o the allocator lock held; could be called from a signal
 * handler as well.
 */
GC_INNER void GC_with_callee_saves_pushed(GC_with_callee_saves_func fn,
                                          ptr_t arg);

#if defined(IA64) || defined(SPARC)
/*
 * Cause all stacked registers to be saved in memory. Return a pointer to
 * the top of the corresponding memory stack.
 */
ptr_t GC_save_regs_in_stack(void);
#endif
#ifdef E2K
# include <asm/e2k_syswork.h>
# include <errno.h>
# include <sys/syscall.h>

# if defined(CPPCHECK)
/*
 * Workaround "Uninitialized bs_lo" and "obsolete alloca() called"
 * false positive (FP) warnings.
 */
#   define PS_ALLOCA_BUF(pbuf, sz) \
      (void)(GC_noop1_ptr(pbuf), *(pbuf) = (ptr_t)__builtin_alloca(sz))
# else
#   define PS_ALLOCA_BUF(pbuf, sz) (void)(*(pbuf) = (ptr_t)alloca(sz))
# endif

/*
 * Approximate size (in bytes) of the obtained procedure stack part
 * belonging to `syscall()` itself.
 */
# define PS_SYSCALL_TAIL_BYTES 0x100

/*
 * Determine the current size of the whole procedure stack. The size
 * is valid only within the current function.
 */
# define GET_PROCEDURE_STACK_SIZE_INNER(psz_ull) \
    do { \
      *(psz_ull) = 0; /*< might be redundant */ \
      if (syscall(__NR_access_hw_stacks, E2K_GET_PROCEDURE_STACK_SIZE, NULL, \
                  NULL, 0, psz_ull) \
          == -1) \
        ABORT_ARG1("Cannot get size of procedure stack", ": errno= %d", \
                   errno); \
      GC_ASSERT(*(psz_ull) > 0 && *(psz_ull) % sizeof(ptr_t) == 0); \
    } while (0)

# ifdef THREADS
    /* Compute the copy offset, skipping the `syscall()` tail when possible. */
#   define PS_COMPUTE_ADJUSTED_OFS(padj_ps_ofs, ps_ofs, ofs_sz_ull) \
      do { \
        if ((ofs_sz_ull) <= (ps_ofs) /* `&& ofs_sz_ull > 0` */) \
          ABORT_ARG2("Incorrect size of procedure stack", \
                     ": ofs= %lu, size= %lu", (unsigned long)(ps_ofs), \
                     (unsigned long)(ofs_sz_ull)); \
        *(padj_ps_ofs) = (ps_ofs) > (unsigned)PS_SYSCALL_TAIL_BYTES \
                             ? (ps_ofs) - (unsigned)PS_SYSCALL_TAIL_BYTES \
                             : 0; \
      } while (0)
# else
/* A simplified variant of the above assuming `ps_ofs` is a zero const. */
#   define PS_COMPUTE_ADJUSTED_OFS(padj_ps_ofs, ps_ofs, ofs_sz_ull) \
      do { \
        GC_STATIC_ASSERT((ps_ofs) == 0); \
        (void)(ofs_sz_ull); \
        *(padj_ps_ofs) = 0; \
      } while (0)
# endif /* !THREADS */

/*
 * Copy procedure (register) stack to a stack-allocated buffer.
 * Usable from a signal handler. The buffer (`*pbuf`) is valid only
 * within the current function. `ps_ofs` designates the offset in the
 * procedure stack to copy the contents from. Note: this macro cannot
 * be changed to a function because `alloca()` and both `syscall()`
 * should be called in the context of the caller.
 */
# define GET_PROCEDURE_STACK_LOCAL(ps_ofs, pbuf, psz) \
    do { \
      unsigned long long ofs_sz_ull; \
      size_t adj_ps_ofs; \
      \
      GET_PROCEDURE_STACK_SIZE_INNER(&ofs_sz_ull); \
      PS_COMPUTE_ADJUSTED_OFS(&adj_ps_ofs, ps_ofs, ofs_sz_ull); \
      *(psz) = (size_t)ofs_sz_ull - adj_ps_ofs; \
      /* Allocate buffer on the stack; cannot return `NULL`. */ \
      PS_ALLOCA_BUF(pbuf, *(psz)); \
      /* Copy the procedure stack at the given offset to the buffer. */ \
      /* Retry while the kernel reports `EAGAIN` (stack may be in flux). */ \
      for (;;) { \
        ofs_sz_ull = adj_ps_ofs; \
        if (syscall(__NR_access_hw_stacks, E2K_READ_PROCEDURE_STACK_EX, \
                    &ofs_sz_ull, *(pbuf), *(psz), NULL) \
            != -1) \
          break; \
        if (errno != EAGAIN) \
          ABORT_ARG2("Cannot read procedure stack", ": sz= %lu, errno= %d", \
                     (unsigned long)(*(psz)), errno); \
      } \
    } while (0)
#endif /* E2K */
#if defined(E2K) && defined(USE_PTR_HWTAG)
/* Load value and get tag of the target memory. */
# if defined(__ptr64__)
#   define LOAD_TAGGED_VALUE(v, tag, p) \
      do { \
        ptr_t val; \
        __asm__ __volatile__("ldd, sm %[adr], 0x0, %[val]\n\t" \
                             "gettagd %[val], %[tag]\n" \
                             : [val] "=r"(val), [tag] "=r"(tag) \
                             : [adr] "r"(p)); \
        v = val; \
      } while (0)
# elif !defined(CPPCHECK)
#   error Unsupported -march for e2k target
# endif

  /*
   * Load `*p` into `v`; skip (`continue` in the enclosing loop) values
   * whose hardware tag is nonzero, i.e. that are not plain data words.
   */
# define LOAD_PTR_OR_CONTINUE(v, p) \
    { \
      int tag LOCAL_VAR_INIT_OK; \
      LOAD_TAGGED_VALUE(v, tag, p); \
      if (tag != 0) \
        continue; \
    }
#elif defined(CHERI_PURECAP)
  /* Is the capability tagged (valid) and load-permitted? */
# define HAS_TAG_AND_PERM_LOAD(cap) \
    (cheri_tag_get(cap) != 0 && (cheri_perms_get(cap) & CHERI_PERM_LOAD) != 0)

  /*
   * Load `*p` into `v`; skip capabilities that are untagged, lack load
   * permission, or whose address lies outside their own bounds.
   */
# define LOAD_PTR_OR_CONTINUE(v, p) \
    { \
      word base_addr; \
      v = *(ptr_t *)(p); \
      if (!HAS_TAG_AND_PERM_LOAD(v)) \
        continue; \
      base_addr = cheri_base_get(v); \
      if (ADDR(v) < base_addr || ADDR(v) >= base_addr + cheri_length_get(v)) \
        continue; \
    }

# define CAPABILITY_COVERS_RANGE(cap, b_addr, e_addr) \
    (cheri_base_get(cap) <= (b_addr) \
     && cheri_base_get(cap) + cheri_length_get(cap) >= (e_addr))
# define SPANNING_CAPABILITY(cap, b_addr, e_addr) \
    (cheri_tag_get(cap) && CAPABILITY_COVERS_RANGE(cap, b_addr, e_addr) \
     && (cheri_perms_get(cap) & (CHERI_PERM_LOAD | CHERI_PERM_LOAD_CAP)) \
            != 0)
#else
  /* Plain platforms: a simple (always-successful) pointer-sized load. */
# define LOAD_PTR_OR_CONTINUE(v, p) (void)(v = *(ptr_t *)(p))
#endif /* !CHERI_PURECAP */
#if defined(DARWIN) && defined(THREADS)
/*
 * If `p` points to an object, mark it and push contents on the mark stack.
 * Pointer recognition test always accepts interior pointers, i.e. this is
 * appropriate for pointers found on the thread stack.
 */
void GC_push_one(word p);
#endif

/*
 * Mark and push (i.e. gray) a single object `p` onto the main mark stack.
 * Consider `p` to be valid if it is an interior pointer. The object `p`
 * has passed a preliminary pointer validity test, but we do not definitely
 * know whether it is valid. Mark bits are not atomically updated; thus
 * this must be the only thread setting them.
 */
#if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
  /* `source` records where the candidate pointer was found (for debugging). */
  GC_INNER void GC_mark_and_push_stack(ptr_t p, ptr_t source);
#else
  GC_INNER void GC_mark_and_push_stack(ptr_t p);
#endif

/* Is the block with the given header containing no pointers? */
#define IS_PTRFREE(hhdr) (0 == (hhdr)->hb_descr)

/* Clear all mark bits in the header. */
GC_INNER void GC_clear_hdr_marks(hdr *hhdr);

/* Set all mark bits in the header. Used for uncollectible blocks. */
GC_INNER void GC_set_hdr_marks(hdr *hhdr);

/* Set all mark bits associated with a free list. */
GC_INNER void GC_set_fl_marks(ptr_t);

#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
/*
 * Check that all mark bits associated with a free list are set.
 * Abort if not.
 */
void GC_check_fl_marks(void **);
#endif

/*
 * Add [`b`,`e`) to the root set. Adding the same interval a second
 * time is a moderately fast no-op, and hence benign. We do not handle
 * different but overlapping intervals efficiently. (But we do handle
 * them correctly.) `tmp` specifies that the interval may be deleted
 * before re-registering dynamic libraries.
 */
GC_INNER void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp);
#ifdef USE_PROC_FOR_LIBRARIES
/*
 * Remove given range from every static root which intersects with the range.
 * `GC_remove_tmp_roots` is assumed to be called before this function is
 * called (repeatedly) by `GC_register_map_entries`.
 */
GC_INNER void GC_remove_roots_subregion(ptr_t b, ptr_t e);
#endif

/*
 * Inform the collector that a certain section of statically allocated
 * memory contains no pointers to garbage-collected memory.
 * The range boundaries should be properly aligned and valid.
 */
GC_INNER void GC_exclude_static_roots_inner(ptr_t start, ptr_t finish);

#if defined(ANY_MSWIN) || defined(DYNAMIC_LOADING)
  /* Add dynamic library data sections to the root set. */
  GC_INNER void GC_register_dynamic_libraries(void);
#endif

/*
 * Remove and re-register dynamic libraries if we are configured to do
 * that at each collection.
 */
GC_INNER void GC_cond_register_dynamic_libraries(void);

/* Machine-dependent startup routines. */

/*
 * Get the cold end of the stack of the primordial thread. This is always
 * called from the main (primordial) thread.
 */
GC_INNER ptr_t GC_get_main_stack_base(void);

#ifdef IA64
  /* Get the cold end of register stack. */
  GC_INNER ptr_t GC_get_register_stack_base(void);
#endif

/* Register the data (and `bss`) segments of the process as roots. */
GC_INNER void GC_register_data_segments(void);

#ifdef THREADS
/* Both are invoked from `GC_init()` only. */
GC_INNER void GC_thr_init(void);

/*
 * Perform all initializations, including those that may require allocation,
 * e.g. initialize thread-local free lists if used. Called by `GC_init()`.
 */
GC_INNER void GC_init_parallel(void);

# ifndef DONT_USE_ATEXIT
    /* Is the current thread the one that called `GC_init()`? */
    GC_INNER GC_bool GC_is_main_thread(void);
# endif
#else
# ifdef TRACE_BUF
    /* Append an entry to the debug trace buffer (single-threaded builds). */
    void GC_add_trace_entry(const char *caller_fn_name, ptr_t arg1, ptr_t arg2);
# endif
#endif /* !THREADS */
#ifdef NO_BLACK_LISTING
  /* Black listing disabled: all operations degenerate to no-ops. */
# define GC_bl_init() (void)0
/* Do not define `GC_bl_init_no_interiors()`. */
# define GC_ADD_TO_BLACK_LIST_NORMAL(p, source) ((void)(p))
# define GC_ADD_TO_BLACK_LIST_STACK(p, source) ((void)(p))
# define GC_promote_black_lists() (void)0
# define GC_unpromote_black_lists() (void)0
#else

/*
 * If we need a block of `n` bytes, and we have a block of `n + BL_LIMIT`
 * bytes available, and `n` is greater than `BL_LIMIT`, but all possible
 * positions in it are black-listed, we just use it anyway (and print
 * a warning, if warnings are enabled). This risks subsequently leaking
 * the block due to a false reference. But not using the block risks
 * unreasonable immediate heap growth.
 */
# define BL_LIMIT GC_black_list_spacing

/*
 * Average number of bytes between black-listed blocks. Approximate.
 * Counts only blocks that are "stack black-listed", i.e. that are
 * problematic in the interior of an object.
 */
GC_EXTERN word GC_black_list_spacing;

/*
 * The interval between unsuppressed warnings about repeated allocation
 * of a very large block.
 */
GC_EXTERN long GC_large_alloc_warn_interval;

/* Initialize the black listing mechanism. */
GC_INNER void GC_bl_init(void);
GC_INNER void GC_bl_init_no_interiors(void);
2764 2765 # ifdef PRINT_BLACK_LIST
2766 /*
2767 * Register bits as a possible future false reference from the heap
2768 * or static data. The argument `p` is not a valid pointer reference,
2769 * but it falls inside the plausible heap bounds.
2770 */
2771 GC_INNER void GC_add_to_black_list_normal(ptr_t p, ptr_t source);
2772 # define GC_ADD_TO_BLACK_LIST_NORMAL(p, source) \
2773 if (GC_all_interior_pointers) { \
2774 GC_add_to_black_list_stack(p, source); \
2775 } else \
2776 GC_add_to_black_list_normal(p, source)
2777 GC_INNER void GC_add_to_black_list_stack(ptr_t p, ptr_t source);
2778 # define GC_ADD_TO_BLACK_LIST_STACK(p, source) \
2779 GC_add_to_black_list_stack(p, source)
2780 # else
2781 GC_INNER void GC_add_to_black_list_normal(ptr_t p);
2782 # define GC_ADD_TO_BLACK_LIST_NORMAL(p, source) \
2783 if (GC_all_interior_pointers) { \
2784 GC_add_to_black_list_stack(p); \
2785 } else \
2786 GC_add_to_black_list_normal(p)
2787 GC_INNER void GC_add_to_black_list_stack(ptr_t p);
2788 # define GC_ADD_TO_BLACK_LIST_STACK(p, source) GC_add_to_black_list_stack(p)
2789 # endif /* PRINT_BLACK_LIST */
2790 2791 /*
2792 * Declare an end to a black listing phase. (I.e. signal the completion of
2793 * a collection.) Turn the incomplete black lists into new black lists, etc.
2794 */
2795 GC_INNER void GC_promote_black_lists(void);
2796 2797 /*
2798 * Approximately undo the effect of `GC_promote_black_lists()`.
2799 * This actually loses some information, but only in a reasonably safe way.
2800 */
2801 GC_INNER void GC_unpromote_black_lists(void);
2802 #endif
2803 2804 /*
2805 * The collector internal memory allocation for small objects.
2806 * Deallocation is not possible. May return `NULL`.
2807 */
2808 GC_INNER ptr_t GC_scratch_alloc(size_t bytes);
2809 2810 #ifdef GWW_VDB
2811 /* `GC_scratch_recycle_no_gww()` is not used. */
2812 #else
2813 # define GC_scratch_recycle_no_gww GC_scratch_recycle_inner
2814 #endif
2815 /* Reuse the memory region by the heap. */
2816 GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t sz);
2817 2818 #ifndef MARK_BIT_PER_OBJ
2819 /*
2820 * Add a heap block map for objects of a size in granules to `GC_obj_map`.
2821 * A size of zero is used for large objects. Returns `FALSE` on failure.
2822 */
2823 GC_INNER GC_bool GC_add_map_entry(size_t lg);
2824 #endif
2825 2826 /*
2827 * Same as `GC_register_displacement` but assuming the allocator lock
2828 * is already held.
2829 */
2830 GC_INNER void GC_register_displacement_inner(size_t offset);
2831 2832 /*
2833 * Allocate a new heap block for small objects of size `lg` (in granules)
2834 * and `kind`. Add all of the block's objects to the free list for objects
2835 * of that size. Set all mark bits if objects are uncollectible.
2836 * Will fail to do anything if out of memory.
2837 */
2838 GC_INNER void GC_new_hblk(size_t lg, int kind);
2839 2840 /*
2841 * Build a free list for objects of size `lg` (in granules) inside heap
2842 * block `h`. Clear objects inside `h` if `clear` argument is set.
2843 * Add `list` to the end of the free list we build. Return the new
2844 * free list. Normally called by `GC_new_hblk()`, but this could also
2845 * be called without the allocator lock, if we ensure that there is no
2846 * concurrent collection which might reclaim objects that we have not
2847 * yet allocated.
2848 */
2849 GC_INNER ptr_t GC_build_fl(struct hblk *h, ptr_t list, size_t lg,
2850 GC_bool clear);
2851 2852 /*
2853 * Allocate (and return pointer to) a heap block for objects of the
2854 * given size and alignment (in bytes), searching over the appropriate
2855 * free block lists; inform the marker that the found block is valid
2856 * for objects of the indicated size. Assumes (as implied by the argument
2857 * name) that `EXTRA_BYTES` value is already added to the size, if needed.
2858 * The client is responsible for clearing the block, if needed.
2859 * Note: we set `GC_obj_map` field in the header correctly; the caller
2860 * is responsible for building an object's free list in the block.
2861 */
2862 GC_INNER struct hblk *GC_allochblk(size_t lb_adjusted, int kind,
2863 unsigned flags, size_t align_m1);
2864 2865 /*
2866 * Deallocate (free) a heap block and mark it as invalid. Coalesce it
2867 * with its neighbors if possible. All mark words are assumed to be cleared.
2868 */
2869 GC_INNER void GC_freehblk(struct hblk *p);
2870 2871 /* Miscellaneous GC routines. */
2872 2873 /*
2874 * This explicitly increases the size of the heap. It is used internally,
2875 * but may also be invoked from `GC_expand_hp` by client. The argument is
2876 * in units of `HBLKSIZE`. (An argument of zero is treated as 1.)
2877 * Returns `FALSE` on failure.
2878 */
2879 GC_INNER GC_bool GC_expand_hp_inner(word n);
2880 2881 /*
2882 * Restore unmarked objects to free lists, or (if `abort_if_found` is `TRUE`)
2883 * report them. (I.e. perform `GC_reclaim_block()` on the entire heap,
2884 * after first clearing small-object free lists if we are not just looking
2885 * for leaks.) Sweeping of small object pages is largely deferred.
2886 */
2887 GC_INNER void GC_start_reclaim(GC_bool abort_if_found);
2888 2889 /*
2890 * Sweep blocks of the indicated object size (in granules) and kind
2891 * until either the appropriate nonempty free list is found, or there
2892 * are no more blocks to sweep.
2893 */
2894 GC_INNER void GC_continue_reclaim(size_t lg, int kind);
2895 2896 /*
2897 * Reclaim all small blocks waiting to be reclaimed. Abort and return
2898 * `FALSE` when/if `(*stop_func)()` returns `TRUE`. If this returns `TRUE`,
2899 * then it is safe to restart the world with incorrectly cleared mark bits.
2900 * If `ignore_old`, then reclaim only blocks that have been reclaimed
2901 * recently, and discard the rest. `stop_func` may be 0.
2902 */
2903 GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old);
2904 2905 /*
2906 * Generic procedure to rebuild a free list in `hbp` with header `hhdr`,
2907 * with objects of size `sz` bytes. Add `list` to the end of the free list.
2908 * Add the number of reclaimed bytes to `*pcount`. Note: it could be called
2909 * directly from `GC_malloc_many`.
2910 */
2911 GC_INNER ptr_t GC_reclaim_generic(struct hblk *hbp, hdr *hhdr, size_t sz,
2912 GC_bool init, ptr_t list, word *pcount);
2913 2914 /*
2915 * Is given heap block completely unmarked (i.e. contains no marked objects)?
2916 * This does not require the block to be in physical memory.
2917 */
2918 GC_INNER GC_bool GC_block_empty(const hdr *hhdr);
2919 2920 /* Always returns 0 (`FALSE`). */
2921 GC_INNER int GC_CALLBACK GC_never_stop_func(void);
2922 2923 /*
2924 * Stop-the-world garbage collection. The caller must have acquired
2925 * the allocator lock. If `stop_func` is not `GC_never_stop_func`, then
2926 * abort if `stop_func` returns `TRUE`. Return `TRUE` if we successfully
2927 * completed the collection (otherwise the collection is aborted).
2928 */
2929 GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func);
2930 2931 #define GC_gcollect_inner() (void)GC_try_to_collect_inner(GC_never_stop_func)
2932 2933 #ifdef THREADS
2934 /*
2935 * We may currently be in thread creation or destruction. Only set to `TRUE`
2936 * while the allocator lock is held. When set, it is OK to run the garbage
2937 * collection from an unknown thread. Protected by the allocator lock.
2938 */
2939 GC_EXTERN GC_bool GC_in_thread_creation;
2940 #endif
2941 2942 /* Has `GC_init()` been run? */
2943 GC_EXTERN GC_bool GC_is_initialized;
2944 2945 /*
2946 * Do `n_blocks` units of a garbage collection work, if appropriate.
2947 * A unit is an amount appropriate for `HBLKSIZE` bytes of allocation.
2948 */
2949 GC_INNER void GC_collect_a_little_inner(size_t n_blocks);
2950 2951 GC_INNER void *GC_malloc_kind_aligned_global(size_t lb, int kind,
2952 size_t align_m1);
2953 2954 GC_INNER void *GC_generic_malloc_aligned(size_t lb, int kind, unsigned flags,
2955 size_t align_m1);
2956 2957 /*
2958 * Allocate an object of the given `kind` but assuming the allocator
2959 * lock is already held. Should not be used to directly allocate
2960 * objects requiring special handling on allocation. `flags` argument
2961 * should be 0 or `IGNORE_OFF_PAGE`; in the latter case the client
2962 * guarantees there will always be a pointer to the beginning (i.e.
2963 * within the first `hblk`) of the object while it is live.
2964 */
2965 GC_INNER void *GC_generic_malloc_inner(size_t lb, int kind, unsigned flags);
2966 2967 /*
2968 * Collect or expand heap in an attempt to make the indicated number of
2969 * free blocks available. Should be called until the blocks are
2970 * available (setting `retry` value to `TRUE` unless this is the first
2971 * call in a loop) or until it fails by returning `FALSE`. The `flags`
2972 * argument should be `IGNORE_OFF_PAGE` or 0.
2973 */
2974 GC_INNER GC_bool GC_collect_or_expand(word needed_blocks, unsigned flags,
2975 GC_bool retry);
2976 2977 /*
2978 * Make the indicated object free list nonempty, and return its head (the
2979 * first object on the free list). The object must be removed from the free
2980 * list by the caller. The size is in granules.
2981 */
2982 GC_INNER ptr_t GC_allocobj(size_t lg, int kind);
2983 2984 #ifdef GC_ADD_CALLER
2985 /*
2986 * `GC_DBG_EXTRAS` is used by the collector debug API functions (unlike
2987 * `GC_EXTRAS` used by the debug API macros) thus `GC_RETURN_ADDR_PARENT`
2988 * (pointing to client caller) should be used if possible.
2989 */
2990 # ifdef GC_HAVE_RETURN_ADDR_PARENT
2991 # define GC_DBG_EXTRAS GC_RETURN_ADDR_PARENT, NULL, 0
2992 # else
2993 # define GC_DBG_EXTRAS GC_RETURN_ADDR, NULL, 0
2994 # endif
2995 #else
2996 # define GC_DBG_EXTRAS "unknown", 0
2997 #endif /* !GC_ADD_CALLER */
2998 2999 #ifdef GC_COLLECT_AT_MALLOC
3000 /*
3001 * Parameter to force collection at every `malloc` of size greater or
3002 * equal to the given value. This might be handy during debugging.
3003 * Note: this variable is visible outside for debugging purpose.
3004 */
3005 extern size_t GC_dbg_collect_at_malloc_min_lb;
3006 3007 # define GC_DBG_COLLECT_AT_MALLOC(lb) \
3008 (void)((lb) >= GC_dbg_collect_at_malloc_min_lb ? (GC_gcollect(), 0) : 0)
3009 #else
3010 # define GC_DBG_COLLECT_AT_MALLOC(lb) (void)0
3011 #endif /* !GC_COLLECT_AT_MALLOC */
3012 3013 /* Allocation routines that bypass the thread-local cache. */
3014 3015 #if defined(THREAD_LOCAL_ALLOC) && defined(GC_GCJ_SUPPORT)
3016 /*
3017 * Allocate an object, clear it, and store the pointer to the type
3018 * structure ("vtable" in `gcj`). This adds a byte at the end of the
3019 * object if `GC_malloc` would.
3020 */
3021 GC_INNER void *GC_core_gcj_malloc(size_t lb, const void *vtable_ptr,
3022 unsigned flags);
3023 #endif
3024 3025 GC_INNER void GC_init_headers(void);
3026 3027 /*
3028 * Install a header for block `h`. Return `NULL` on failure, or the
3029 * uninitialized header otherwise.
3030 */
3031 GC_INNER hdr *GC_install_header(struct hblk *h);
3032 3033 /*
3034 * Set up forwarding counts for block `h` of size `sz`. Return `FALSE`
3035 * on failure.
3036 */
3037 GC_INNER GC_bool GC_install_counts(struct hblk *h, size_t sz);
3038 3039 /* Remove the header for block `h`. */
3040 GC_INNER void GC_remove_header(struct hblk *h);
3041 3042 /* Remove forwarding counts for `h`. */
3043 GC_INNER void GC_remove_counts(struct hblk *h, size_t sz);
3044 3045 /* A non-macro variant of the header location routine. */
3046 GC_INNER hdr *GC_find_header(const void *h);
3047 3048 /*
3049 * Get `HBLKSIZE`-aligned heap memory chunk from the OS and add the
3050 * chunk to `GC_our_memory`. Return `NULL` if out of memory.
3051 */
3052 GC_INNER ptr_t GC_os_get_mem(size_t bytes);
3053 3054 #if defined(NO_FIND_LEAK) && defined(SHORT_DBG_HDRS)
3055 # define GC_print_all_errors() (void)0
3056 # define GC_debugging_started FALSE
3057 # define GC_check_heap() (void)0
3058 # define GC_print_all_smashed() (void)0
3059 #else
3060 3061 /*
3062 * Print smashed and leaked objects, if any. Clear the lists of such
3063 * objects. Called without the allocator lock held.
3064 */
3065 GC_INNER void GC_print_all_errors(void);
3066 3067 /* `GC_debug_malloc()` has been called, once at least. */
3068 GC_EXTERN GC_bool GC_debugging_started;
3069 3070 /*
3071 * Check that all objects in the heap with debugging info are intact.
3072 * Add any that are not to `GC_smashed` list.
3073 */
3074 GC_EXTERN void (*GC_check_heap)(void);
3075 3076 /* Print `GC_smashed` list if it is not empty. Then clear the list. */
3077 GC_EXTERN void (*GC_print_all_smashed)(void);
3078 #endif
3079 3080 /*
3081 * If possible, print (using `GC_err_printf()`) a more detailed
3082 * description (terminated with "\n") of the object referred to by `p`.
3083 */
3084 GC_EXTERN void (*GC_print_heap_obj)(ptr_t p);
3085 3086 GC_INNER void GC_default_print_heap_obj_proc(ptr_t p);
3087 3088 #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
3089 /*
3090 * Print an address map of the process. The caller should hold the
3091 * allocator lock.
3092 */
3093 void GC_print_address_map(void);
3094 #endif
3095 3096 #ifdef NO_FIND_LEAK
3097 # define GC_find_leak_inner FALSE
3098 #else
3099 # define GC_find_leak_inner GC_find_leak
3100 # ifndef SHORT_DBG_HDRS
3101 /*
3102 * Do not immediately deallocate object on `free()` in the find-leak mode,
3103 * just mark it as freed (and deallocate it after collection).
3104 */
3105 GC_EXTERN GC_bool GC_findleak_delay_free;
3106 # endif
3107 #endif /* !NO_FIND_LEAK */
3108 3109 #if defined(NO_FIND_LEAK) && defined(SHORT_DBG_HDRS)
3110 # define get_have_errors() FALSE
3111 3112 #elif defined(AO_HAVE_store)
3113 GC_EXTERN volatile AO_t GC_have_errors;
3114 # define GC_SET_HAVE_ERRORS() AO_store(&GC_have_errors, (AO_t)TRUE)
3115 # define get_have_errors() \
3116 ((GC_bool)AO_load(&GC_have_errors)) /*< no barrier */
3117 3118 #else
3119 GC_EXTERN GC_bool GC_have_errors;
3120 # define GC_SET_HAVE_ERRORS() (void)(GC_have_errors = TRUE)
3121 3122 /*
3123 * We saw a smashed or leaked object. Call error printing routine
3124 * occasionally. It is OK to read it not acquiring the allocator lock.
3125 * Once set to `TRUE`, it is never cleared.
3126 */
3127 # define get_have_errors() GC_have_errors
3128 #endif /* !AO_HAVE_store */
3129 3130 #define VERBOSE 2
3131 #if !defined(NO_CLOCK) || !defined(SMALL_CONFIG)
3132 /*
3133 * Value of 1 generates basic collector log; `VERBOSE` generates additional
3134 * messages.
3135 */
3136 GC_EXTERN int GC_print_stats;
3137 #else /* SMALL_CONFIG */
3138 /*
3139 * Defined as a macro to aid the compiler to remove the relevant message
3140 * character strings from the executable (with a particular level of
3141 * optimizations).
3142 */
3143 # define GC_print_stats 0
3144 #endif
3145 3146 #ifdef KEEP_BACK_PTRS
3147 /* Number of random backtraces to generate for each collection. */
3148 GC_EXTERN long GC_backtraces;
3149 #endif
3150 3151 /*
3152 * A trivial (linear congruential) pseudo-random number generator,
3153 * safe for concurrent usage.
3154 */
3155 #define GC_RAND_MAX ((int)(~0U >> 1))
3156 #if defined(AO_HAVE_store) && defined(THREAD_SANITIZER)
3157 # define GC_RAND_STATE_T volatile AO_t
3158 # define GC_RAND_NEXT(pseed) GC_rand_next(pseed)
3159 GC_INLINE int
3160 GC_rand_next(GC_RAND_STATE_T *pseed)
3161 {
3162 AO_t next = (AO_t)((AO_load(pseed) * (unsigned32)1103515245UL + 12345)
3163 & (unsigned32)((unsigned)GC_RAND_MAX));
3164 AO_store(pseed, next);
3165 return (int)next;
3166 }
3167 #else
3168 # define GC_RAND_STATE_T unsigned32
3169 # define GC_RAND_NEXT(pseed) /*< overflow and race are OK */ \
3170 (int)(*(pseed) = (*(pseed) * (unsigned32)1103515245UL + 12345) \
3171 & (unsigned32)((unsigned)GC_RAND_MAX))
3172 #endif
3173 3174 #ifdef MAKE_BACK_GRAPH
3175 GC_EXTERN GC_bool GC_print_back_height;
3176 void GC_print_back_graph_stats(void);
3177 #endif
3178 3179 #ifdef THREADS
3180 /*
3181 * Explicitly deallocate the object when we already hold the allocator lock.
3182 * Only used for internally allocated objects.
3183 */
3184 GC_INNER void GC_free_inner(void *p);
3185 #endif
3186 3187 #ifdef VALGRIND_TRACKING
3188 # define FREE_PROFILER_HOOK(p) GC_free_profiler_hook(p)
3189 #else
3190 # define FREE_PROFILER_HOOK(p) (void)(p)
3191 #endif
3192 3193 /*
3194 * Macros used for collector internal allocation. These assume the
3195 * allocator lock is held.
3196 */
3197 #ifdef DBG_HDRS_ALL
3198 3199 /*
3200 * An allocation function for internal use. Normally internally allocated
3201 * objects do not have debug information. But in this case, we need to make
3202 * sure that all objects have debug headers.
3203 */
3204 GC_INNER void *GC_debug_generic_malloc_inner(size_t lb, int kind,
3205 unsigned flags);
3206 3207 # define GC_INTERNAL_MALLOC(lb, k) GC_debug_generic_malloc_inner(lb, k, 0)
3208 # define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(lb, k) \
3209 GC_debug_generic_malloc_inner(lb, k, IGNORE_OFF_PAGE)
3210 # ifdef THREADS
3211 /* Used internally; we assume it is called correctly. */
3212 GC_INNER void GC_debug_free_inner(void *p);
3213 3214 # define GC_INTERNAL_FREE GC_debug_free_inner
3215 # else
3216 # define GC_INTERNAL_FREE GC_debug_free
3217 # endif
3218 #else
3219 # define GC_INTERNAL_MALLOC(lb, k) GC_generic_malloc_inner(lb, k, 0)
3220 # define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(lb, k) \
3221 GC_generic_malloc_inner(lb, k, IGNORE_OFF_PAGE)
3222 # ifdef THREADS
3223 # define GC_INTERNAL_FREE GC_free_inner
3224 # else
3225 # define GC_INTERNAL_FREE GC_free
3226 # endif
3227 #endif /* !DBG_HDRS_ALL */
3228 3229 /* Memory unmapping routines. */
3230 #ifdef USE_MUNMAP
3231 3232 /*
3233 * Unmap blocks that have not been recently touched. This is the only
3234 * way blocks are ever unmapped.
3235 */
3236 GC_INNER void GC_unmap_old(unsigned threshold);
3237 3238 /*
3239 * Merge all unmapped blocks that are adjacent to other free blocks.
3240 * This may involve remapping, since all blocks are either fully mapped
3241 * or fully unmapped. Returns `TRUE` if at least one block was merged.
3242 */
3243 GC_INNER GC_bool GC_merge_unmapped(void);
3244 3245 GC_INNER void GC_unmap(ptr_t start, size_t bytes);
3246 GC_INNER void GC_remap(ptr_t start, size_t bytes);
3247 3248 /*
3249 * Two adjacent blocks have already been unmapped and are about to be merged.
3250 * Unmap the whole block. This typically requires that we unmap a small
3251 * section in the middle that was not previously unmapped due to alignment
3252 * constraints.
3253 */
3254 GC_INNER void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2,
3255 size_t bytes2);
3256 #endif
3257 3258 #ifdef CAN_HANDLE_FORK
3259 /*
3260 * Fork-handling mode:
3261 * - 0 means no `fork` handling is requested (but client could anyway
3262 * call `fork()` provided it is surrounded with `GC_atfork_prepare`,
3263 * `GC_atfork_parent`, `GC_atfork_child` calls);
3264 * - (-1) means the collector tries to use `pthread_at_fork()` if it is
3265 * available (if it succeeds, then `GC_handle_fork` value is changed to
3266 * one), a portable client should nonetheless surround `fork()` with
3267 * `GC_atfork_prepare()` and the accompanying routines (for the case
3268 * of `pthread_at_fork()` failure or absence);
3269 * - 1 (or other values) means client fully relies on `pthread_at_fork`
3270 * (so if it is missing or failed, then `abort` occurs in `GC_init()`),
3271 * `GC_atfork_prepare` and the accompanying routines are no-op in such
3272 * a case.
3273 *
3274 * Note: the value is examined by `GC_thr_init`.
3275 */
3276 GC_EXTERN int GC_handle_fork;
3277 3278 # ifdef THREADS
3279 # if defined(SOLARIS) && !defined(_STRICT_STDC)
3280 /* Update `pthreads` id in the child process right after `fork`. */
3281 GC_INNER void GC_stackbase_info_update_after_fork(void);
3282 # else
3283 # define GC_stackbase_info_update_after_fork() (void)0
3284 # endif
3285 # endif
3286 #endif /* CAN_HANDLE_FORK */
3287 3288 #ifdef NO_MANUAL_VDB
3289 # define GC_manual_vdb FALSE
3290 # define GC_auto_incremental GC_incremental
3291 # define GC_dirty(p) (void)(p)
3292 # define REACHABLE_AFTER_DIRTY(p) (void)(p)
3293 #else
3294 /*
3295 * The incremental collection is in the manual VDB mode.
3296 * Assumes `GC_incremental` is `TRUE`. Should not be modified once
3297 * `GC_incremental` is set to `TRUE`.
3298 */
3299 GC_EXTERN GC_bool GC_manual_vdb;
3300 3301 # define GC_auto_incremental (GC_incremental && !GC_manual_vdb)
3302 3303 /*
3304 * Manually mark the page containing `p` as dirty. Logically, this
3305 * dirties the entire object. Does not require locking.
3306 * Exported and marked as `noinline` for the purpose of some clients that
3307 * need to patch the symbol when using write barrier validation.
3308 */
3309 GC_API_PATCHABLE void GC_dirty_inner(const void *p);
3310 3311 # define GC_dirty(p) (GC_manual_vdb ? GC_dirty_inner(p) : (void)0)
3312 # define REACHABLE_AFTER_DIRTY(p) GC_reachable_here(p)
3313 #endif /* !NO_MANUAL_VDB */
3314 3315 #ifdef GC_DISABLE_INCREMENTAL
3316 # define GC_incremental FALSE
3317 #else
3318 /*
3319 * Using incremental/generational collection. Assumes dirty bits are
3320 * being maintained.
3321 */
3322 GC_EXTERN GC_bool GC_incremental;
3323 3324 /* Virtual dirty bit (VDB) implementations; each one exports the following. */
3325 3326 /*
3327 * Initialize the virtual dirty bit implementation. Returns `TRUE` if
3328 * virtual dirty bits are maintained (otherwise it is OK to be called again
3329 * if the client calls `GC_enable_incremental()` once more).
3330 */
3331 GC_INNER GC_bool GC_dirty_init(void);
3332 3333 /*
3334 * Retrieve system dirty bits for the heap to a local buffer (unless
3335 * `output_unneeded`). The caller should set `output_unneeded` to indicate
3336 * that reading of the retrieved dirty bits is not planned till the next
3337 * retrieval. Restore the system's notion of which pages are dirty.
3338 * We assume that either the world is stopped or it is OK to lose dirty bits
3339 * while it is happening (`GC_enable_incremental()` is the caller and
3340 * `output_unneeded` is `TRUE` at least if the multi-threading support is on).
3341 */
3342 GC_INNER void GC_read_dirty(GC_bool output_unneeded);
3343 3344 /*
3345 * Is the `HBLKSIZE`-sized page at `h` marked dirty in the local buffer?
3346 * If the actual page size is different, this returns `TRUE` if any of
3347 * the pages overlapping `h` are dirty. This routine may err on the side
3348 * of labeling pages as dirty (and this implementation does).
3349 */
3350 GC_INNER GC_bool GC_page_was_dirty(struct hblk *h);
3351 3352 /*
3353 * Block `h` is about to be written or allocated shortly. Ensure that
3354 * all pages containing any part of the `nblocks` `hblk` entities starting
3355 * at `h` are no longer write-protected (by the virtual dirty bit
3356 * implementation). I.e., this is a call that:
3357 * - hints that [`h`, `h + nblocks`) is about to be written;
3358 * - guarantees that protection is removed;
3359 * - may speed up some virtual dirty bit implementations;
3360 * - may be essential if we need to ensure that pointer-free system
3361 * call buffers in the heap are not protected.
3362 */
3363 GC_INNER void GC_remove_protection(struct hblk *h, size_t nblocks,
3364 GC_bool is_ptrfree);
3365 3366 # if !defined(NO_VDB_FOR_STATIC_ROOTS) && !defined(PROC_VDB)
3367 /* Is VDB working for static roots? */
3368 GC_INNER GC_bool GC_is_vdb_for_static_roots(void);
3369 # endif
3370 3371 # ifdef CAN_HANDLE_FORK
3372 # if defined(PROC_VDB) || defined(SOFT_VDB) \
3373 || (defined(MPROTECT_VDB) && defined(DARWIN) && defined(THREADS))
3374 /*
3375 * Update pid-specific resources (like `/proc` file descriptors) needed
3376 * by the dirty bits implementation after `fork` in the child process.
3377 */
3378 GC_INNER void GC_dirty_update_child(void);
3379 # else
3380 # define GC_dirty_update_child() (void)0
3381 # endif
3382 # endif /* CAN_HANDLE_FORK */
3383 3384 # if defined(MPROTECT_VDB) && defined(DARWIN)
3385 EXTERN_C_END
3386 # include <pthread.h>
3387 EXTERN_C_BEGIN
3388 # ifdef THREADS
3389 GC_INNER int GC_inner_pthread_create(pthread_t *t,
3390 GC_PTHREAD_CREATE_CONST pthread_attr_t *a,
3391 void *(*fn)(void *), void *arg);
3392 # else
3393 # define GC_inner_pthread_create pthread_create
3394 # endif
3395 # endif
3396 #endif /* !GC_DISABLE_INCREMENTAL */
3397 3398 #if defined(COUNT_PROTECTED_REGIONS) && defined(MPROTECT_VDB)
3399 /*
3400 * Do actions on heap growth, if needed, to prevent hitting the OS kernel
3401 * limit on the VM map regions.
3402 */
3403 GC_INNER void GC_handle_protected_regions_limit(void);
3404 #else
3405 # define GC_handle_protected_regions_limit() (void)0
3406 #endif
3407 3408 /* Same as `GC_base` but accepts and returns a pointer to `const` object. */
3409 #define GC_base_C(p) ((const void *)GC_base(GC_CAST_AWAY_CONST_PVOID(p)))
3410 3411 /* Some debugging print routines. */
3412 void GC_print_block_list(void);
3413 void GC_print_hblkfreelist(void);
3414 void GC_print_heap_sects(void);
3415 void GC_print_static_roots(void);
3416 3417 #ifdef KEEP_BACK_PTRS
3418 /*
3419 * Store back pointer to `source` in `dest`, if that appears to be
3420 * possible. This is not completely safe, since we may mistakenly
3421 * conclude that `dest` has a debugging wrapper. But the error
3422 * probability is very small, and this should not be used in
3423 * production code. We assume that `dest` is the real base pointer.
3424 * `source` should usually be a pointer to the interior of an object.
3425 */
3426 GC_INNER void GC_store_back_pointer(ptr_t source, ptr_t dest);
3427 3428 GC_INNER void GC_marked_for_finalization(ptr_t dest);
3429 # define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest)
3430 # define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest)
3431 #else
3432 # define GC_STORE_BACK_PTR(source, dest) (void)(source)
3433 # define GC_MARKED_FOR_FINALIZATION(dest)
3434 #endif /* !KEEP_BACK_PTRS */
3435 3436 /* Make arguments appear live to compiler. */
3437 void GC_noop6(word, word, word, word, word, word);
3438 3439 #ifndef GC_ATTR_FORMAT_PRINTF
3440 # if GC_GNUC_PREREQ(3, 0)
3441 # define GC_ATTR_FORMAT_PRINTF(spec_argnum, first_checked) \
3442 __attribute__((__format__(__printf__, spec_argnum, first_checked)))
3443 # else
3444 # define GC_ATTR_FORMAT_PRINTF(spec_argnum, first_checked)
3445 # endif
3446 #endif
3447 3448 /* Logging and diagnostic output. */
3449 3450 /*
3451 * `GC_printf` is used typically on client explicit print requests.
3452 * A variant of `printf` that does not allocate, 1 KB total output length.
3453 * (It uses `sprintf()` internally; hopefully the latter does not allocate
3454 * memory for `long` arguments.) For all `GC_*_printf` routines,
3455 * it is recommended to put "\n" at the end of `format` string (for the
3456 * output atomicity).
3457 */
3458 GC_API_PRIV void GC_printf(const char *format, ...)
3459 GC_ATTR_FORMAT_PRINTF(1, 2);
3460 GC_API_PRIV void GC_err_printf(const char *format, ...)
3461 GC_ATTR_FORMAT_PRINTF(1, 2);
3462 3463 /*
3464 * The basic logging routine. Typically, it is called directly only inside
3465 * various `DEBUG_*` blocks.
3466 */
3467 GC_API_PRIV void GC_log_printf(const char *format, ...)
3468 GC_ATTR_FORMAT_PRINTF(1, 2);
3469 3470 #ifndef GC_ANDROID_LOG
3471 # define GC_PRINT_STATS_FLAG (GC_print_stats != 0)
3472 # define GC_INFOLOG_PRINTF GC_COND_LOG_PRINTF
3473 /*
3474 * The "verbose" logging routine which is called only if `GC_print_stats`
3475 * is `VERBOSE`.
3476 */
3477 # define GC_verbose_log_printf GC_log_printf
3478 #else
3479 extern GC_bool GC_quiet;
3480 # define GC_PRINT_STATS_FLAG (!GC_quiet)
3481 /* `INFO`/`DBG` loggers are enabled even if `GC_print_stats` is off. */
3482 # ifndef GC_INFOLOG_PRINTF
3483 # define GC_INFOLOG_PRINTF \
3484 if (GC_quiet) { \
3485 } else \
3486 GC_info_log_printf
3487 # endif
3488 GC_INNER void GC_info_log_printf(const char *format, ...)
3489 GC_ATTR_FORMAT_PRINTF(1, 2);
3490 GC_INNER void GC_verbose_log_printf(const char *format, ...)
3491 GC_ATTR_FORMAT_PRINTF(1, 2);
3492 #endif /* GC_ANDROID_LOG */
3493 3494 #if defined(SMALL_CONFIG) || defined(GC_ANDROID_LOG)
3495 # define GC_ERRINFO_PRINTF GC_INFOLOG_PRINTF
3496 #else
3497 # define GC_ERRINFO_PRINTF GC_log_printf
3498 #endif
3499 3500 /*
3501 * Convenient wrapper macros over `GC_log_printf()` and
3502 * `GC_verbose_log_printf()`.
3503 */
3504 #define GC_COND_LOG_PRINTF \
3505 if (LIKELY(!GC_print_stats)) { \
3506 } else \
3507 GC_log_printf
3508 #define GC_VERBOSE_LOG_PRINTF \
3509 if (LIKELY(GC_print_stats != VERBOSE)) { \
3510 } else \
3511 GC_verbose_log_printf
3512 #ifndef GC_DBGLOG_PRINTF
3513 # define GC_DBGLOG_PRINTF \
3514 if (!GC_PRINT_STATS_FLAG) { \
3515 } else \
3516 GC_log_printf
3517 #endif
3518 3519 /* Write `s` to `stderr`, but do not buffer, do not add newlines, do not... */
3520 void GC_err_puts(const char *s);
3521 3522 /*
3523 * A handy macro for logging size values (of `word` type) in KiB, rounding
3524 * to nearest value.
3525 */
3526 #define TO_KiB_UL(v) ((unsigned long)(((v) + ((1 << 9) - 1)) >> 10))
3527 3528 /*
3529 * How many consecutive collection/expansion failures?
3530 * Reset by `GC_allochblk()`.
3531 */
3532 GC_EXTERN unsigned GC_fail_count;
3533 3534 /*
3535 * Number of bytes of memory reclaimed minus the number of bytes originally
3536 * on free lists that we had to drop. Protected by the allocator lock.
3537 */
3538 GC_EXTERN GC_signed_word GC_bytes_found;
3539 3540 #ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
3541 /*
3542 * Number of bytes reclaimed before this collection cycle; used for
3543 * statistics only.
3544 */
3545 GC_EXTERN word GC_reclaimed_bytes_before_gc;
3546 #endif
3547 3548 #ifdef USE_MUNMAP
3549 GC_EXTERN unsigned GC_unmap_threshold; /*< defined in `alloc.c` file */
3550 3551 /*
3552 * Force memory unmapping on every collection. Has no effect on
3553 * implicitly-initiated collections.
3554 */
3555 GC_EXTERN GC_bool GC_force_unmap_on_gcollect;
3556 #endif
3557 3558 #ifdef MSWIN32
3559 GC_EXTERN GC_bool GC_no_win32_dlls; /*< defined in `os_dep.c` file */
3560 3561 /* Is this a Windows NT derivative (i.e. NT, Win2K, XP or later)? */
3562 GC_EXTERN GC_bool GC_wnt;
3563 #endif
3564 3565 #ifdef THREADS
3566 # if (defined(MSWIN32) && !defined(CONSOLE_LOG)) || defined(MSWINCE)
3567 GC_EXTERN CRITICAL_SECTION GC_write_cs;
3568 # ifdef GC_ASSERTIONS
3569 /*
3570 * Set to `TRUE` only if `GC_stop_world()` has acquired `GC_write_cs`.
3571 * Protected by `GC_write_cs`.
3572 */
3573 GC_EXTERN GC_bool GC_write_disabled;
3574 # endif
3575 # endif /* MSWIN32 || MSWINCE */
#  ifdef NEED_FAULT_HANDLER_LOCK
/*
 * Acquire the spin lock we use to update dirty bits.  Threads should
 * not get stopped holding it.  But we may acquire and release it during
 * `GC_remove_protection()` call.  Implemented as a test-and-set spin on
 * `GC_fault_handler_lock`.
 */
#    define GC_acquire_dirty_lock() \
      do { /* Empty. */ \
      } while (AO_test_and_set_acquire(&GC_fault_handler_lock) == AO_TS_SET)
/* Release the spin lock acquired by `GC_acquire_dirty_lock()`. */
#    define GC_release_dirty_lock() AO_CLEAR(&GC_fault_handler_lock)
#  else
/* No fault-handler lock is needed: both operations are no-ops. */
#    define GC_acquire_dirty_lock() (void)0
#    define GC_release_dirty_lock() (void)0
#  endif
3590 # ifdef MSWINCE
3591 GC_EXTERN GC_bool GC_dont_query_stack_min;
3592 # endif
3593 #elif defined(IA64)
3594 /* Value returned from register flushing routine (`ar.bsp`). */
3595 GC_EXTERN ptr_t GC_save_regs_ret_val;
3596 #endif /* !THREADS */
3597 3598 #ifdef THREAD_LOCAL_ALLOC
3599 GC_EXTERN GC_bool GC_world_stopped; /*< defined in `alloc.c` file */
3600 3601 /*
3602 * We must explicitly mark `ptrfree` and `gcj` free lists, since the
3603 * free list links would not otherwise be found. We also set them in
3604 * the normal free lists, since that involves touching less memory than
3605 * if we scanned them normally.
3606 */
3607 GC_INNER void GC_mark_thread_local_free_lists(void);
3608 #endif
3609 3610 #if defined(GLIBC_2_19_TSX_BUG) && defined(GC_PTHREADS_PARAMARK)
3611 /* Parse string like `<major>[.<minor>[<tail>]]` and return `major` value. */
3612 GC_INNER int GC_parse_version(int *pminor, const char *pverstr);
3613 #endif
3614 3615 #if defined(MPROTECT_VDB) && defined(GWW_VDB)
3616 /*
3617 * Returns `TRUE` if `GetWriteWatch()` is available. May be called
3618 * repeatedly. May be called with or without the allocator lock held.
3619 */
3620 GC_INNER GC_bool GC_gww_dirty_init(void);
3621 #endif
3622 3623 #if defined(CHECKSUMS) || defined(PROC_VDB)
3624 /* Could any valid GC heap pointer ever have been written to this page? */
3625 GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk *h);
3626 #endif
3627 3628 #ifdef CHECKSUMS
3629 # ifdef MPROTECT_VDB
3630 void GC_record_fault(struct hblk *h);
3631 # endif
3632 void GC_check_dirty(void);
3633 #endif
3634 3635 GC_INNER void GC_setpagesize(void);
3636 3637 GC_INNER void GC_initialize_offsets(void);
3638 3639 #if defined(REDIR_MALLOC_AND_LINUXTHREADS) \
3640 && !defined(REDIRECT_MALLOC_IN_HEADER)
3641 GC_INNER void GC_init_lib_bounds(void);
3642 #else
3643 # define GC_init_lib_bounds() (void)0
3644 #endif
3645 3646 #ifdef REDIR_MALLOC_AND_LINUXTHREADS
3647 /*
3648 * Find the text (code) mapping for the library whose name, after
3649 * stripping the directory part, starts with `nm`.
3650 */
3651 GC_INNER GC_bool GC_text_mapping(const char *nm, ptr_t *startp, ptr_t *endp);
3652 #endif
3653 3654 #if defined(USE_WINALLOC) && !defined(REDIRECT_MALLOC)
3655 GC_INNER void GC_add_current_malloc_heap(void);
3656 #endif
3657 3658 #ifdef MAKE_BACK_GRAPH
3659 /*
3660 * Rebuild the representation of the backward reachability graph.
3661 * Does not examine mark bits. Could be called before collection.
3662 */
3663 GC_INNER void GC_build_back_graph(void);
3664 3665 GC_INNER void GC_traverse_back_graph(void);
3666 #endif
3667 3668 #ifdef MSWIN32
3669 GC_INNER void GC_init_win32(void);
3670 #endif
3671 3672 #ifndef ANY_MSWIN
3673 /*
3674 * Is a particular static root (with the given start) registered?
3675 * If so, then return a pointer to it, else `NULL`. The type is a lie,
3676 * since the real type does not make sense here, and we only test for `NULL`.
3677 */
3678 GC_INNER void *GC_roots_present(ptr_t);
3679 #endif
3680 3681 #if defined(GC_WIN32_THREADS)
3682 /* Same as `GC_push_one` but for a sequence of registers. */
3683 GC_INNER void GC_push_many_regs(const word *regs, unsigned count);
3684 3685 /*
3686 * Find stack with the lowest address which overlaps the interval
3687 * [`start`, `limit`). Return stack bounds in `*plo` and `*phi`.
3688 * If no such stack is found, both `*phi` and `*plo` will be set to an
3689 * address higher than `limit`.
3690 */
3691 GC_INNER void GC_get_next_stack(ptr_t start, ptr_t limit, ptr_t *plo,
3692 ptr_t *phi);
3693 3694 # if defined(MPROTECT_VDB) && !defined(CYGWIN32)
3695 GC_INNER void GC_set_write_fault_handler(void);
3696 # endif
3697 # if defined(WRAP_MARK_SOME) && !defined(GC_PTHREADS)
3698 /*
3699 * Did we invalidate mark phase with an unexpected thread start?
3700 * Return `TRUE` if a thread was attached since we last asked or since
3701 * `GC_attached_thread` was explicitly reset.
3702 */
3703 GC_INNER GC_bool GC_started_thread_while_stopped(void);
3704 # endif
3705 #endif /* GC_WIN32_THREADS */
3706 3707 #if defined(MPROTECT_VDB) && defined(DARWIN) && defined(THREADS)
3708 GC_INNER void GC_mprotect_stop(void);
3709 GC_INNER void GC_mprotect_resume(void);
3710 # ifndef GC_NO_THREADS_DISCOVERY
3711 GC_INNER void GC_darwin_register_self_mach_handler(void);
3712 # endif
3713 #endif
3714 3715 #ifndef NOT_GCBUILD
3716 /*
3717 * Iterate over forwarding addresses, if any, to get the beginning of
3718 * the block and its header. Assumes `*phhdr` is non-`NULL` on entry,
3719 * and guarantees `*phhdr` is non-`NULL` on return.
3720 */
3721 GC_INLINE struct hblk *
3722 GC_find_starting_hblk(struct hblk *h, hdr **phhdr)
3723 {
3724 hdr *hhdr = *phhdr;
3725 3726 GC_ASSERT(HDR(h) == hhdr);
3727 for (; IS_FORWARDING_ADDR_OR_NIL(hhdr); hhdr = HDR(h)) {
3728 GC_ASSERT(hhdr != NULL);
3729 h = FORWARDED_ADDR(h, hhdr);
3730 }
3731 *phhdr = hhdr;
3732 return h;
3733 }
3734 #endif /* !NOT_GCBUILD */
3735 3736 #if (defined(PARALLEL_MARK) \
3737 && !defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG) \
3738 && (defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID) \
3739 || defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID) \
3740 || defined(HAVE_PTHREAD_SET_NAME_NP))) \
3741 || (defined(DYNAMIC_LOADING) \
3742 && ((defined(USE_PROC_FOR_LIBRARIES) && !defined(LINUX)) \
3743 || defined(DARWIN) || defined(IRIX5))) \
3744 || defined(PROC_VDB) || defined(SOFT_VDB)
3745 /*
3746 * A function to convert a long integer value `lv` to a string adding
3747 * the `prefix` and optional `suffix`. The resulting string is put to
3748 * `buf` of the designated size (`buf_sz`). Guaranteed to append
3749 * a trailing "\0" and not to exceed the buffer size. (Note that it is
3750 * recommended to reserve at least 20 characters for the number part of
3751 * the string in `buf` to avoid a compiler warning about potential
3752 * number truncation.)
3753 */
3754 # ifndef GC_DISABLE_SNPRINTF
3755 # define GC_snprintf_s_ld_s(buf, buf_sz, prefix, lv, suffix) \
3756 (void)(snprintf(buf, buf_sz, "%s%ld%s", prefix, lv, suffix), \
3757 (buf)[(buf_sz) - (size_t)1] = '\0')
3758 # else
3759 # define NEED_SNPRINTF_SLDS
3760 GC_INNER void GC_snprintf_s_ld_s(char *buf, size_t buf_sz, const char *prefix,
3761 long lv, const char *suffix);
3762 # endif
3763 #endif
3764 3765 #ifdef THREADS
3766 # ifndef GC_NO_FINALIZATION
3767 /* Called by `GC_finalize()` (in case of an allocation failure observed). */
3768 GC_INNER void GC_reset_finalizer_nested(void);
3769 3770 /*
3771 * Check and update the thread-local level of finalizers recursion.
3772 * Returns `NULL` if `GC_invoke_finalizers()` should not be called by
3773 * the collector (to minimize the risk of a deep finalizers recursion),
3774 * otherwise returns a pointer to the thread-local `finalizer_nested`.
3775 * Called by `GC_notify_or_invoke_finalizers()` only.
3776 */
3777 GC_INNER unsigned char *GC_check_finalizer_nested(void);
3778 # endif
3779 3780 GC_INNER void GC_do_blocking_inner(ptr_t data, void *context);
3781 3782 /*
3783 * Should do exactly the right thing if the world is stopped; should
3784 * not fail if it is not stopped.
3785 */
3786 GC_INNER void GC_push_all_stacks(void);
3787 3788 # ifdef USE_PROC_FOR_LIBRARIES
3789 GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi);
3790 # endif
3791 # if (defined(HAVE_PTHREAD_ATTR_GET_NP) || defined(HAVE_PTHREAD_GETATTR_NP)) \
3792 && defined(IA64)
3793 /*
3794 * Find the largest stack base smaller than `bound`. May be used to find
3795 * the boundary between a register stack and adjacent immediately preceding
3796 * memory stack.
3797 */
3798 GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound);
3799 # endif
3800 #endif /* THREADS */
3801 3802 #ifdef DYNAMIC_LOADING
3803 /* Do we need to separately register the main static data segment? */
3804 GC_INNER GC_bool GC_register_main_static_data(void);
3805 3806 # ifdef DARWIN
3807 GC_INNER void GC_init_dyld(void);
3808 # endif
3809 #endif /* DYNAMIC_LOADING */
3810 3811 #ifdef SEARCH_FOR_DATA_START
3812 GC_INNER void GC_init_linux_data_start(void);
3813 #endif
3814 3815 #ifdef NEED_PROC_MAPS
3816 # if defined(DYNAMIC_LOADING) && defined(USE_PROC_FOR_LIBRARIES) \
3817 || defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR) \
3818 || (defined(CHECK_SOFT_VDB) && defined(MPROTECT_VDB)) \
3819 || defined(REDIR_MALLOC_AND_LINUXTHREADS)
3820 /*
3821 * Assign various fields of the first line in `maps_ptr` to `*p_start`,
3822 * `*p_end`, `*p_prot`, `*p_maj_dev` and `*p_mapping_name`.
3823 * `p_mapping_name` may be `NULL`. `*p_prot` and `*p_mapping_name` are
3824 * assigned pointers into the original buffer.
3825 */
3826 GC_INNER const char *GC_parse_map_entry(const char *maps_ptr, ptr_t *p_start,
3827 ptr_t *p_end, const char **p_prot,
3828 unsigned *p_maj_dev,
3829 const char **p_mapping_name);
3830 # endif
3831 # if defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR) \
3832 || (defined(CHECK_SOFT_VDB) && defined(MPROTECT_VDB))
3833 /*
3834 * Try to read the backing store base from `/proc/self/maps` file.
3835 * Return the bounds of the writable mapping with a zero major device,
3836 * which includes the address passed as data. Return `FALSE` if there
3837 * is no such mapping.
3838 */
3839 GC_INNER GC_bool GC_enclosing_writable_mapping(ptr_t addr, ptr_t *startp,
3840 ptr_t *endp);
3841 # endif
3842 3843 /*
3844 * Copy the content of `/proc/self/maps` file to a buffer in our
3845 * address space. Return the address of the buffer.
3846 */
3847 GC_INNER const char *GC_get_maps(void);
3848 #endif /* NEED_PROC_MAPS */
3849 3850 #ifdef GC_ASSERTIONS
3851 /* Should return the same value as `GC_large_free_bytes`. */
3852 GC_INNER word GC_compute_large_free_bytes(void);
3853 3854 /* Should return the same value as `GC_root_size`. */
3855 GC_INNER word GC_compute_root_size(void);
3856 #endif

/* Check a compile-time assertion: compilation fails if `e` is false. */
#if defined(_MSC_VER) && (_MSC_VER >= 1700)
/* MSVC (VS2012 onwards, per the 1700 check) accepts `static_assert`. */
#  define GC_STATIC_ASSERT(e) static_assert(e, "static assertion failed: " #e)
#elif defined(static_assert) && !defined(CPPCHECK) \
    && (__STDC_VERSION__ >= 201112L)
/* C11: the `static_assert` convenience macro is available. */
#  define GC_STATIC_ASSERT(e) \
    do { /* placed in `do`-`while` for proper formatting by clang-format */ \
      static_assert(e, #e); \
    } while (0)
#elif defined(mips) && !defined(__GNUC__) && !defined(CPPCHECK)
/*
 * DOB: MIPSPro C gets an internal error taking the `sizeof` an array type.
 * This code works correctly (ugliness is to avoid "unused var" warnings).
 */
#  define GC_STATIC_ASSERT(e) \
    do { \
      if (0) { \
        char j[(e) ? 1 : -1]; \
        j[0] = '\0'; \
        j[0] = j[0]; \
      } \
    } while (0)
#else
/*
 * The error message for failure is a bit baroque, but a negative array
 * size reliably stops compilation when `e` is false.
 */
#  define GC_STATIC_ASSERT(e) (void)sizeof(char[(e) ? 1 : -1])
#endif

/*
 * Runtime check that an argument declared as non-`NULL` is actually
 * not `NULL`.
 */
#if GC_GNUC_PREREQ(4, 0)
/*
 * Read the argument back through a `volatile void *` lvalue to hide its
 * provenance from the compiler; this works around the
 * tautological-pointer-compare Clang warning.
 */
#  define NONNULL_ARG_NOT_NULL(arg) \
    (*CAST_THRU_UINTPTR(volatile void **, &(arg)) != NULL)
#else
#  define NONNULL_ARG_NOT_NULL(arg) ((arg) != NULL)
#endif

/*
 * Consistency checks run in place of a dump: verify, while holding the
 * allocator lock, that the cached `GC_large_free_bytes` and `GC_root_size`
 * values agree with the freshly recomputed ones (`GC_ASSERT` is the
 * collector's internal assertion macro).
 */
#define COND_DUMP_CHECKS \
  do { \
    GC_ASSERT(I_HOLD_LOCK()); \
    GC_ASSERT(GC_compute_large_free_bytes() == GC_large_free_bytes); \
    GC_ASSERT(GC_compute_root_size() == GC_root_size); \
  } while (0)

#ifndef NO_DEBUGGING
/* A flag to generate regular debugging dumps. */
GC_EXTERN GC_bool GC_dump_regularly;

/* Dump the collector state if regular dumps are enabled, else just check. */
#  define COND_DUMP \
    if (UNLIKELY(GC_dump_regularly)) { \
      GC_dump_named(NULL); \
    } else \
      COND_DUMP_CHECKS
#else
#  define COND_DUMP COND_DUMP_CHECKS
#endif
3915 3916 /*
3917 * We need additional synchronization facilities from the thread support.
3918 * We believe these are less performance critical than the allocator lock;
3919 * standard `pthreads`-based implementations should be sufficient.
3920 */
3921 #ifdef PARALLEL_MARK
3922 3923 /*
3924 * Number of mark threads we would like to have excluding the initiating
3925 * thread.
3926 */
3927 # define GC_markers_m1 GC_parallel
3928 3929 /* A flag to temporarily avoid parallel marking. */
3930 GC_EXTERN GC_bool GC_parallel_mark_disabled;
3931 3932 /*
3933 * The routines to deal with the mark lock and condition variables.
3934 * If the allocator lock is also acquired, it must be done first.
3935 * The mark lock is used to both protect some variables used by the
3936 * parallel marker, and to protect `GC_fl_builder_count`.
3937 * `GC_notify_all_marker()` is called when the state of the parallel marker
3938 * changes in some significant way (see `gc_mark.h` file for details).
3939 * The latter set of events includes incrementing `GC_mark_no`.
3940 * `GC_notify_all_builder()` is called when `GC_fl_builder_count` reaches
3941 * zero.
3942 */
3943 3944 /*
3945 * Wait all markers to finish initialization (i.e. store `marker_sp`,
3946 * `marker_bsp`, `marker_mach_threads`, `GC_marker_Id`).
3947 */
3948 GC_INNER void GC_wait_for_markers_init(void);
3949 3950 GC_INNER void GC_acquire_mark_lock(void);
3951 GC_INNER void GC_release_mark_lock(void);
3952 GC_INNER void GC_notify_all_builder(void);
3953 GC_INNER void GC_wait_for_reclaim(void);
3954 3955 /*
3956 * Number of threads currently building free lists without holding
3957 * the allocator lock. It is not safe to collect if this is nonzero.
3958 * Also, together with the mark lock, it is used as a semaphore during
3959 * marker threads startup. Protected by the mark lock.
3960 */
3961 GC_EXTERN GC_signed_word GC_fl_builder_count;
3962 3963 GC_INNER void GC_notify_all_marker(void);
3964 GC_INNER void GC_wait_marker(void);
3965 3966 GC_EXTERN word GC_mark_no; /*< protected by the mark lock */
3967 3968 /*
3969 * Try to help out parallel marker, if it is running, for mark cycle
3970 * `my_mark_no`. Returns if the mark cycle finishes or was already
3971 * done, or there was nothing to do for some other reason. We hold the
3972 * mark lock only, the initiating thread holds the allocator lock.
3973 */
3974 GC_INNER void GC_help_marker(word my_mark_no);
3975 3976 GC_INNER void GC_start_mark_threads_inner(void);
3977 3978 # define INCR_MARKS(hhdr) \
3979 AO_store(&(hhdr)->hb_n_marks, AO_load(&(hhdr)->hb_n_marks) + 1)
3980 #else
3981 # define INCR_MARKS(hhdr) (void)(++(hhdr)->hb_n_marks)
3982 #endif /* !PARALLEL_MARK */

#if defined(SIGNAL_BASED_STOP_WORLD) && !defined(SIG_SUSPEND)
/*
 * We define the thread suspension signal here, so that we can refer
 * to it in the virtual dirty bit (VDB) implementation, if necessary.
 * Ideally we would allocate a (real-time?) signal using the standard
 * mechanism.  Unfortunately, there is no such one.  (There is one in
 * Linux `glibc`, but it is not exported.)  Thus we continue to use
 * the same hard-coded signals we have always used.
 */
#  ifdef THREAD_SANITIZER
/*
 * Unfortunately, use of an asynchronous signal to suspend threads leads to
 * the situation when the signal is not delivered (it is stored to
 * `pending_signals` in TSan runtime actually) while the destination thread
 * is blocked in `pthread_mutex_lock()`.  Thus, we use some synchronous one
 * instead (which is again unlikely to be used by clients directly).
 */
#    define SIG_SUSPEND SIGSYS
#  elif (defined(DGUX) || defined(LINUX)) && !defined(GC_USESIGRT_SIGNALS)
#    if defined(SPARC) && !defined(SIGPWR)
/*
 * Linux/SPARC does not properly define `SIGPWR` in platform `signal.h`
 * file.  It is aliased to `SIGLOST` in platform `asm/signal.h` file,
 * though.
 */
#      define SIG_SUSPEND SIGLOST
#    else
/* LinuxThreads itself uses `SIGUSR1` and `SIGUSR2`. */
#      define SIG_SUSPEND SIGPWR
#    endif
#  elif defined(FREEBSD) && defined(__GLIBC__) && !defined(GC_USESIGRT_SIGNALS)
#    define SIG_SUSPEND (32 + 6)
#  elif (defined(FREEBSD) || defined(HURD) || defined(RTEMS)) \
      && !defined(GC_USESIGRT_SIGNALS)
#    define SIG_SUSPEND SIGUSR1
/* `SIGTSTP` and `SIGCONT` could be used alternatively on FreeBSD. */
#  elif (defined(OPENBSD) && !defined(GC_USESIGRT_SIGNALS)) \
      || defined(SERENITY)
#    define SIG_SUSPEND SIGXFSZ
#  elif defined(_SIGRTMIN) && !defined(CPPCHECK)
#    define SIG_SUSPEND _SIGRTMIN + 6
#  else
#    define SIG_SUSPEND SIGRTMIN + 6
#  endif
#endif /* SIGNAL_BASED_STOP_WORLD && !SIG_SUSPEND */
4028 4029 #if defined(GC_PTHREADS) && !defined(GC_SEM_INIT_PSHARED)
4030 # define GC_SEM_INIT_PSHARED 0
4031 #endif
4032 4033 /*
4034 * Some macros for `setjmp()` working across signal handlers, where
4035 * possible.
4036 */
4037 #if (defined(UNIX_LIKE) || (defined(NEED_FIND_LIMIT) && defined(CYGWIN32))) \
4038 && !defined(GC_NO_SIGSETJMP)
4039 # if defined(SUNOS5SIGS) && !defined(FREEBSD) && !defined(LINUX)
4040 EXTERN_C_END
4041 # include <sys/siginfo.h>
4042 EXTERN_C_BEGIN
4043 # endif
4044 /*
4045 * Define `SETJMP()` and friends to be the variant restoring the signal
4046 * mask.
4047 */
4048 # define SETJMP(env) sigsetjmp(env, 1)
4049 # define LONGJMP(env, val) siglongjmp(env, val)
4050 # define JMP_BUF sigjmp_buf
4051 #else
4052 # ifdef ECOS
4053 # define SETJMP(env) hal_setjmp(env)
4054 # else
4055 # define SETJMP(env) setjmp(env)
4056 # endif
4057 # define LONGJMP(env, val) longjmp(env, val)
4058 # define JMP_BUF jmp_buf
4059 #endif /* !UNIX_LIKE || GC_NO_SIGSETJMP */
4060 4061 #ifdef DATASTART_USES_XGETDATASTART
4062 # ifdef FREEBSD
4063 EXTERN_C_END
4064 # include <machine/trap.h>
4065 EXTERN_C_BEGIN
4066 # endif
4067 GC_INNER ptr_t GC_SysVGetDataStart(size_t, ptr_t);
4068 #endif /* DATASTART_USES_XGETDATASTART */
4069 4070 #if defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS) \
4071 || defined(NEED_FIND_LIMIT) || defined(SEARCH_FOR_DATA_START)
4072 # if (defined(HOST_ANDROID) || defined(__ANDROID__)) \
4073 && defined(IGNORE_DYNAMIC_LOADING)
4074 /* Declared as public one in `gc.h` file. */
4075 # else
4076 void *GC_find_limit(void *p, int up);
4077 # endif
4078 #endif
4079 4080 #if defined(NEED_FIND_LIMIT) \
4081 || (defined(UNIX_LIKE) && !defined(NO_DEBUGGING)) \
4082 || (defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS)) \
4083 || (defined(WRAP_MARK_SOME) && defined(NO_SEH_AVAILABLE))
4084 typedef void (*GC_fault_handler_t)(int);
4085 GC_INNER void GC_set_and_save_fault_handler(GC_fault_handler_t);
4086 #endif
4087 4088 #if defined(NEED_FIND_LIMIT) \
4089 || (defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS)) \
4090 || (defined(WRAP_MARK_SOME) && defined(NO_SEH_AVAILABLE))
4091 GC_EXTERN JMP_BUF GC_jmp_buf;
4092 4093 /*
4094 * Set up a handler for address faults which will `longjmp`
4095 * to `GC_jmp_buf`.
4096 */
4097 GC_INNER void GC_setup_temporary_fault_handler(void);
4098 4099 /* Undo the effect of `GC_setup_temporary_fault_handler`. */
4100 GC_INNER void GC_reset_fault_handler(void);
4101 #endif /* NEED_FIND_LIMIT || USE_PROC_FOR_LIBRARIES || WRAP_MARK_SOME */

/* Some convenience macros for cancellation support. */
#ifdef CANCEL_SAFE
#  if defined(GC_ASSERTIONS) \
      && (defined(USE_COMPILER_TLS) \
          || (defined(LINUX) && !defined(ARM32) && GC_GNUC_PREREQ(3, 3) \
              || defined(HPUX) /* and probably others... */))
/*
 * Per-thread depth of nested `DISABLE_CANCEL` invocations; used only by
 * `ASSERT_CANCEL_DISABLED()` to verify cancellation is indeed disabled.
 */
extern __thread unsigned char GC_cancel_disable_count;
#    define NEED_CANCEL_DISABLE_COUNT
#    define INCR_CANCEL_DISABLE() ++GC_cancel_disable_count
#    define DECR_CANCEL_DISABLE() --GC_cancel_disable_count
#    define ASSERT_CANCEL_DISABLED() GC_ASSERT(GC_cancel_disable_count > 0)
#  else
/* No TLS counter available (or assertions are off): all are no-ops. */
#    define INCR_CANCEL_DISABLE()
#    define DECR_CANCEL_DISABLE()
#    define ASSERT_CANCEL_DISABLED() (void)0
#  endif /* !GC_ASSERTIONS */
/* Disable POSIX thread cancellation, saving the previous state in `state`. */
#  define DISABLE_CANCEL(state) \
    do { \
      pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state); \
      INCR_CANCEL_DISABLE(); \
    } while (0)
/* Restore the cancellation state previously saved by `DISABLE_CANCEL`. */
#  define RESTORE_CANCEL(state) \
    do { \
      ASSERT_CANCEL_DISABLED(); \
      pthread_setcancelstate(state, NULL); \
      DECR_CANCEL_DISABLE(); \
    } while (0)
#else
#  define DISABLE_CANCEL(state) (void)0
#  define RESTORE_CANCEL(state) (void)0
#  define ASSERT_CANCEL_DISABLED() (void)0
#endif /* !CANCEL_SAFE */

/* Multiply 32-bit unsigned values (used by `GC_push_contents_hdr()`). */
#ifdef NO_LONGLONG64
/*
 * No native 64-bit integer type: compose the 64-bit product from four
 * 16x16-bit partial products.  The intermediate sums `mid` and `lprod`
 * may wrap around; each wrap-around is detected by an unsigned comparison
 * of the sum against one of its addends, and the lost carry is re-added
 * into `hprod` (a carry lost in `mid` is worth 0x10000 in `hprod` units
 * since `mid` is scaled by 2**16; one lost in `lprod` is worth 1).
 */
#  define LONG_MULT(hprod, lprod, x, y) \
    do { \
      unsigned32 lx = (x) & (0xffffU); \
      unsigned32 ly = (y) & (0xffffU); \
      unsigned32 hx = (x) >> 16; \
      unsigned32 hy = (y) >> 16; \
      unsigned32 lxhy = lx * hy; \
      unsigned32 mid = hx * ly + lxhy; /*< may overflow */ \
      unsigned32 lxly = lx * ly; \
      \
      lprod = (mid << 16) + lxly; /*< may overflow */ \
      hprod = hx * hy + ((lprod) < lxly ? 1U : 0) \
              + (mid < lxhy ? (unsigned32)0x10000UL : 0) + (mid >> 16); \
    } while (0)
#elif defined(I386) && defined(__GNUC__) && !defined(NACL)
/* Use the x86 `mull` instruction: low half in EAX, high half in EDX. */
#  define LONG_MULT(hprod, lprod, x, y) \
    __asm__ __volatile__("mull %2" : "=a"(lprod), "=d"(hprod) : "r"(y), "0"(x))
#else
#  if (defined(__int64) && !defined(__GNUC__) || defined(__BORLANDC__)) \
      && !defined(CPPCHECK)
#    define ULONG_MULT_T unsigned __int64
#  else
#    define ULONG_MULT_T unsigned long long
#  endif
/* Compute in a 64-bit type, then split into the 32-bit halves. */
#  define LONG_MULT(hprod, lprod, x, y) \
    do { \
      ULONG_MULT_T prod = (ULONG_MULT_T)(x) * (ULONG_MULT_T)(y); \
      \
      GC_STATIC_ASSERT(sizeof(x) + sizeof(y) <= sizeof(prod)); \
      hprod = (unsigned32)(prod >> 32); \
      lprod = (unsigned32)prod; \
    } while (0)
#endif /* !I386 && !NO_LONGLONG64 */
4171 4172 EXTERN_C_END
4173 4174 #endif /* GC_PRIVATE_H */
4175