/* gc.h */

   1  /*
   2   * Copyright (c) 1988-1989 Hans-J. Boehm, Alan J. Demers
   3   * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
   4   * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
   5   * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
   6   * Copyright (c) 2007 Free Software Foundation, Inc.
   7   * Copyright (c) 2000-2011 by Hewlett-Packard Development Company.
   8   * Copyright (c) 2009-2022 Ivan Maidanski
   9   *
  10   * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  11   * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
  12   *
  13   * Permission is hereby granted to use or copy this program
  14   * for any purpose, provided the above notices are retained on all copies.
  15   * Permission to modify the code and to distribute modified code is granted,
  16   * provided the above notices are retained, and a notice that the code was
  17   * modified is included with the above copyright notice.
  18   */
  19  
  20  /*
  21   * Note that this defines a large number of tuning hooks, which can
  22   * safely be ignored in nearly all cases.  For normal use it suffices
  23   * to call only `GC_MALLOC` and, perhaps, `GC_REALLOC`.
  24   * For better performance, also look at `GC_MALLOC_ATOMIC`, and
  25   * `GC_enable_incremental`.  If you need an action to be performed
  26   * immediately before an object is collected, look at `GC_register_finalizer`.
  27   * Everything else is best ignored unless you encounter performance
  28   * problems.
  29   */
  30  
  31  #ifndef GC_H
  32  #define GC_H
  33  
  34  /* Help debug mixed up preprocessor symbols. */
  35  #if defined(WIN64) && !defined(_WIN64) && defined(_MSC_VER)
  36  #  pragma message("Warning: Expecting _WIN64 for x64 targets!")
  37  #endif
  38  
  39  /*
  40   * Define version numbers here to allow test on build machine for
  41   * cross-builds.  Note that this defines the header version number,
  42   * which may or may not match that of the dynamic library.
  43   * `GC_get_version()` can be used to obtain the latter.
  44   */
  45  #include "gc_version.h"
  46  
  47  #include "gc_config_macros.h"
  48  
  49  #ifdef __cplusplus
  50  extern "C" {
  51  #endif
  52  
  53  typedef void *GC_PTR; /*< preserved only for backward compatibility */
  54  
  55  /**
  56   * Define public `word` and `signed_word` to be unsigned and signed types
  57   * of the size same as of `size_t`, `ptrdiff_t`, and of the address part
  58   * of data pointers (like `char *` and `void *`).
  59   */
   60  typedef GC_UNSIGNEDWORD GC_word;
   61  typedef GC_SIGNEDWORD GC_signed_word;
   62  #undef GC_SIGNEDWORD   /*< consumed by the typedef above; not for client use */
   63  #undef GC_UNSIGNEDWORD /*< consumed by the typedef above; not for client use */
   64  
   65  #if (defined(_UINTPTR_T) || defined(_UINTPTR_T_DECLARED) \
   66       || defined(_UINTPTR_T_DEFINED))                     \
   67      && !defined(__CYGWIN__) && !defined(__MSYS__)
   68  /* Note: Cygwin and MSYS2 might provide `__uintptr_t` but not `uintptr_t`. */
   69  typedef uintptr_t GC_uintptr_t; /*< use the standard type when it is known to exist */
   70  #else
   71  typedef GC_word GC_uintptr_t; /*< fall back to the unsigned word type */
   72  #endif
  73  
  74  /**
   75   * Does the first pointer have a smaller address than the second one?
  76   * The arguments should be of the same pointer type, e.g. of `char *` type.
  77   * Ancient compilers might treat a pointer as a signed value, thus we
  78   * need a cast to unsigned `word` of each compared pointer.
  79   */
   80  #if defined(__GNUC__) && !defined(__CHERI_PURE_CAPABILITY__)
   81  #  define GC_ADDR_LT(p, q) ((p) < (q)) /*< direct pointer comparison suffices */
   82  #else
   83  /* Cast each pointer to unsigned word first (see the note above). */
   84  #  define GC_ADDR_LT(p, q) \
   85      ((GC_word)(GC_uintptr_t)(p) < (GC_word)(GC_uintptr_t)(q))
   86  #endif
  86  
  87  /**
  88   * Get the collector library version.  The returned value is a constant
  89   * in the form:
  90   * `((version_major << 16) | (version_minor << 8) | version_micro)`.
  91   */
  92  GC_API GC_VERSION_VAL_T GC_CALL GC_get_version(void);
  93  
  94  /*
  95   * Public read-only variables.  The supplied getter functions are
  96   * preferred for new client code.
  97   */
  98  
  99  /**
 100   * Counter incremented once per collection.  Includes empty collections
 101   * at startup.  `GC_get_gc_no()` is unsynchronized, so it requires
 102   * `GC_call_with_reader_lock()` to avoid data race on multiprocessors.
 103   */
 104  GC_API GC_ATTR_DEPRECATED GC_word GC_gc_no;
 105  GC_API GC_word GC_CALL GC_get_gc_no(void);
 106  
 107  #ifdef GC_THREADS
 108  /**
 109   * GC is parallelized for performance on multiprocessors.
 110   * Set to a nonzero value when client calls `GC_start_mark_threads()`
 111   * directly or starts the first non-main thread, provided the
 112   * collector is built with `PARALLEL_MARK` macro defined, and either
 113   * `GC_MARKERS` (or `GC_NPROCS`) environment variable is set to a value
 114   * bigger than 1, or multiple cores (processors) are available, or
 115   * the client calls `GC_set_markers_count()` before the collector
 116   * initialization.  After setting, `GC_parallel` value is equal to the
 117   * number of marker threads minus one (i.e. the number of existing
 118   * parallel marker threads excluding the initiating one).
 119   */
 120  GC_API GC_ATTR_DEPRECATED int GC_parallel;
 121  #endif
 122  
 123  /** Return value of `GC_parallel`.  Does not acquire the allocator lock. */
 124  GC_API int GC_CALL GC_get_parallel(void);
 125  
 126  /**
 127   * Set the number of marker threads (including the initiating one)
 128   * to the desired value at start-up.  Zero value means the collector
 129   * is to decide.  If the correct nonzero value is passed, then
 130   * `GC_parallel` will be set to the value minus one later.  Has no effect
 131   * if called after the collector initialization.  Does not itself cause
 132   * creation of the marker threads.  Does not use any synchronization.
 133   */
 134  GC_API void GC_CALL GC_set_markers_count(unsigned);
 135  
 136  /*
 137   * Public R/W variables.  The supplied setter and getter functions are
 138   * preferred for new client code.
 139   */
 140  
 141  /**
 142   * When there is insufficient memory to satisfy an allocation request,
 143   * we return `(*GC_oom_fn)(size)`.  If it returns, it must return either
 144   * `NULL` or a valid pointer to a previously allocated heap object.
 145   * By default, this just returns `NULL`.  If it points to a function which
 146   * never returns `NULL`, probably by aborting the program instead, then
 147   * invocations of `GC_MALLOC()` and friends (that are additionally marked
 148   * as "never returning NULL unless GC_oom_fn returns NULL") do not need to
 149   * be followed by code that checks for the `NULL` result.
 150   * `GC_oom_fn` must not be 0.  Both the setter and the getter acquire
 151   * the allocator lock (in the reader mode in case of the getter) to
 152   * avoid data race.
 153   */
 154  typedef void *(GC_CALLBACK *GC_oom_func)(size_t /* `bytes_requested` */);
 155  GC_API GC_ATTR_DEPRECATED GC_oom_func GC_oom_fn;
 156  GC_API void GC_CALL GC_set_oom_fn(GC_oom_func) GC_ATTR_NONNULL(1);
 157  GC_API GC_oom_func GC_CALL GC_get_oom_fn(void);
 158  
 159  /**
 160   * Invoked when the heap grows or shrinks.  Called with the world
 161   * stopped (and the allocator lock held).  May be 0.  Both the setter
 162   * and the getter acquire the allocator lock (in the reader mode in
 163   * case of the getter).
 164   */
 165  typedef void(GC_CALLBACK *GC_on_heap_resize_proc)(GC_word /* new_size */);
 166  GC_API GC_ATTR_DEPRECATED GC_on_heap_resize_proc GC_on_heap_resize;
 167  GC_API void GC_CALL GC_set_on_heap_resize(GC_on_heap_resize_proc);
 168  GC_API GC_on_heap_resize_proc GC_CALL GC_get_on_heap_resize(void);
 169  
  170  typedef enum {
  171    GC_EVENT_START, /*< start collection */
  172    GC_EVENT_MARK_START,       /*< begin marking */
  173    GC_EVENT_MARK_END,         /*< end marking */
  174    GC_EVENT_RECLAIM_START,    /*< begin reclaiming */
  175    GC_EVENT_RECLAIM_END,      /*< end reclaiming */
  176    GC_EVENT_END,              /*< end collection */
  177    GC_EVENT_PRE_STOP_WORLD,   /*< begin stopping world */
  178    GC_EVENT_POST_STOP_WORLD,  /*< end stopping world */
  179    GC_EVENT_PRE_START_WORLD,  /*< begin restarting world */
  180    GC_EVENT_POST_START_WORLD, /*< end restarting world */
  181    GC_EVENT_THREAD_SUSPENDED,  /*< a thread was suspended (see `GC_on_thread_event_proc`) */
  182    GC_EVENT_THREAD_UNSUSPENDED /*< a suspended thread was resumed */
  183  } GC_EventType;
 184  
 185  /**
 186   * Invoked to indicate progress through the collection process.
 187   * Not used for thread suspend/resume notifications.  Called with the
 188   * allocator lock held (or, even, the world stopped).  May be 0 (means
 189   * no notifier).  Both the setter and the getter acquire the allocator
 190   * lock (in the reader mode in case of the getter).
 191   */
 192  typedef void(GC_CALLBACK *GC_on_collection_event_proc)(GC_EventType);
 193  GC_API void GC_CALL GC_set_on_collection_event(GC_on_collection_event_proc);
 194  GC_API GC_on_collection_event_proc GC_CALL GC_get_on_collection_event(void);
 195  
 196  #ifdef GC_THREADS
 197  /**
 198   * Invoked when a thread is suspended or resumed during collection.
 199   * Called with the allocator lock held (and the world stopped partially).
 200   * May be 0 (means no notifier).  Both the setter and the getter acquire
 201   * the allocator lock (in the reader mode in case of the getter).
 202   */
 203  typedef void(GC_CALLBACK *GC_on_thread_event_proc)(GC_EventType,
 204                                                     void * /* `thread_id` */);
 205  GC_API void GC_CALL GC_set_on_thread_event(GC_on_thread_event_proc);
 206  GC_API GC_on_thread_event_proc GC_CALL GC_get_on_thread_event(void);
 207  #endif
 208  
 209  /**
 210   * Turn on the find-leak mode (do not actually garbage collect, but
 211   * simply report inaccessible memory that was not deallocated with
 212   * `GC_FREE()`).  Initial value is determined by `FIND_LEAK` macro.
 213   * The value should not typically be modified after the collector
 214   * initialization (and, thus, it does not use or need synchronization).
 215   * The mode is supported only if the library has been compiled without
 216   * `NO_FIND_LEAK` macro defined.
 217   */
 218  GC_API GC_ATTR_DEPRECATED int GC_find_leak;
 219  GC_API void GC_CALL GC_set_find_leak(int);
 220  GC_API int GC_CALL GC_get_find_leak(void);
 221  
 222  /**
 223   * Arrange for pointers to object interiors to be recognized as valid.
 224   * Typically should not be changed after the collector initialization
 225   * (in case of calling it after the collector is initialized, the
 226   * setter acquires the allocator lock).  Must be only 0 or 1.
 227   * The initial value depends on whether the collector is built with
 228   * `ALL_INTERIOR_POINTERS` macro defined or not.  This also affects,
 229   * unless `GC_get_dont_add_byte_at_end()` returns a nonzero value,
 230   * whether the object sizes are increased by at least a byte to allow
 231   * "off-the-end" pointer recognition (but the size is not increased
 232   * for uncollectible objects as well as for ignore-off-page objects of
 233   * at least heap block size).
 234   */
 235  GC_API GC_ATTR_DEPRECATED int GC_all_interior_pointers;
 236  GC_API void GC_CALL GC_set_all_interior_pointers(int);
 237  GC_API int GC_CALL GC_get_all_interior_pointers(void);
 238  
 239  /**
 240   * If nonzero, finalizers will only be run in response to an explicit
 241   * `GC_invoke_finalizers()` call.  The default is determined by whether
 242   * the `FINALIZE_ON_DEMAND` macro is defined when the collector is built.
 243   * The setter and the getter are unsynchronized.
 244   */
 245  GC_API GC_ATTR_DEPRECATED int GC_finalize_on_demand;
 246  GC_API void GC_CALL GC_set_finalize_on_demand(int);
 247  GC_API int GC_CALL GC_get_finalize_on_demand(void);
 248  
 249  /**
 250   * Mark objects reachable from finalizable objects in a separate post-pass.
 251   * This makes it a bit safer to use non-topologically-ordered finalization.
 252   * Default value is determined by `JAVA_FINALIZATION` macro.
 253   * Enables `GC_register_finalizer_unreachable()` to work correctly.
 254   * The setter and the getter are unsynchronized.
 255   */
 256  GC_API GC_ATTR_DEPRECATED int GC_java_finalization;
 257  GC_API void GC_CALL GC_set_java_finalization(int);
 258  GC_API int GC_CALL GC_get_java_finalization(void);
 259  
 260  /**
 261   * Invoked by the collector when there are objects to be finalized.
 262   * Invoked at most once per collection cycle.  Never invoked unless
 263   * `GC_finalize_on_demand` is set.  Typically this will notify
 264   * a finalization thread, which will call `GC_invoke_finalizers()` in
 265   * response.  May be 0 (means no notifier).  Both the setter and the getter
 266   * acquire the allocator lock (in the reader mode in case of the getter).
 267   */
 268  typedef void(GC_CALLBACK *GC_finalizer_notifier_proc)(void);
 269  GC_API GC_ATTR_DEPRECATED GC_finalizer_notifier_proc GC_finalizer_notifier;
 270  GC_API void GC_CALL GC_set_finalizer_notifier(GC_finalizer_notifier_proc);
 271  GC_API GC_finalizer_notifier_proc GC_CALL GC_get_finalizer_notifier(void);
 272  
 273  /**
 274   * The functions called to report pointer checking errors.  Called without
 275   * the allocator lock held.  The default behavior is to fail with the
 276   * appropriate message which includes the pointers.  The functions
 277   * (variables) must not be 0.  Both the setters and the getters are
 278   * unsynchronized.
 279   */
 280  typedef void(GC_CALLBACK *GC_valid_ptr_print_proc_t)(void *);
 281  typedef void(GC_CALLBACK *GC_same_obj_print_proc_t)(void * /* `p` */,
 282                                                      void * /* `q` */);
 283  GC_API GC_ATTR_DEPRECATED GC_same_obj_print_proc_t GC_same_obj_print_proc;
 284  GC_API GC_ATTR_DEPRECATED GC_valid_ptr_print_proc_t
 285      GC_is_valid_displacement_print_proc;
 286  GC_API GC_ATTR_DEPRECATED GC_valid_ptr_print_proc_t GC_is_visible_print_proc;
 287  GC_API void GC_CALL GC_set_same_obj_print_proc(GC_same_obj_print_proc_t)
 288      GC_ATTR_NONNULL(1);
 289  GC_API GC_same_obj_print_proc_t GC_CALL GC_get_same_obj_print_proc(void);
 290  GC_API void
 291      GC_CALL GC_set_is_valid_displacement_print_proc(GC_valid_ptr_print_proc_t)
 292          GC_ATTR_NONNULL(1);
 293  GC_API GC_valid_ptr_print_proc_t GC_CALL
 294  GC_get_is_valid_displacement_print_proc(void);
 295  GC_API void GC_CALL GC_set_is_visible_print_proc(GC_valid_ptr_print_proc_t)
 296      GC_ATTR_NONNULL(1);
 297  GC_API GC_valid_ptr_print_proc_t GC_CALL GC_get_is_visible_print_proc(void);
 298  
 299  /*
 300   * A flag indicating "do not collect" mode.  This overrides explicit
 301   * `GC_gcollect()` calls as well.  Used as a counter, so that nested
 302   * enabling and disabling work correctly.  Should normally be updated
 303   * with `GC_enable()` and `GC_disable()` calls.  Direct assignment to
 304   * `GC_dont_gc` variable is deprecated.  To check whether collections
 305   * are disabled, `GC_is_disabled()` is preferred for new code.
 306   */
 307  GC_API
 308  #ifndef GC_DONT_GC
 309  GC_ATTR_DEPRECATED
 310  #endif
 311  int GC_dont_gc;
 312  
 313  /**
 314   * Do not expand the heap unless explicitly requested or forced to.
 315   * The setter and the getter are unsynchronized.
 316   */
 317  GC_API GC_ATTR_DEPRECATED int GC_dont_expand;
 318  GC_API void GC_CALL GC_set_dont_expand(int);
 319  GC_API int GC_CALL GC_get_dont_expand(void);
 320  
 321  /**
 322   * Causes the non-incremental collector to use the entire heap before
 323   * collecting.  This sometimes results in more large-block fragmentation,
 324   * since very large blocks will tend to get broken up during each
 325   * collection cycle.  It is likely to result in a larger working set, but
 326   * lower collection frequencies, and hence fewer instructions executed in
 327   * the collector.
 328   */
 329  GC_API GC_ATTR_DEPRECATED int GC_use_entire_heap;
 330  
 331  /**
 332   * Number of partial collections between full collections.  Matters only
 333   * if `GC_is_incremental_mode()`.  Full collections are also triggered
 334   * if the collector detects a substantial increase in the number of the
 335   * in-use heap blocks.  Values in the tens are now perfectly reasonable.
 336   * The setter and the getter are unsynchronized, so
 337   * `GC_call_with_alloc_lock()` (`GC_call_with_reader_lock()` in case
 338   * of the getter) is required to avoid data race (if the value is
 339   * modified after the collector is put into the multi-threaded mode).
 340   */
 341  GC_API GC_ATTR_DEPRECATED int GC_full_freq;
 342  GC_API void GC_CALL GC_set_full_freq(int);
 343  GC_API int GC_CALL GC_get_full_freq(void);
 344  
 345  /**
 346   * Bytes not considered candidates for collection.  Used only to control
 347   * scheduling of collections.  Updated by `GC_malloc_uncollectable()` and
 348   * `GC_free()`.  Wizards only.  The setter and the getter are unsynchronized,
 349   * so `GC_call_with_alloc_lock()` (`GC_call_with_reader_lock()` in case of
 350   * the getter) is required to avoid data race (if the value is modified
 351   * after the collector is put into the multi-threaded mode).
 352   */
 353  GC_API GC_ATTR_DEPRECATED GC_word GC_non_gc_bytes;
 354  GC_API void GC_CALL GC_set_non_gc_bytes(GC_word);
 355  GC_API GC_word GC_CALL GC_get_non_gc_bytes(void);
 356  
 357  /**
 358   * Do not register dynamic library data segments automatically.
 359   * Also, if set by the collector itself (during a collection), this
 360   * means that such a registration is not supported.  Wizards only.
 361   * Should be set only if the client explicitly registers all roots.
 362   * (In some environments like Microsoft Windows and Apple's Darwin,
 363   * this may also prevent registration of the main data segment as a part
 364   * of the root set.)  The setter and the getter are unsynchronized.
 365   */
 366  GC_API GC_ATTR_DEPRECATED int GC_no_dls;
 367  GC_API void GC_CALL GC_set_no_dls(int);
 368  GC_API int GC_CALL GC_get_no_dls(void);
 369  
 370  /**
 371   * We try to make sure that we allocate at least
 372   * `N / GC_free_space_divisor` bytes between collections, where `N` is
 373   * twice the number of traced bytes, plus the number of untraced bytes
 374   * (i.e. bytes in the "atomic" objects), plus a rough estimate of the
 375   * root set size.  `N` approximates GC tracing work per collection.
 376   * The initial value is given by `GC_FREE_SPACE_DIVISOR` macro.
 377   * Increasing its value will use less space but more collection time.
 378   * Decreasing it will appreciably decrease total collection time at the
 379   * expense of space.  The setter and the getter are unsynchronized, so
 380   * `GC_call_with_alloc_lock()` (`GC_call_with_reader_lock()` in case of
 381   * the getter) is required to avoid data race (if the value is modified
 382   * after the collector is put into the multi-threaded mode).
 383   * In GC v7.1 and before, the setter returned the old value.
 384   */
 385  GC_API GC_ATTR_DEPRECATED GC_word GC_free_space_divisor;
 386  GC_API void GC_CALL GC_set_free_space_divisor(GC_word);
 387  GC_API GC_word GC_CALL GC_get_free_space_divisor(void);
 388  
 389  /**
 390   * The maximum number of collections attempted before reporting out of
 391   * memory after heap expansion fails.  Initially 0.  The setter and
 392   * getter are unsynchronized, so `GC_call_with_alloc_lock()`
 393   * (`GC_call_with_reader_lock()` in case of the getter) is required to
 394   * avoid data race (if the value is modified after the collector is put
 395   * into the multi-threaded mode).
 396   */
 397  GC_API GC_ATTR_DEPRECATED GC_word GC_max_retries;
 398  GC_API void GC_CALL GC_set_max_retries(GC_word);
 399  GC_API GC_word GC_CALL GC_get_max_retries(void);
 400  
 401  /**
 402   * The cold end (bottom) of user stack.  May be set in the client prior
 403   * to calling any `GC_` routines.  This avoids some overhead, and
 404   * potentially some signals that can confuse debuggers.  Otherwise the
 405   * collector attempts to set it automatically.  For multi-threaded
 406   * code, this is the cold end of the stack for the primordial thread.
 407   * For multi-threaded code, altering `GC_stackbottom` value directly
 408   * after the collector initialization has no effect.  Portable clients
 409   * should use `GC_set_stackbottom()`, `GC_get_stack_base()`,
 410   * `GC_call_with_gc_active()` and `GC_register_my_thread()` instead.
 411   */
 412  GC_API GC_ATTR_DEPRECATED char *GC_stackbottom;
 413  
 414  /**
 415   * Do not collect as part of the collector initialization.  Should be
 416   * set only if the client wants a chance to manually initialize the
 417   * root set before the first collection.  Interferes with black-listing.
 418   * Wizards only.  The setter and the getter are unsynchronized (and no
 419   * external locking is needed since the value is accessed at the collector
 420   * initialization only).
 421   */
 422  GC_API GC_ATTR_DEPRECATED int GC_dont_precollect;
 423  GC_API void GC_CALL GC_set_dont_precollect(int);
 424  GC_API int GC_CALL GC_get_dont_precollect(void);
 425  
 426  /**
 427   * If incremental collection is enabled, we try to terminate collections
 428   * after this many milliseconds (plus the amount of nanoseconds as given in
 429   * the latest `GC_set_time_limit_tv()` call, if any).  Not a hard time bound.
 430   * Setting this variable to `GC_TIME_UNLIMITED` essentially disables
 431   * incremental collection (i.e. disables the "pause time exceeded" tests)
 432   * while leaving generational collection enabled.  The setter and the
 433   * getter are unsynchronized, so `GC_call_with_alloc_lock()`
 434   * (`GC_call_with_reader_lock()` in case of the getter) is required to
 435   * avoid data race (if the value is modified after the collector is put
 436   * into the multi-threaded mode).  The setter does not update the value
 437   * of the nanosecond part of the time limit (it is zero unless ever set
 438   * by `GC_set_time_limit_tv()` call).
 439   */
 440  GC_API GC_ATTR_DEPRECATED unsigned long GC_time_limit;
 441  #define GC_TIME_UNLIMITED 999999
 442  GC_API void GC_CALL GC_set_time_limit(unsigned long);
 443  GC_API unsigned long GC_CALL GC_get_time_limit(void);
 444  
  445  /** A portable type definition of time with a nanosecond precision. */
  446  /* Used by `GC_set_time_limit_tv()` and `GC_get_time_limit_tv()`. */
  447  struct GC_timeval_s {
  448    unsigned long tv_ms;   /*< time in milliseconds */
  449    unsigned long tv_nsec; /*< nanoseconds fraction (less than 1000000) */
  450  };
 450  
 451  /* Public procedures */
 452  
 453  /**
 454   * Set/get the time limit of the incremental collections.  This is
 455   * similar to `GC_set_time_limit` and `GC_get_time_limit` but the time
 456   * is provided with the nanosecond precision.  The value of `tv_nsec`
 457   * part should be less than a million.  If the value of `tv_ms` part is
 458   * `GC_TIME_UNLIMITED`, then `tv_nsec` part is ignored.  Initially, the
 459   * value of `tv_nsec` part of the time limit is zero.  The functions do
 460   * not use any synchronization.  Defined only if the library has been
 461   * compiled without `NO_CLOCK` macro defined.
 462   */
 463  GC_API void GC_CALL GC_set_time_limit_tv(struct GC_timeval_s);
 464  GC_API struct GC_timeval_s GC_CALL GC_get_time_limit_tv(void);
 465  
 466  /**
 467   * Set/get the minimum value of the ratio of allocated bytes since
 468   * garbage collection to the amount of finalizers created since that
 469   * collection (so value is greater than
 470   * `GC_bytes_allocd / (GC_fo_entries - last_fo_entries)`) which
  471   * triggers the collection instead of heap expansion.  The value has no
 472   * effect in the collector incremental mode.  The default value is
 473   * 10000 unless `GC_ALLOCD_BYTES_PER_FINALIZER` macro with a custom value
 474   * is defined to build the collector.  The default value might be not the
 475   * right choice for clients where e.g. most objects have a finalizer.
 476   * Zero value effectively disables taking amount of finalizers in the
 477   * decision whether to collect or not.  The functions do not use any
 478   * synchronization.
 479   */
 480  GC_API void GC_CALL GC_set_allocd_bytes_per_finalizer(GC_word);
 481  GC_API GC_word GC_CALL GC_get_allocd_bytes_per_finalizer(void);
 482  
 483  /**
 484   * Tell the collector to start various performance measurements.
 485   * Only the total time taken by full collections and the average time
 486   * spent in the world-stopped collections are calculated, as of now.
 487   * And, currently, there is no way to stop the measurements.
 488   * The function does not use any synchronization.  Defined only if the
 489   * library has been compiled without `NO_CLOCK` macro defined.
 490   */
 491  GC_API void GC_CALL GC_start_performance_measurement(void);
 492  
 493  /**
 494   * Get the total time of all full collections since the start of the
 495   * performance measurements.  Includes time spent in the supplementary
 496   * actions like blacklists promotion, marks clearing, free lists
 497   * reconstruction and objects finalization.  The measurement unit is a
 498   * millisecond.  Note that the returned value wraps around on overflow.
 499   * The function does not use any synchronization.  Defined only if the
 500   * library has been compiled without `NO_CLOCK` macro defined.
 501   */
 502  GC_API unsigned long GC_CALL GC_get_full_gc_total_time(void);
 503  
 504  /**
 505   * Same as `GC_get_full_gc_total_time` but takes into account all mark
 506   * phases with the world stopped and nothing else.
 507   */
 508  GC_API unsigned long GC_CALL GC_get_stopped_mark_total_time(void);
 509  
 510  /**
 511   * Get the average time spent in all mark phases with the world stopped.
 512   * The average value is computed since the start of the performance
 513   * measurements (or right since the collector initialization if the
 514   * collector logging is enabled).  The result is in nanoseconds.
 515   * The function acquires the allocator lock (in the reader mode) to avoid
 516   * data race.  Defined only if the library has been compiled without
 517   * `NO_CLOCK` macro defined.
 518   */
 519  GC_API unsigned long GC_CALL GC_get_avg_stopped_mark_time_ns(void);
 520  
 521  /**
 522   * Set whether the garbage collector will allocate executable memory
 523   * pages or not.  A nonzero argument instructs the collector to
 524   * allocate memory with the executable flag on.  Must be called before
 525   * the collector is initialized.  May have no effect on some platforms.
 526   * The default value is controlled by `NO_EXECUTE_PERMISSION` macro (if
 527   * present then the flag is off).  Portable clients should have
 528   * `GC_set_pages_executable(1)` call (before `GC_INIT()` one) provided
 529   * they are going to execute code on any of the GC-allocated memory objects.
 530   */
 531  GC_API void GC_CALL GC_set_pages_executable(int);
 532  
 533  /**
  534   * Returns a nonzero value if the garbage collector is set to the
 535   * allocate-executable-memory mode.  The mode could be changed by
 536   * `GC_set_pages_executable` (before `GC_INIT()` call) unless the former
 537   * has no effect on the platform.  Does not use or need synchronization.
 538   */
 539  GC_API int GC_CALL GC_get_pages_executable(void);
 540  
 541  /**
 542   * The setter and the getter of the minimum value returned by the internal
 543   * `min_bytes_allocd()`.  The value should not be zero; the default value
 544   * is one.  Not synchronized.
 545   */
 546  GC_API void GC_CALL GC_set_min_bytes_allocd(size_t);
 547  GC_API size_t GC_CALL GC_get_min_bytes_allocd(void);
 548  
 549  /**
 550   * Set/get the size in pages of units operated by `GC_collect_a_little()`.
 551   * The value should not be zero.  Not synchronized.
 552   */
 553  GC_API void GC_CALL GC_set_rate(int);
 554  GC_API int GC_CALL GC_get_rate(void);
 555  
 556  /**
 557   * Set/get the maximum number of prior attempts at the world-stop marking.
 558   * Not synchronized.
 559   */
 560  GC_API void GC_CALL GC_set_max_prior_attempts(int);
 561  GC_API int GC_CALL GC_get_max_prior_attempts(void);
 562  
 563  /**
 564   * Control whether to disable algorithm deciding if a collection should
 565   * be started when we allocated enough to amortize the collection.
 566   * Both the setter and the getter acquire the allocator lock (in the reader
 567   * mode in case of the getter) to avoid data race.
 568   */
 569  GC_API void GC_CALL GC_set_disable_automatic_collection(int);
 570  GC_API int GC_CALL GC_get_disable_automatic_collection(void);
 571  
 572  /**
 573   * Overrides the default handle-fork mode.  A nonzero value means GC
 574   * should install proper `pthread_atfork` handlers.  Has effect only
 575   * if called before the collector initialization.  Clients should call
 576   * `GC_set_handle_fork()` with nonzero argument if going to use `fork`
 577   * with the GC functions called in the child process.  (Note that such
 578   * client and at-fork handler activities are not fully POSIX-compliant.)
 579   * `GC_set_handle_fork()` instructs `GC_init` to setup GC fork handlers
 580   * using `pthread_atfork()`, the latter might fail (or, even, absent on
 581   * some targets) causing `abort` at the collector initialization.
 582   * Issues with missing (or failed) `pthread_atfork()` could be avoided
 583   * by invocation of `GC_set_handle_fork(-1)` at application start-up and
 584   * surrounding each `fork()` with the relevant
 585   * `GC_atfork_prepare`/`GC_atfork_parent`/`GC_atfork_child` calls.
 586   */
 587  GC_API void GC_CALL GC_set_handle_fork(int);
 588  
 589  /**
 590   * Routines to handle POSIX `fork()` manually (no-op if handled
 591   * automatically).  `GC_atfork_prepare()` should be called immediately
 592   * before `fork()`; `GC_atfork_parent()` should be invoked just after
 593   * `fork` in the branch that corresponds to parent process (i.e.,
 594   * `fork` result is nonzero); `GC_atfork_child()` is to be called
 595   * immediately in the child branch (i.e., `fork` result is 0).
 596   * Note that `GC_atfork_child()` call should, of course, precede
 597   * `GC_start_mark_threads()` call, if any.
 598   */
 599  GC_API void GC_CALL GC_atfork_prepare(void);
 600  GC_API void GC_CALL GC_atfork_parent(void);
 601  GC_API void GC_CALL GC_atfork_child(void);
 602  
 603  /**
 604   * Initialize the collector.  Portable clients should call `GC_INIT()`
 605   * from the program's `main()` instead.
 606   */
 607  GC_API void GC_CALL GC_init(void);
 608  
 609  /**
 610   * Return 1 (true) if the collector is initialized (or, at least, the
 611   * initialization is in progress), 0 otherwise.
 612   */
 613  GC_API int GC_CALL GC_is_init_called(void);
 614  
 615  /**
 616   * Perform the collector shutdown.  (E.g. dispose critical sections on
 617   * Win32 target.)  A duplicate invocation is a no-op.  `GC_INIT()` should
 618   * not be called after the shutdown.  See also `GC_win32_free_heap()`.
 619   */
 620  GC_API void GC_CALL GC_deinit(void);
 621  
 622  /**
 623   * General-purpose allocation functions, with roughly `malloc` calling
 624   * conventions.  The atomic variants promise that no relevant pointers
 625   * are contained in the object.  The non-atomic variants guarantee that
 626   * the new object is cleared.  `GC_malloc_uncollectable()` allocates
 627   * an object that is scanned for pointers to collectible objects, but
 628   * is not itself collectible.  The object is scanned even if it does
 629   * not appear to be reachable.  `GC_malloc_uncollectable()` and `GC_free()`
 630   * called on the resulting object implicitly update `GC_non_gc_bytes`
 631   * appropriately.  All these functions (`GC_malloc`, `GC_malloc_atomic`,
 632   * `GC_strdup`, `GC_strndup`, `GC_malloc_uncollectable`) are guaranteed
 633   * never to return `NULL` unless `GC_oom_fn()` returns `NULL`.
 634   */
 635  GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
 636      GC_malloc(size_t /* `size_in_bytes` */);
 637  GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
 638      GC_malloc_atomic(size_t /* `size_in_bytes` */);
 639  GC_API GC_ATTR_MALLOC char *GC_CALL GC_strdup(const char *);
 640  GC_API GC_ATTR_MALLOC char *GC_CALL GC_strndup(const char *, size_t)
 641      GC_ATTR_NONNULL(1);
 642  GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
 643      GC_malloc_uncollectable(size_t /* `size_in_bytes` */);
 644  
 645  /**
 646   * The allocation function which guarantees the requested alignment of
 647   * the allocated memory object.  The `align` argument should be nonzero
 648   * and a power of two.  It is guaranteed never to return `NULL` unless
 649   * `GC_oom_fn()` returns `NULL`.
 650   */
 651  GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(2) void *GC_CALL
 652      GC_memalign(size_t /* `align` */, size_t /* `lb` */);
 653  
 654  /**
 655   * A function similar to `GC_memalign` but existing largely for
 656   * redirection in the find-leak mode.  The `align` argument should be
 657   * nonzero and a power of two, but additionally the argument is
 658   * required to be not less than size of a pointer.  Note that the
 659   * function does not change value of `*memptr` in case of failure
 660   * (i.e. when the result is nonzero).  It is guaranteed never to return
 661   * `NULL` unless `GC_oom_fn()` returns `NULL`.
 662   */
 663  GC_API int GC_CALL GC_posix_memalign(void ** /* `memptr` */,
 664                                       size_t /* `align` */, size_t /* `lb` */)
 665      GC_ATTR_NONNULL(1);
 666  
 667  #ifndef GC_NO_VALLOC
 668  /**
 669   * The allocation functions that guarantee the memory page alignment of
 670   * the returned object.  Exist largely for redirection in the find-leak
 671   * mode.  All these functions (`GC_pvalloc`, `GC_valloc`) are guaranteed
 672   * never to return `NULL` unless `GC_oom_fn()` returns `NULL`.
 673   */
 674  GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
 675      GC_valloc(size_t /* `lb` */);
 676  GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
 677      GC_pvalloc(size_t /* `lb` */);
 678  #endif
 679  
 680  /**
 681   * Explicitly deallocate an object.  Dangerous if used incorrectly.
 682   * Requires a pointer to the base of an object.  An object should not
 683   * be enabled for finalization (and it should not contain registered
 684   * disappearing links of any kind) when it is explicitly deallocated.
 685   * `GC_free(0)` is a no-op, as required by ANSI C for `free()`.
 686   */
 687  GC_API void GC_CALL GC_free(void *);
 688  GC_API void GC_CALL GC_debug_free(void *);
 689  
 690  /**
 691   * A symbol to be intercepted by heap profilers so that they can
 692   * accurately track allocations.  Programs such as Valgrind `massif`
 693   * and KDE heaptrack do tracking of allocated objects by overriding
 694   * common allocator methods (e.g. `malloc` and `free`).  However,
 695   * because the collector does not work by calling standard allocation
 696   * methods on objects that were reclaimed, we need a way to tell the
 697   * profiler that an object has been freed.  This function is not
 698   * intended to be called by the client, it should be used for the
 699   * interception purpose only.  The collector calls this function
 700   * internally whenever an object is freed.  Defined only if the library
 701   * has been compiled with `VALGRIND_TRACKING` macro defined.
 702   */
 703  GC_API void GC_CALLBACK GC_free_profiler_hook(void *);
 704  
#if (defined(GC_CAN_SAVE_CALL_STACKS) || defined(GC_ADD_CALLER)) \
    && !defined(GC_RETURN_ADDR_T_DEFINED)
/*
 * A type to hold a function return address (pointer).  Never used for
 * calling a function.
 */
#  if defined(__GNUC__)
/*
 * Defined as a data (object) pointer type to avoid the compiler complain
 * that ISO C forbids conversion between object and function pointer types.
 */
typedef void *GC_return_addr_t;
#  else
typedef void (*GC_return_addr_t)(void);
#  endif
/* Guard macro so that repeated inclusion does not redefine the type. */
#  define GC_RETURN_ADDR_T_DEFINED
#endif /* GC_CAN_SAVE_CALL_STACKS || GC_ADD_CALLER */
 722  
/*
 * The extra arguments (and the corresponding formal parameters) that
 * are appended to calls of the debug variants of the allocation
 * routines (e.g. `GC_debug_malloc`): the source file name and the line
 * number of the call site and, if `GC_ADD_CALLER` is defined, also the
 * caller's return address (passed first).
 */
#ifdef GC_ADD_CALLER
#  define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
#  define GC_EXTRA_PARAMS GC_return_addr_t ra, const char *s, int i
#else
#  define GC_EXTRAS __FILE__, __LINE__
#  define GC_EXTRA_PARAMS const char *s, int i
#endif
 730  
 731  /*
 732   * The "stubborn" objects allocation is not supported anymore.
 * These exist only for backward compatibility.
 734   */
 735  #define GC_MALLOC_STUBBORN(sz) GC_MALLOC(sz)
 736  #define GC_NEW_STUBBORN(t) GC_NEW(t)
 737  #define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
 738  GC_API GC_ATTR_DEPRECATED void GC_CALL GC_change_stubborn(const void *);
 739  GC_API GC_ATTR_DEPRECATED void GC_CALL GC_debug_change_stubborn(const void *);
 740  GC_API GC_ATTR_ALLOC_SIZE(1) GC_ATTR_DEPRECATED void *GC_CALL
 741      GC_malloc_stubborn(size_t);
 742  GC_API GC_ATTR_ALLOC_SIZE(1) GC_ATTR_DEPRECATED void *GC_CALL
 743      GC_debug_malloc_stubborn(size_t, GC_EXTRA_PARAMS);
 744  
 745  /**
 746   * Inform the collector that the object has been changed.
 747   * Only non-`NULL` pointer stores into the object are considered to be
 748   * changes.  Matters only if the incremental collection is enabled in
 749   * the manual VDB (virtual dirty bits) mode; otherwise the function
 750   * does nothing.  Should be followed typically by `GC_reachable_here()`
 751   * called for each of the stored pointers.
 752   */
 753  GC_API void GC_CALL GC_end_stubborn_change(const void *) GC_ATTR_NONNULL(1);
 754  GC_API void GC_CALL GC_debug_end_stubborn_change(const void *)
 755      GC_ATTR_NONNULL(1);
 756  
 757  /**
 758   * Return a pointer to the base (lowest address) of an object given
 759   * a pointer to a location within the object.  I.e., map an interior
 760   * pointer to the corresponding base pointer.  Note that with debugging
 761   * allocation, this returns a pointer to the actual base of the object,
 762   * i.e. the debug information, not to the base of the user object.
 763   * Return `NULL` if `displaced_pointer` does not point to within
 764   * a valid object.  Note that a deallocated object in the garbage
 765   * collected heap may be considered valid, even if it has been
 766   * deallocated with `GC_free()`.
 767   */
 768  GC_API void *GC_CALL GC_base(void * /* `displaced_pointer` */);
 769  
 770  /**
 771   * Return 1 (true) if the argument points to somewhere in the garbage
 772   * collected heap, 0 otherwise.  Primary use is as a fast alternative to
 773   * `GC_base()` to check whether the given object is allocated by the
 774   * collector or not.  It is assumed that the collector is already initialized.
 775   */
 776  GC_API int GC_CALL GC_is_heap_ptr(const void *);
 777  
 778  /**
 779   * Given a pointer to the base of an object, return its size in bytes.
 780   * (For small objects this also happens to work from interior pointers,
 781   * but that should not be relied upon.)  The returned size may be slightly
 782   * larger than what was originally requested.  The argument may be `NULL`
 783   * (causing 0 to be returned).
 784   */
 785  GC_API size_t GC_CALL GC_size(const void * /* `obj` */);
 786  
 787  /**
 788   * For compatibility with C library.  This is occasionally faster than
 789   * a `malloc` followed by a `bcopy`.  But if you rely on that, either
 790   * here or with the standard C library, your code is broken.
 791   * Probably, it should not have been invented, but now we are stuck.
 792   * The resulting object has the same kind as the original one.
 793   * It is an error to have changes enabled for the original object.
 794   * It does not change the content of the object from its beginning to
 795   * the minimum of old size and `new_size_in_bytes`; the content above in
 796   * case of object size growth is initialized to zero (not guaranteed for
 797   * atomic object type).  The function follows ANSI conventions for `NULL`
 798   * `old_object` (i.e., equivalent to `GC_malloc` regardless of
 799   * `new_size_in_bytes`).  If `new_size_in_bytes` is zero (and
 800   * `old_object` is non-`NULL`), then the call is equivalent to
 801   * `GC_free` (and `NULL` is returned).  If `old_object` is non-`NULL`,
 802   * it must have been returned by an earlier call to `GC_realloc`,
 803   * `GC_malloc` or friends.  In case of the allocation failure, the
 804   * memory pointed by `old_object` is untouched (and not freed).  If the
 805   * returned pointer is not the same as `old_object` and both of them
 806   * are non-`NULL`, then `old_object` is freed.  Returns either `NULL`
 807   * (in case of the allocation failure or zero `new_size_in_bytes`) or
 808   * pointer to the allocated memory.  For a nonzero `new_size_in_bytes`,
 809   * the function (including its debug variant) is guaranteed never to
 810   * return `NULL` unless `GC_oom_fn()` returns `NULL`.
 811   */
 812  GC_API void *GC_CALL GC_realloc(void * /* `old_object` */,
 813                                  size_t /* `new_size_in_bytes` */)
 814      /* `realloc` attribute */ GC_ATTR_ALLOC_SIZE(2);
 815  GC_API void *GC_CALL GC_debug_realloc(void * /* `old_object` */,
 816                                        size_t /* `new_size_in_bytes` */,
 817                                        GC_EXTRA_PARAMS)
 818      /* `realloc` attribute */ GC_ATTR_ALLOC_SIZE(2);
 819  
 820  /**
 821   * Increase the heap size explicitly.  The performed increase is at
 822   * least `number_of_bytes`.  Does the collector initialization as well
 823   * (if not yet).  Returns 0 on failure, 1 on success.
 824   */
 825  GC_API int GC_CALL GC_expand_hp(size_t /* `number_of_bytes` */);
 826  
 827  /**
 828   * Limit the heap size to `n` bytes.  Useful when you are debugging,
 829   * especially on systems that do not handle running out of memory well.
 830   * A zero `n` means the heap is unbounded; this is the default.
 831   * This setter function is unsynchronized (so it might require
 832   * `GC_call_with_alloc_lock` to avoid data race).
 833   */
 834  GC_API void GC_CALL GC_set_max_heap_size(GC_word /* `n` */);
 835  
 836  /**
 837   * Inform the collector that a certain section of statically allocated
 838   * memory contains no pointers to garbage-collected memory.  Thus it does
 839   * not need to be scanned.  This is sometimes important if the application
 840   * maps large read/write files into the address space, which could be
 841   * mistaken for dynamic library data segments on some systems.
 * Neither the section start (`low_address`) nor its end
 * (`high_address_plus_1`) needs to be pointer-aligned.
 844   */
 845  GC_API void GC_CALL GC_exclude_static_roots(
 846      void * /* `low_address` */, void * /* `high_address_plus_1` */);
 847  
 848  /**
 849   * Clear the number of entries in the exclusion table.  Wizards only.
 850   * Should be called typically with the allocator lock held, but no
 851   * assertion about it by design.
 852   */
 853  GC_API void GC_CALL GC_clear_exclusion_table(void);
 854  
 855  /** Clear the set of root segments.  Wizards only. */
 856  GC_API void GC_CALL GC_clear_roots(void);
 857  
 858  /**
 859   * Add a root segment.  Wizards only.  May merge adjacent or overlapping
 * segments if appropriate.  Neither the segment start (`low_address`)
 * nor its end (`high_address_plus_1`) needs to be pointer-aligned.
 862   * `low_address` must not be greater than `high_address_plus_1`.
 863   */
 864  GC_API void GC_CALL GC_add_roots(void * /* `low_address` */,
 865                                   void * /* `high_address_plus_1` */);
 866  
 867  /** Remove root segments located fully in the region.  Wizards only. */
 868  GC_API void GC_CALL GC_remove_roots(void * /* `low_address` */,
 869                                      void * /* `high_address_plus_1` */);
 870  
 871  /**
 872   * Add a displacement to the set of those considered valid by the
 873   * collector.  `GC_register_displacement(offset)` means that if `p` was
 874   * returned by `GC_malloc()`, then `(char *)p + offset` will be
 875   * considered to be a valid pointer to `p`.  `offset` must be less than
 876   * the size of a heap block.  (All pointers to the interior of objects
 877   * from the stack are considered valid in any case.  This applies to
 878   * heap objects and static data.)  Preferably, this should be called
 879   * before any other GC procedures.  Calling it later adds to the
 880   * probability of excess memory retention.  This is a no-op if the
 881   * collector has recognition of arbitrary interior pointers enabled,
 882   * which is the default (assuming the collector is built with
 883   * `ALL_INTERIOR_POINTERS` macro defined).  The debugging variant should
 884   * be used if any debugging allocation is being done.
 885   */
 886  GC_API void GC_CALL GC_register_displacement(size_t /* `offset` */);
 887  GC_API void GC_CALL GC_debug_register_displacement(size_t /* `offset` */);
 888  
 889  /** Explicitly trigger a full, world-stop collection. */
 890  GC_API void GC_CALL GC_gcollect(void);
 891  
 892  /**
 893   * Same as above but ignores the default `stop_func` setting and tries
 894   * to unmap as much memory as possible (regardless of the corresponding
 * switch setting).  The recommended usage: on receiving a system
 * low-memory event; before retrying a system call that failed
 * because the system is running out of resources.
 898   */
 899  GC_API void GC_CALL GC_gcollect_and_unmap(void);
 900  
 901  /**
 902   * Trigger a full world-stopped collection.  Abort the collection if
 903   * and when `stop_func()` returns a nonzero value.  `stop_func()` will
 904   * be called frequently, and should be reasonably fast.
 905   * (`stop_func()` is called with the allocator lock held and the world
 906   * might be stopped; it is not allowed for `stop_func()` to manipulate
 907   * pointers to the garbage-collected heap or call most of GC functions.)
 * This works even if virtual dirty bits, and hence incremental
 * collection, are unavailable for the architecture.  Collections can
 910   * be aborted faster than normal pause times for incremental collection;
 911   * however, aborted collections do no useful work; the next collection
 912   * needs to start from the beginning.  `stop_func` must not be 0.
 913   * `GC_try_to_collect()` returns 0 if the collection was aborted (or the
 914   * collections are disabled), 1 if it succeeded.
 915   */
 916  typedef int(GC_CALLBACK *GC_stop_func)(void);
 917  GC_API int GC_CALL GC_try_to_collect(GC_stop_func /* `stop_func` */)
 918      GC_ATTR_NONNULL(1);
 919  
 920  /**
 921   * Set/get the default `stop_func`.  The latter is used by `GC_gcollect()`
 922   * and by implicitly triggered collections (except for the case when
 923   * handling out of memory).  Must not be 0.  Both the setter and the getter
 924   * acquire the allocator lock (in the reader mode in case of the getter)
 925   * to avoid data race.
 926   */
 927  GC_API void GC_CALL GC_set_stop_func(GC_stop_func /* `stop_func` */)
 928      GC_ATTR_NONNULL(1);
 929  GC_API GC_stop_func GC_CALL GC_get_stop_func(void);
 930  
 931  /**
 932   * Return the number of bytes in the heap.  Excludes collector private
 933   * data structures; excludes the unmapped memory (returned to the OS).
 934   * Includes empty blocks and fragmentation loss.  Includes some pages
 935   * that were allocated but never written.  This is an unsynchronized
 936   * getter, so it should be called typically with the allocator lock
 937   * held, at least in the reader mode, to avoid data race on
 938   * multiprocessors (the alternative way is to use `GC_get_prof_stats`
 939   * or `GC_get_heap_usage_safe` API calls instead).
 940   * This getter remains lock-free (unsynchronized) for compatibility
 941   * reason since some existing clients call it from a GC callback
 942   * holding the allocator lock.  (This API function and the following
 943   * four ones below were made thread-safe in GC v7.2alpha1 and
 944   * reverted back in v7.2alpha7 for the reason described.)
 945   */
 946  GC_API size_t GC_CALL GC_get_heap_size(void);
 947  
 948  /**
 949   * Return a lower bound on the number of free bytes in the heap
 950   * (excluding the unmapped memory space).  This is an unsynchronized
 951   * getter (see `GC_get_heap_size` comment regarding thread-safety).
 952   */
 953  GC_API size_t GC_CALL GC_get_free_bytes(void);
 954  
 955  /**
 956   * Return the size (in bytes) of the unmapped memory (which is returned
 957   * to the OS but could be remapped back by the collector later unless
 958   * the OS runs out of system/virtual memory).  This is an unsynchronized
 959   * getter (see `GC_get_heap_size` comment regarding thread-safety).
 960   */
 961  GC_API size_t GC_CALL GC_get_unmapped_bytes(void);
 962  
 963  /**
 964   * Return the number of bytes allocated since the last collection.
 965   * This is an unsynchronized getter (see `GC_get_heap_size` comment
 966   * regarding thread-safety).
 967   */
 968  GC_API size_t GC_CALL GC_get_bytes_since_gc(void);
 969  
 970  /**
 971   * Return the number of explicitly deallocated bytes of memory since
 972   * the recent collection.  This is an unsynchronized getter.
 973   */
 974  GC_API size_t GC_CALL GC_get_expl_freed_bytes_since_gc(void);
 975  
 976  /**
 977   * Return the total number of bytes allocated in this process.
 978   * Never decreases, except due to wrapping.  This is an unsynchronized
 979   * getter (see `GC_get_heap_size` comment regarding thread-safety).
 980   */
 981  GC_API size_t GC_CALL GC_get_total_bytes(void);
 982  
 983  /**
 984   * Return the total number of bytes obtained from OS.  Includes the
 985   * unmapped memory.  Never decreases.  It is an unsynchronized getter.
 986   */
 987  GC_API size_t GC_CALL GC_get_obtained_from_os_bytes(void);
 988  
 989  /**
 990   * Return the heap usage information.  This is a thread-safe (atomic)
 * alternative for the five getters above.  (This function acquires
 992   * the allocator lock in the reader mode, thus preventing data race and
 993   * returning the consistent result.)  Passing `NULL` pointer is allowed
 994   * for any argument.  Returned (filled in) values are of `GC_word` type.
 995   */
 996  GC_API void GC_CALL GC_get_heap_usage_safe(GC_word * /* `pheap_size` */,
 997                                             GC_word * /* `pfree_bytes` */,
 998                                             GC_word * /* `punmapped_bytes` */,
 999                                             GC_word * /* `pbytes_since_gc` */,
1000                                             GC_word * /* `ptotal_bytes` */);
1001  
1002  /**
1003   * Structure used to query the GC statistics (profiling information).
1004   * More fields could be added in the future.  To preserve compatibility
1005   * new fields should be added only to the end, and no deprecated fields
1006   * should be removed from.
1007   */
1008  struct GC_prof_stats_s {
1009    /**
1010     * Heap size in bytes (including the area unmapped to OS).
1011     * Same as value of `GC_get_heap_size() + GC_get_unmapped_bytes()`.
1012     */
1013    GC_word heapsize_full;
1014  
1015    /**
1016     * Total bytes contained in free and unmapped blocks.
1017     * Same as result of `GC_get_free_bytes() + GC_get_unmapped_bytes()`.
1018     */
1019    GC_word free_bytes_full;
1020  
1021    /**
1022     * Amount of memory unmapped to OS.  Same as the value returned by
1023     * `GC_get_unmapped_bytes()`.
1024     */
1025    GC_word unmapped_bytes;
1026  
1027    /**
1028     * Number of bytes allocated since the recent collection.
1029     * Same as the value returned by `GC_get_bytes_since_gc()`.
1030     */
1031    GC_word bytes_allocd_since_gc;
1032  
1033    /**
1034     * Number of bytes allocated before the recent garbage collection.
1035     * The value may wrap.  Same as the result of
1036     * `GC_get_total_bytes() - GC_get_bytes_since_gc()`.
1037     */
1038    GC_word allocd_bytes_before_gc;
1039  
1040    /**
1041     * Number of bytes not considered candidates for garbage collection.
1042     * Same as the value returned by `GC_get_non_gc_bytes()`.
1043     */
1044    GC_word non_gc_bytes;
1045  
1046    /**
1047     * Garbage collection cycle number.  The value may wrap.  Same as the
1048     * value returned by `GC_get_gc_no()`.
1049     */
1050    GC_word gc_no;
1051  
1052    /**
1053     * Number of marker threads (excluding the initiating one).  Same as
1054     * the value returned by `GC_get_parallel()` (or 0 if the collector
1055     * is single-threaded).
1056     */
1057    GC_word markers_m1;
1058  
1059    /**
1060     * Approximate number of reclaimed bytes after the recent garbage
1061     * collection.
1062     */
1063    GC_word bytes_reclaimed_since_gc;
1064  
1065    /**
1066     * Approximate number of bytes reclaimed before the recent garbage
1067     * collection.  The value may wrap.
1068     */
1069    GC_word reclaimed_bytes_before_gc;
1070  
1071    /**
1072     * Number of bytes freed explicitly since the recent garbage collection.
1073     * Same as the value returned by `GC_get_expl_freed_bytes_since_gc()`.
1074     */
1075    GC_word expl_freed_bytes_since_gc;
1076  
1077    /** Total amount of memory obtained from OS, in bytes. */
1078    GC_word obtained_from_os_bytes;
1079  };
1080  
1081  /**
1082   * Atomically get the collector statistics (various global counters).
1083   * Clients should pass the size of the buffer (of `GC_prof_stats_s` type)
1084   * to fill in the values - this is for interoperability between different
1085   * collector versions: an old client could have fewer fields, and vice
1086   * versa, client could use newer `gc.h` file (with more entries declared
1087   * in the structure) than that of the linked collector library; in the
1088   * latter case, unsupported (unknown) fields are filled in with -1 (`~0`).
1089   * Return the size (in bytes) of the filled in part of the structure
1090   * (excluding all unknown fields, if any).
1091   */
1092  GC_API size_t GC_CALL GC_get_prof_stats(struct GC_prof_stats_s *,
1093                                          size_t /* `stats_sz` */);
1094  #ifdef GC_THREADS
1095  /**
1096   * Same as `GC_get_prof_stats` but unsynchronized (i.e., not holding
1097   * the allocator lock).  Clients should call it using
1098   * `GC_call_with_reader_lock()` to avoid data race on multiprocessors.
1099   */
1100  GC_API size_t GC_CALL GC_get_prof_stats_unsafe(struct GC_prof_stats_s *,
1101                                                 size_t /* `stats_sz` */);
1102  #endif
1103  
1104  /**
1105   * Get the element value (converted to bytes) at a given index of
1106   * `GC_size_map` table which provides requested-to-actual allocation size
1107   * mapping.  Assumes the collector is initialized.  Returns -1 (`~0`) if
1108   * the index is out of `GC_size_map` table bounds.  Does not use
1109   * synchronization, thus clients should call it using
1110   * `GC_call_with_reader_lock()` typically to avoid data race on
1111   * multiprocessors.
1112   */
1113  GC_API size_t GC_CALL GC_get_size_map_at(int i);
1114  
1115  /**
1116   * Return the total memory use (in bytes) by all allocated blocks.
1117   * The result is equal to `GC_get_heap_size() - GC_get_free_bytes()`.
1118   * Acquires the allocator lock in the reader mode.
1119   */
1120  GC_API GC_word GC_CALL GC_get_memory_use(void);
1121  
1122  /**
1123   * Disable garbage collection.  Even `GC_gcollect()` calls will be
1124   * ineffective.
1125   */
1126  GC_API void GC_CALL GC_disable(void);
1127  
1128  /**
1129   * Return 1 (true) if the garbage collection is disabled (i.e., the
1130   * value of `GC_dont_gc` is nonzero), 0 otherwise.  Does not acquire
1131   * the allocator lock.
1132   */
1133  GC_API int GC_CALL GC_is_disabled(void);
1134  
1135  /**
1136   * Try to re-enable garbage collection.  `GC_disable()` and `GC_enable()`
1137   * calls could be nested.  Garbage collection is enabled if the number of
1138   * calls to both functions is equal.
1139   */
1140  GC_API void GC_CALL GC_enable(void);
1141  
1142  /**
1143   * Select whether to use the manual VDB (virtual dirty bits) mode for
1144   * the incremental collection.  Has no effect if called after enabling
1145   * the incremental collection.  The default value is off unless the
1146   * collector is compiled with `MANUAL_VDB` macro defined.  The manual
1147   * VDB mode should be used only if the client has the appropriate
1148   * `GC_END_STUBBORN_CHANGE()` and `GC_reachable_here()` (or,
1149   * alternatively, `GC_PTR_STORE_AND_DIRTY()`) calls (to ensure proper
1150   * write barriers).  The setter and the getter are not synchronized.
1151   */
1152  GC_API void GC_CALL GC_set_manual_vdb_allowed(int);
1153  GC_API int GC_CALL GC_get_manual_vdb_allowed(void);
1154  
1155  /*
1156   * The constants to represent available VDB (virtual dirty bits)
1157   * techniques.
1158   */
1159  
1160  /** Means the incremental mode is unsupported. */
1161  #define GC_VDB_NONE 0
1162  
1163  #define GC_VDB_MPROTECT 0x1
1164  
1165  /** Means `GC_set_manual_vdb_allowed(1)` has effect. */
1166  #define GC_VDB_MANUAL 0x2
1167  
1168  /** Means no other technique is usable. */
1169  #define GC_VDB_DEFAULT 0x4
1170  
1171  #define GC_VDB_GWW 0x8
1172  
1173  #define GC_VDB_PROC 0x20
1174  #define GC_VDB_SOFT 0x40
1175  
1176  /**
1177   * Get the list of available VDB (virtual dirty bits) techniques.
1178   * The returned value is a constant one, either `GC_VDB_NONE`, or one
1179   * or more of the above `GC_VDB_` constants, or'ed together.  May be
1180   * called before the collector is initialized.
1181   */
1182  GC_API unsigned GC_CALL GC_get_supported_vdbs(void);
1183  
1184  /**
1185   * Enable incremental/generational collection.  Not advisable unless
1186   * dirty bits are available or most heap objects are pointer-free
1187   * (atomic) or immutable.  Do not use in the find-leak mode.
1188   * Ignored if `GC_dont_gc` is nonzero.  Only the generational piece of
1189   * this is functional if `GC_time_limit` is set to `GC_TIME_UNLIMITED`.
1190   * Causes thread-local variant of `GC_gcj_malloc()` to revert to locked
1191   * allocation.  Must be called before any such `GC_gcj_malloc()` calls.
1192   * For best performance, should be called as early as possible.
1193   * On some platforms, calling it later may have adverse effects.
1194   * Safe to call before the collector initialization; it performs the
1195   * latter if not done yet.
1196   */
1197  GC_API void GC_CALL GC_enable_incremental(void);
1198  
1199  /**
1200   * Return 1 (true) if the incremental mode is on, 0 otherwise.
1201   * Does not acquire the allocator lock.
1202   */
1203  GC_API int GC_CALL GC_is_incremental_mode(void);
1204  
1205  /**
1206   * An extended variant of `GC_is_incremental_mode()` to return one of
1207   * `GC_VDB_` constants designating which VDB (virtual dirty bits)
1208   * technique is used exactly.  Does not acquire the allocator lock.
1209   */
1210  GC_API unsigned GC_CALL GC_get_actual_vdb(void);
1211  
/** May protect non-atomic objects. */
#define GC_PROTECTS_POINTER_HEAP 1

/** May protect pointer-free (atomic) objects. */
#define GC_PROTECTS_PTRFREE_HEAP 2

/* Protects static data.  But this is currently never the case. */
#define GC_PROTECTS_STATIC_DATA 4

/* Deprecated.  It is probably impractical to protect stacks. */
#define GC_PROTECTS_STACK 8

/** Means no pages are write-protected by the incremental mode. */
#define GC_PROTECTS_NONE 0
1224  
1225  /**
1226   * Does incremental mode write-protect pages?  Returns zero or
1227   * more of the above `GC_PROTECTS_` constants, or'ed together.
1228   * The collector is assumed to be initialized before this call.
1229   * The result is not affected by `GC_set_manual_vdb_allowed()`.
1230   * Call of `GC_enable_incremental()` may change the result to
1231   * `GC_PROTECTS_NONE` if some implementation is chosen at
1232   * runtime not needing to write-protect the pages.
1233   */
1234  GC_API int GC_CALL GC_incremental_protection_needs(void);
1235  
1236  /**
1237   * Force start of incremental collection.  Acquires the allocator lock.
1238   * No-op unless the incremental mode is on.
1239   */
1240  GC_API void GC_CALL GC_start_incremental_collection(void);
1241  
1242  /**
1243   * Perform some garbage collection work, if appropriate.
1244   * Return 0 if there is no more work to be done (including the
1245   * case when garbage collection is not appropriate).
1246   * Typically performs an amount of work corresponding roughly
1247   * to marking from one page.  May do more work if further
1248   * progress requires it, e.g. if incremental collection is
1249   * disabled.  It is reasonable to call this in a wait loop
1250   * until it returns 0.  If the garbage collection is disabled
1251   * but the incremental collection is already ongoing, then
1252   * perform marking anyway but not stopping the world (and
1253   * without the reclaim phase).
1254   */
1255  GC_API int GC_CALL GC_collect_a_little(void);
1256  
1257  /**
1258   * Allocate an object of size `lb` bytes.  The client guarantees that
1259   * as long as the object is live, it will be referenced by a pointer
1260   * that points to somewhere within the first GC heap block (`hblk`) of
1261   * the object.  (This should normally be declared `volatile` to prevent
1262   * the compiler from invalidating this assertion.)  This function is
1263   * only useful if a large array is being allocated.  It reduces the
1264   * chance of accidentally retaining such an array as a result of
1265   * scanning an integer that happens to be an address inside the array.
1266   * (Actually, it reduces the chance of the allocator not finding space
1267   * for such an array, since it will try hard to avoid introducing such
1268   * a false reference.)  On a SunOS 4.x or Windows system this is
1269   * recommended for arrays likely to be larger than 100 KB or so.
1270   * For other systems, or if the collector is not configured to
1271   * recognize all interior pointers, the threshold is normally much
1272   * higher.  These functions are guaranteed never to return `NULL`
1273   * unless `GC_oom_fn()` returns `NULL`.
1274   */
1275  GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
1276      GC_malloc_ignore_off_page(size_t /* `lb` */);
1277  GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
1278      GC_malloc_atomic_ignore_off_page(size_t /* `lb` */);
1279  
1280  /**
1281   * Allocate `lb` bytes of pointer-free, untraced, uncollectible data.
1282   * This is normally roughly equivalent to the system `malloc`.  But it
1283   * may be useful if `malloc` is redefined.  The function (including its
1284   * debug variant) is guaranteed never to return `NULL` unless
1285   * `GC_oom_fn()` returns `NULL`.  Defined only if the library has been
1286   * compiled with `GC_ATOMIC_UNCOLLECTABLE` macro defined.
1287   */
1288  GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
1289      GC_malloc_atomic_uncollectable(size_t /* `size_in_bytes` */);
1290  GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
1291      GC_debug_malloc_atomic_uncollectable(size_t, GC_EXTRA_PARAMS);
1292  
/**
 * Debugging (annotated) allocation.  `GC_gcollect()` will check
 * objects allocated in this way for overwrites, etc.  The extra
 * `GC_EXTRA_PARAMS` arguments carry the allocation-site information
 * (cf. the `_replacement` variants below, which fill in dummy file
 * and line values instead).
 */
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
    GC_debug_malloc(size_t /* `size_in_bytes` */, GC_EXTRA_PARAMS);
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
    GC_debug_malloc_atomic(size_t /* `size_in_bytes` */, GC_EXTRA_PARAMS);
GC_API GC_ATTR_MALLOC char *GC_CALL GC_debug_strdup(const char *,
                                                    GC_EXTRA_PARAMS);
GC_API GC_ATTR_MALLOC char *GC_CALL GC_debug_strndup(const char *, size_t,
                                                     GC_EXTRA_PARAMS)
    GC_ATTR_NONNULL(1);
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
    GC_debug_malloc_uncollectable(size_t /* `size_in_bytes` */,
                                  GC_EXTRA_PARAMS);
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
    GC_debug_malloc_ignore_off_page(size_t /* `size_in_bytes` */,
                                    GC_EXTRA_PARAMS);
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
    GC_debug_malloc_atomic_ignore_off_page(size_t /* `size_in_bytes` */,
                                           GC_EXTRA_PARAMS);

/**
 * The functions that allocate objects with debug information (like the
 * above), but just fill in dummy file and line number information.
 * Thus they can serve as drop-in `malloc`/`realloc` replacements.
 * This can be useful for two reasons:
 *   1. It allows the collector to be built with `DBG_HDRS_ALL` macro
 *      defined even if some allocation calls come from 3rd-party
 *      libraries that cannot be recompiled.
 *   2. On some platforms, the file and line information is redundant,
 *      since it can be reconstructed from a stack trace.  On such
 *      platforms it may be more convenient not to recompile, e.g. for
 *      leak detection.  This can be accomplished by instructing the
 *      linker to replace `malloc`/`realloc` with these.
 *
 * Note that these functions (for a nonzero new size in case of
 * `realloc`) are guaranteed never to return `NULL` unless
 * `GC_oom_fn()` returns `NULL`.
 */
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void *GC_CALL
    GC_debug_malloc_replacement(size_t /* `size_in_bytes` */);
GC_API /* `realloc` attribute */ GC_ATTR_ALLOC_SIZE(2) void *GC_CALL
    GC_debug_realloc_replacement(void * /* `obj` */,
                                 size_t /* `size_in_bytes` */);
1339  
/*
 * Convert `p` to a non-`const` `void *` by round-tripping the value
 * through `GC_uintptr_t`, thus avoiding cast-away-const compiler
 * warnings.
 */
#ifdef __cplusplus
#  define GC_CAST_AWAY_CONST_PVOID(p) \
    reinterpret_cast</* no const */ void *>(reinterpret_cast<GC_uintptr_t>(p))
#else
#  define GC_CAST_AWAY_CONST_PVOID(p) \
    ((/* no const */ void *)(GC_uintptr_t)(p))
#endif

/**
 * Convenient macros for disappearing links registration working both
 * for debug and non-debug allocated objects, and accepting interior
 * pointers to object.  (`GC_base()` maps the interior pointer to the
 * start of the containing object.)
 */
#define GC_GENERAL_REGISTER_DISAPPEARING_LINK_SAFE(link, obj) \
  GC_general_register_disappearing_link(                      \
      link, GC_base(GC_CAST_AWAY_CONST_PVOID(obj)))
#define GC_REGISTER_LONG_LINK_SAFE(link, obj) \
  GC_register_long_link(link, GC_base(GC_CAST_AWAY_CONST_PVOID(obj)))
1358  
/*
 * Convenient macros over debug and non-debug allocation functions.
 * All these macros (`GC_MALLOC`, `GC_REALLOC`, `GC_MALLOC_ATOMIC`,
 * `GC_STRDUP`, `GC_STRNDUP`, `GC_MALLOC_ATOMIC_UNCOLLECTABLE`,
 * `GC_MALLOC_UNCOLLECTABLE`, `GC_MALLOC_IGNORE_OFF_PAGE`,
 * `GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE`) are guaranteed never to return
 * `NULL` (for a nonzero new size in case of `GC_REALLOC`) unless
 * `GC_oom_fn()` returns `NULL`.
 */
/* Note: `GC_DEBUG_REPLACEMENT` takes precedence over `GC_DEBUG`. */
#ifdef GC_DEBUG_REPLACEMENT
#  define GC_MALLOC(sz) GC_debug_malloc_replacement(sz)
#  define GC_REALLOC(old, sz) GC_debug_realloc_replacement(old, sz)
#elif defined(GC_DEBUG)
#  define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
#  define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
#else
#  define GC_MALLOC(sz) GC_malloc(sz)
#  define GC_REALLOC(old, sz) GC_realloc(old, sz)
#endif /* !GC_DEBUG_REPLACEMENT && !GC_DEBUG */
#ifdef GC_DEBUG
#  define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
#  define GC_STRDUP(s) GC_debug_strdup(s, GC_EXTRAS)
#  define GC_STRNDUP(s, sz) GC_debug_strndup(s, sz, GC_EXTRAS)
#  define GC_MALLOC_ATOMIC_UNCOLLECTABLE(sz) \
    GC_debug_malloc_atomic_uncollectable(sz, GC_EXTRAS)
#  define GC_MALLOC_UNCOLLECTABLE(sz) \
    GC_debug_malloc_uncollectable(sz, GC_EXTRAS)
#  define GC_MALLOC_IGNORE_OFF_PAGE(sz) \
    GC_debug_malloc_ignore_off_page(sz, GC_EXTRAS)
#  define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \
    GC_debug_malloc_atomic_ignore_off_page(sz, GC_EXTRAS)
#else
#  define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
#  define GC_STRDUP(s) GC_strdup(s)
#  define GC_STRNDUP(s, sz) GC_strndup(s, sz)
#  define GC_MALLOC_ATOMIC_UNCOLLECTABLE(sz) GC_malloc_atomic_uncollectable(sz)
#  define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz)
#  define GC_MALLOC_IGNORE_OFF_PAGE(sz) GC_malloc_ignore_off_page(sz)
#  define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \
    GC_malloc_atomic_ignore_off_page(sz)
#endif /* !GC_DEBUG */
1400  
/*
 * Debug/non-debug dispatching wrappers for deallocation, finalizer
 * registration, toggle-ref addition and dirty-marking primitives.
 */
#ifdef GC_DEBUG
#  define GC_FREE(p) GC_debug_free(p)
#  define GC_REGISTER_FINALIZER(p, f, d, of, od) \
    GC_debug_register_finalizer(p, f, d, of, od)
#  define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
    GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
#  define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
    GC_debug_register_finalizer_no_order(p, f, d, of, od)
#  define GC_REGISTER_FINALIZER_UNREACHABLE(p, f, d, of, od) \
    GC_debug_register_finalizer_unreachable(p, f, d, of, od)
#  define GC_TOGGLEREF_ADD(p, is_strong) GC_debug_toggleref_add(p, is_strong)
#  define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
#  define GC_PTR_STORE_AND_DIRTY(p, q) GC_debug_ptr_store_and_dirty(p, q)
#  define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
    GC_GENERAL_REGISTER_DISAPPEARING_LINK_SAFE(link, obj)
#  define GC_REGISTER_LONG_LINK(link, obj) \
    GC_REGISTER_LONG_LINK_SAFE(link, obj)
#  define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n)
#else
#  define GC_FREE(p) GC_free(p)
#  define GC_REGISTER_FINALIZER(p, f, d, of, od) \
    GC_register_finalizer(p, f, d, of, od)
#  define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
    GC_register_finalizer_ignore_self(p, f, d, of, od)
#  define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
    GC_register_finalizer_no_order(p, f, d, of, od)
#  define GC_REGISTER_FINALIZER_UNREACHABLE(p, f, d, of, od) \
    GC_register_finalizer_unreachable(p, f, d, of, od)
#  define GC_TOGGLEREF_ADD(p, is_strong) GC_toggleref_add(p, is_strong)
#  define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
#  define GC_PTR_STORE_AND_DIRTY(p, q) GC_ptr_store_and_dirty(p, q)
#  define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
    GC_general_register_disappearing_link(link, obj)
#  define GC_REGISTER_LONG_LINK(link, obj) GC_register_long_link(link, obj)
#  define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n)
#endif /* !GC_DEBUG */
1437  
/**
 * Convenient macros for object allocation in C++ style.  The use of
 * them also reduces the chance for a misspecified size argument.
 * But, note, that they may expand to something syntactically incorrect
 * if the argument is a complicated type expression.  Note also, unlike
 * C++ new operator, these ones may return `NULL` (in case of out of
 * memory); however these macros are guaranteed never to return `NULL`
 * unless `GC_oom_fn()` returns `NULL`.
 */
#define GC_NEW(t) ((t *)GC_MALLOC(sizeof(t)))
#define GC_NEW_ATOMIC(t) ((t *)GC_MALLOC_ATOMIC(sizeof(t)))
#define GC_NEW_UNCOLLECTABLE(t) ((t *)GC_MALLOC_UNCOLLECTABLE(sizeof(t)))
1450  
#ifdef GC_REQUIRE_WCSDUP
/**
 * Same as `GC_strdup` but for a `wchar_t` string.  Might be
 * unavailable on some targets (or not needed).  `wchar_t` type should
 * be defined in the platform `stddef.h` file.  The function (including
 * its debug variant) is guaranteed never to return `NULL` unless
 * `GC_oom_fn()` returns `NULL`.
 */
GC_API GC_ATTR_MALLOC wchar_t *GC_CALL GC_wcsdup(const wchar_t *)
    GC_ATTR_NONNULL(1);
GC_API GC_ATTR_MALLOC wchar_t *GC_CALL GC_debug_wcsdup(const wchar_t *,
                                                       GC_EXTRA_PARAMS)
    GC_ATTR_NONNULL(1);
/* Debug/non-debug dispatch, analogous to `GC_STRDUP`. */
#  ifdef GC_DEBUG
#    define GC_WCSDUP(s) GC_debug_wcsdup(s, GC_EXTRAS)
#  else
#    define GC_WCSDUP(s) GC_wcsdup(s)
#  endif
#endif /* GC_REQUIRE_WCSDUP */
1470  
/*
 * The finalization.  Some of these primitives are grossly unsafe.
 * The idea is to make them both cheap, and sufficient to build
 * a safer layer, closer to Modula-3, Java, or PCedar finalization.
 * The interface represents my conclusions from a long discussion
 * with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes,
 * Christian Jacobi, and Russ Atkinson.  It is not perfect, and
 * probably nobody else agrees with it.
 */

/** The finalizer callback type: invoked as `(*fn)(obj, client_data)`. */
typedef void(GC_CALLBACK *GC_finalization_proc)(void * /* `obj` */,
                                                void * /* `client_data` */);

/**
 * When `obj` is no longer accessible, invoke `(*fn)(obj, cd)`.  If `a`
 * and `b` are inaccessible, and `a` points to `b` (after disappearing
 * links have been made to disappear), then only `a` will be finalized.
 * (If this does not create any new pointers to `b`, then `b` will be
 * finalized after the next collection.)  Any finalizable object that
 * is reachable from itself by following one or more pointers will not
 * be finalized (or collected).  Thus cycles involving finalizable
 * objects should be avoided, or broken by disappearing links.  All but
 * the last finalizer registered for an object is ignored.  No-op in
 * the find-leak mode.  Finalization may be removed by passing 0 as
 * `fn`.  Finalizers are implicitly unregistered when they are enqueued
 * for finalization (i.e. become ready to be finalized).  The old
 * finalizer and client data are stored in `*ofn` and `*ocd`,
 * respectively.  (`ofn` and/or `ocd` may be `NULL`.  The allocator
 * lock is held while `*ofn` and `*ocd` are updated.  In case of error
 * (no memory to register new finalizer), `*ofn` and `*ocd` remain
 * unchanged.)  `fn` is never invoked on an accessible object, provided
 * hidden pointers are converted to real pointers only if the allocator
 * lock is held, at least in the reader mode, and such conversions are
 * not performed by finalization routines.
 * If `GC_register_finalizer()` is aborted as a result of a signal,
 * then the object may be left with no finalization, even if neither
 * the old nor new finalizer were `NULL`.  `obj` should be the starting
 * address of an object allocated by `GC_malloc` or friends. `obj` may
 * also be `NULL` or point to something outside the collector heap (in
 * this case, `fn` is ignored, `*ofn` and `*ocd` are set to `NULL`).
 * Note that any garbage collectible object referenced by `cd` will be
 * considered accessible until the finalizer is invoked.
 */
GC_API void GC_CALL GC_register_finalizer(void * /* `obj` */,
                                          GC_finalization_proc /* `fn` */,
                                          void * /* `cd` */,
                                          GC_finalization_proc * /* `ofn` */,
                                          void ** /* `ocd` */)
    GC_ATTR_NONNULL(1);
/* The debug-allocation counterpart (cf. `GC_REGISTER_FINALIZER`). */
GC_API void GC_CALL GC_debug_register_finalizer(
    void * /* `obj` */, GC_finalization_proc /* `fn` */, void * /* `cd` */,
    GC_finalization_proc * /* `ofn` */, void ** /* `ocd` */)
    GC_ATTR_NONNULL(1);
1524  
/**
 * Another variant of `GC_register_finalizer` but ignoring self-cycles,
 * i.e. pointers from a finalizable object to itself.  There is
 * a stylistic argument that this is wrong, but it is unavoidable for
 * C++, since the compiler may silently introduce these.  It is also
 * benign in that specific case.  And it helps if finalizable objects
 * are split to avoid cycles.  Note that `cd` will still be viewed as
 * accessible, even if it refers to the object itself.
 */
GC_API void GC_CALL GC_register_finalizer_ignore_self(
    void * /* `obj` */, GC_finalization_proc /* `fn` */, void * /* `cd` */,
    GC_finalization_proc * /* `ofn` */, void ** /* `ocd` */)
    GC_ATTR_NONNULL(1);
/* The debug-allocation counterpart of the above. */
GC_API void GC_CALL GC_debug_register_finalizer_ignore_self(
    void * /* `obj` */, GC_finalization_proc /* `fn` */, void * /* `cd` */,
    GC_finalization_proc * /* `ofn` */, void ** /* `ocd` */)
    GC_ATTR_NONNULL(1);

/**
 * Another variant of `GC_register_finalizer` which ignores all cycles.
 * It should probably only be used by Java implementations.  Note that
 * `cd` will still be viewed as accessible, even if it refers to the
 * object itself.
 */
GC_API void GC_CALL GC_register_finalizer_no_order(
    void * /* `obj` */, GC_finalization_proc /* `fn` */, void * /* `cd` */,
    GC_finalization_proc * /* `ofn` */, void ** /* `ocd` */)
    GC_ATTR_NONNULL(1);
/* The debug-allocation counterpart of the above. */
GC_API void GC_CALL GC_debug_register_finalizer_no_order(
    void * /* `obj` */, GC_finalization_proc /* `fn` */, void * /* `cd` */,
    GC_finalization_proc * /* `ofn` */, void ** /* `ocd` */)
    GC_ATTR_NONNULL(1);

/**
 * This is a special finalizer that is useful when an object's finalizer
 * must be run when the object is known to be no longer reachable, not even
 * from other finalizable objects.  It behaves like "normal" finalization,
 * except that the finalizer is not run while the object is reachable from
 * other objects specifying unordered finalization.  Effectively it allows
 * an object referenced, possibly indirectly, from an unordered finalizable
 * object to override the unordered finalization request.  This can be used
 * in combination with `GC_register_finalizer_no_order` so as to release
 * resources that must not be released while an object can still be brought
 * back to life by other finalizers.  Only works if `GC_java_finalization`
 * is set.  Probably only of interest when implementing a language that
 * requires unordered finalization (e.g. Java, C#).
 */
GC_API void GC_CALL GC_register_finalizer_unreachable(
    void * /* `obj` */, GC_finalization_proc /* `fn` */, void * /* `cd` */,
    GC_finalization_proc * /* `ofn` */, void ** /* `ocd` */)
    GC_ATTR_NONNULL(1);
/* The debug-allocation counterpart of the above. */
GC_API void GC_CALL GC_debug_register_finalizer_unreachable(
    void * /* `obj` */, GC_finalization_proc /* `fn` */, void * /* `cd` */,
    GC_finalization_proc * /* `ofn` */, void ** /* `ocd` */)
    GC_ATTR_NONNULL(1);
1580  
/**
 * A constant indicating a failure due to lack of memory (returned,
 * e.g., by the disappearing-link registration routines below).
 */
#define GC_NO_MEMORY 2

/**
 * This routine may be used to break cycles between finalizable objects,
 * thus causing cyclic finalizable objects to be finalized in
 * the correct order.  The standard use involves calling
 * `GC_register_disappearing_link(&p)`, where `p` is a pointer that is
 * not followed by finalization code, and should not be considered in
 * determining finalization order.  `link` should point to a field of
 * a heap-allocated object.  `*link` will be cleared when the object is
 * found to be inaccessible.  This happens before any finalization code
 * is invoked, and before any decisions about finalization order are
 * made.  This is useful in telling the finalizer that some pointers
 * are not essential for proper finalization.  This may avoid
 * finalization cycles.  Note that the object may be resurrected by
 * another finalizer, and thus the clearing of `*link` may be visible
 * to non-finalization code.  There is an argument that an arbitrary
 * action should be allowed here, instead of just clearing a pointer.
 * But this causes problems if that action alters, or examines
 * connectivity.  Returns `GC_DUPLICATE` if given `link` was already
 * registered, `GC_SUCCESS` if registration succeeded, `GC_NO_MEMORY`
 * if it failed for lack of memory (and `GC_oom_fn` did not handle the
 * problem).  Only exists for backward compatibility, use
 * `GC_general_register_disappearing_link()` instead.
 */
GC_API int GC_CALL GC_register_disappearing_link(void ** /* `link` */)
    GC_ATTR_NONNULL(1);
1609  
/**
 * A slight generalization of `GC_register_disappearing_link`.
 * `*link` is cleared when `obj` first becomes inaccessible.  This can be
 * used to implement weak pointers easily and safely.  Typically `link`
 * will point to a location (in a GC-allocated object or not) holding
 * a disguised pointer to `obj`.  (A pointer inside an "atomic" object
 * is effectively disguised.)  In this way, weak pointers are broken
 * before any object reachable from them gets finalized.  Each `link`
 * may be registered only with one `obj` value, i.e. all objects but
 * the last one (`link` registered with) are ignored.  `link` must be
 * non-`NULL` (and be properly aligned).  `obj` must be a pointer to
 * the beginning of an object allocated by `GC_malloc` or friends.
 * (See also `GC_GENERAL_REGISTER_DISAPPEARING_LINK_SAFE` for a variant
 * accepting an interior pointer to the object.)
 * A link disappears when it is unregistered manually, or when `*link`
 * is cleared, or when the object containing this `link` is garbage
 * collected.  It is unsafe to explicitly deallocate the object
 * containing `link`.  Explicit deallocation of `obj` may or may not
 * cause `link` to eventually be cleared.  No-op in the find-leak mode.
 * This function can be used to implement certain types of weak pointers.
 * Note, however, this generally requires that the allocator lock is held,
 * at least in the reader mode (e.g. using `GC_call_with_reader_lock()`),
 * when the disguised pointer is accessed.  Otherwise a strong pointer
 * could be recreated between the time the collector decides to reclaim
 * the object and the link is cleared.  Returns `GC_SUCCESS` if
 * registration succeeded (a new link is registered), `GC_DUPLICATE` if
 * `link` was already registered (with some object), `GC_NO_MEMORY` if
 * registration failed for lack of memory (and `GC_oom_fn` did not handle
 * the problem), `GC_UNIMPLEMENTED` if `GC_find_leak` is true.
 */
GC_API int GC_CALL GC_general_register_disappearing_link(
    void ** /* `link` */, const void * /* `obj` */) GC_ATTR_NONNULL(1)
    GC_ATTR_NONNULL(2);

/**
 * Moves a `link` previously registered via
 * `GC_general_register_disappearing_link` (or
 * `GC_register_disappearing_link`).  Does not change the target object
 * of the weak reference.  Does not change `*new_link` content.  May be
 * called with `new_link` equal to `link` (to check whether `link` has
 * been registered).  Returns `GC_SUCCESS` on success, `GC_DUPLICATE`
 * if there is already another disappearing link at the new location
 * (never returned if `new_link` is equal to `link`), `GC_NOT_FOUND`
 * if no `link` is registered at the original location.
 */
GC_API int GC_CALL GC_move_disappearing_link(void ** /* `link` */,
                                             void ** /* `new_link` */)
    GC_ATTR_NONNULL(2);

/**
 * Undoes a registration by either `GC_register_disappearing_link()` or
 * `GC_general_register_disappearing_link()`.  Returns 0 if `link` was
 * not actually registered (otherwise returns 1).
 */
GC_API int GC_CALL GC_unregister_disappearing_link(void ** /* `link` */);
1663  
/**
 * Similar to `GC_general_register_disappearing_link` but `*link` only
 * gets cleared when `obj` becomes truly inaccessible.  An object becomes
 * truly inaccessible when it can no longer be resurrected from its
 * finalizer (e.g. by assigning itself to a pointer traceable from root).
 * This can be used to implement "long" weak pointers easily and safely.
 */
GC_API int GC_CALL GC_register_long_link(void ** /* `link` */,
                                         const void * /* `obj` */)
    GC_ATTR_NONNULL(1) GC_ATTR_NONNULL(2);

/**
 * Similar to `GC_move_disappearing_link` but for a `link` previously
 * registered via `GC_register_long_link`.
 */
GC_API int GC_CALL GC_move_long_link(void ** /* `link` */,
                                     void ** /* `new_link` */)
    GC_ATTR_NONNULL(2);

/**
 * Similar to `GC_unregister_disappearing_link` but for `link`
 * registration done by `GC_register_long_link()` (presumably the same
 * 0/1 return convention - TODO: confirm against the implementation).
 */
GC_API int GC_CALL GC_unregister_long_link(void ** /* `link` */);
1688  
/*
 * Support of "toggle-refs" style of external memory management without
 * hooking up to the host retain/release machinery.  The idea of
 * "toggle-refs" is that an external reference to an object is kept and
 * it can be either a strong or weak reference; a weak reference is
 * used when the external peer has no interest in the object, and
 * a strong otherwise.
 */

typedef enum {
  GC_TOGGLE_REF_DROP,   /*< stop the "toggle-refs" processing of the object */
  GC_TOGGLE_REF_STRONG, /*< keep a strong reference to the object */
  GC_TOGGLE_REF_WEAK    /*< keep a weak reference to the object */
} GC_ToggleRefStatus;

/**
 * The callback is to decide (return) the new state of a given object.
 * Invoked by the collector for all objects registered for "toggle-refs"
 * processing.  Invoked with the allocator lock held but the world is
 * running.
 */
typedef GC_ToggleRefStatus(GC_CALLBACK *GC_toggleref_func)(void * /* `obj` */);

/**
 * Set (register) a callback that decides the state of a given object (by,
 * probably, inspecting its native state).  The argument may be 0 (means
 * no callback).  Both the setter and the getter acquire the allocator lock
 * (in the reader mode in case of the getter).
 */
GC_API void GC_CALL GC_set_toggleref_func(GC_toggleref_func);
GC_API GC_toggleref_func GC_CALL GC_get_toggleref_func(void);

/**
 * Register a given object for "toggle-refs" processing.  It will be
 * stored internally and the "toggle-refs" callback will be invoked on
 * the object until the callback returns `GC_TOGGLE_REF_DROP` or the
 * object is collected.  If `is_strong`, then the object is registered
 * with a strong ref, a weak one otherwise.  `obj` should be the starting
 * address of an object allocated by `GC_malloc` (`GC_debug_malloc`)
 * or friends.  Returns `GC_SUCCESS` if registration succeeded (or no
 * callback is registered yet), `GC_NO_MEMORY` if it failed for a lack
 * of memory reason.
 */
GC_API int GC_CALL GC_toggleref_add(void * /* `obj` */, int /* `is_strong` */)
    GC_ATTR_NONNULL(1);
GC_API int GC_CALL GC_debug_toggleref_add(void * /* `obj` */,
                                          int /* `is_strong` */)
    GC_ATTR_NONNULL(1);
1737  
/**
 * Finalizer callback support.  Invoked by the collector (with the allocator
 * lock held) for each unreachable object enqueued for finalization.
 * Zero means no callback.  The setter and the getter acquire the allocator
 * lock too (in the reader mode in case of the getter).
 */
typedef void(GC_CALLBACK *GC_await_finalize_proc)(void * /* `obj` */);
GC_API void GC_CALL GC_set_await_finalize_proc(GC_await_finalize_proc);
GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void);

/**
 * Returns a nonzero value (true) if `GC_invoke_finalizers()` has
 * something to do.  (Useful if finalizers can only be called from some
 * kind of "safe state" and getting into that safe state is expensive.)
 * Does not use any synchronization.
 */
GC_API int GC_CALL GC_should_invoke_finalizers(void);

/**
 * Set the maximum number of finalizers to run during a single invocation
 * of `GC_invoke_finalizers()`.  Zero means no limit.  Both the setter
 * and the getter acquire the allocator lock (in the reader mode in
 * case of the getter).  Note that invocation of `GC_finalize_all()`
 * resets this maximum number.
 */
GC_API void GC_CALL GC_set_interrupt_finalizers(unsigned);
GC_API unsigned GC_CALL GC_get_interrupt_finalizers(void);

/**
 * Run finalizers for all objects that are ready to be finalized.
 * Return the number of finalizers that were run.  Normally this is
 * also called implicitly during some allocations.
 * If `GC_finalize_on_demand` is nonzero, it must be called explicitly.
 */
GC_API int GC_CALL GC_invoke_finalizers(void);
1773  
/*
 * Explicitly tell the collector that an object is reachable
 * at a particular program point.  This prevents the argument
 * pointer from being optimized away, even if it is otherwise no
 * longer needed.  It should have no visible effect in the
 * absence of finalizers or disappearing links.  But it may be
 * needed to prevent finalizers from running while the
 * associated external resource is still in use.
 * The function is sometimes called to keep some object alive.
 */
#if defined(__GNUC__) && !defined(__INTEL_COMPILER) \
    && !(defined(__APPLE__) && defined(__arm__) && defined(__TINYC__))
/* TCC (as of v0.9.28rc) does not support asm on macOS/arm. */
#  if defined(__CHERI_PURE_CAPABILITY__) || defined(__TINYC__)
#    define GC_reachable_here(ptr) \
      __asm__ __volatile__(" " : : "g"(ptr) : "memory")
#  elif defined(__e2k__)
#    define GC_reachable_here(ptr) \
      __asm__ __volatile__(" " : : "r"(ptr) : "memory")
#  else
/* "X" allows the operand to stay in whatever location it already is. */
#    define GC_reachable_here(ptr) \
      __asm__ __volatile__(" " : : "X"(ptr) : "memory")
#  endif
#elif defined(LINT2)
/* The definition is similar to that of `COVERT_DATAFLOW()`. */
#  define GC_reachable_here(ptr) GC_noop1(~(GC_word)(ptr) ^ (~(GC_word)0))
#else
/* Fallback: pass the pointer through a no-op function call. */
#  define GC_reachable_here(ptr) GC_noop1_ptr(GC_CAST_AWAY_CONST_PVOID(ptr))
#endif

/**
 * Make the argument of `GC_word` type appear live to compiler.
 * This could be used to prevent certain compiler false positive (FP)
 * warnings and misoptimizations.  Should be robust against the whole
 * program analysis.
 */
GC_API void GC_CALL GC_noop1(GC_word);

/** Same as `GC_noop1()` but for a pointer. */
GC_API void GC_CALL GC_noop1_ptr(volatile void *);
1814  
/**
 * `GC_set_warn_proc` can be used to redirect or filter warning messages.
 * `p` may not be a `NULL` pointer.  `msg` is a `printf` format string
 * (`arg` must match the format).  Both the setter and the getter acquire
 * the allocator lock (in the reader mode in case of the getter) to avoid
 * data race.  In GC v7.1 and before: the setter returned the value of
 * old `warn_proc`.  In GC v8.2.x and before: `msg` pointer type had
 * no `const` qualifier.
 */
typedef void(GC_CALLBACK *GC_warn_proc)(const char * /* `msg` */,
                                        GC_uintptr_t /* `arg` */);
GC_API void GC_CALL GC_set_warn_proc(GC_warn_proc /* `p` */)
    GC_ATTR_NONNULL(1);
GC_API GC_warn_proc GC_CALL GC_get_warn_proc(void);

/**
 * `GC_ignore_warn_proc` may be used as an argument for `GC_set_warn_proc()`
 * to suppress all warnings (unless statistics printing is turned on).
 * This is recommended for production code (release).
 */
GC_API void GC_CALLBACK GC_ignore_warn_proc(const char *, GC_uintptr_t);

/**
 * Change the file descriptor of the collector log.  Unavailable on some
 * targets.
 */
GC_API void GC_CALL GC_set_log_fd(int);

/**
 * This is invoked on the collector fatal aborts (just before
 * OS-dependent `abort()` or `exit(1)` is called).  Must be non-`NULL`.
 * The default one outputs `msg` to the platform `stderr` provided
 * `msg` is non-`NULL`.  `msg` is `NULL` if invoked before `exit(1)`,
 * otherwise `msg` is non-`NULL` (i.e., if invoked before `abort`).
 * Both the setter and the getter acquire the allocator lock (in the reader
 * mode in case of the getter).  The setter does not change `GC_abort_func`
 * if the library has been compiled with `SMALL_CONFIG` macro defined.
 */
typedef void(GC_CALLBACK *GC_abort_func)(const char * /* `msg` */);
GC_API void GC_CALL GC_set_abort_func(GC_abort_func) GC_ATTR_NONNULL(1);
GC_API GC_abort_func GC_CALL GC_get_abort_func(void);
1856  
/*
 * Clients should define `GC_EXIT_LACKS_NORETURN` macro if the platform
 * `exit()` does not have a `noreturn` attribute.
 */
#ifdef GC_EXIT_LACKS_NORETURN
#  define GC_OOM_ABORT_THROW_ATTRIBUTE /*< empty */
#else
#  define GC_OOM_ABORT_THROW_ATTRIBUTE GC_ATTR_NORETURN
#endif

/** A portable way to abort the application because of not enough memory. */
GC_API GC_OOM_ABORT_THROW_ATTRIBUTE void GC_CALL GC_abort_on_oom(void);
1869  
/*
 * The following is intended to be used by a higher level (e.g.
 * Java-like) finalization facility.  It is expected that finalization
 * code will arrange for hidden pointers to disappear.  Otherwise,
 * objects can be accessed after they have been collected.  Should not
 * be used in the find-leak mode.
 * Note that putting pointers in atomic objects or in non-pointer slots
 * of "typed" objects is equivalent to disguising them in this way, and
 * may have other advantages.  Note also that some code relies on that
 * the least significant bit of the argument (including for `NULL`) is
 * inverted by these primitives.
 * Important: converting a hidden pointer to a real pointer requires
 * verifying that the object still exists; this should involve
 * acquiring the allocator lock, at least in the reader mode, to avoid
 * a race with the collector (e.g., one thread might fetch hidden link
 * value, while another thread might collect the relevant object and
 * reuse the free space for another object).
 */
typedef GC_uintptr_t GC_hidden_pointer;
/* Hiding is a bitwise complement; applying it twice restores the value. */
#define GC_HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
#define GC_REVEAL_POINTER(p) ((void *)GC_HIDE_POINTER(p))

#if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
/*
 * This exists only for compatibility (the GC-prefixed symbols are
 * preferred for new code).
 */
#  define HIDE_POINTER(p) GC_HIDE_POINTER(p)
#  define REVEAL_POINTER(p) GC_REVEAL_POINTER(p)
#endif

/*
 * A slightly modified variant of `GC_HIDE_POINTER` which guarantees
 * not to "hide" `NULL` (i.e. passing zero argument gives zero result,
 * since arithmetic negation maps zero to zero).
 * This might be useful in conjunction with `GC_register_disappearing_link`.
 * Note that unlike `GC_HIDE_POINTER`, inversion of the least significant
 * bit of the argument is not guaranteed.
 */
#if defined(__CHERI_PURE_CAPABILITY__)
#  define GC_HIDE_NZ_POINTER(p) ((GC_hidden_pointer)(-(intptr_t)(p)))
#else
#  define GC_HIDE_NZ_POINTER(p) ((GC_hidden_pointer)(-(GC_signed_word)(p)))
#endif
#define GC_REVEAL_NZ_POINTER(p) ((void *)GC_HIDE_NZ_POINTER(p))
1914  
1915  /*
1916   * The routines to acquire/release the GC (allocator) lock.
1917   * The lock is not reentrant.  `GC_alloc_unlock()` should not be called
1918   * unless the allocator lock is acquired by the current thread.
1919   */
1920  #ifdef GC_THREADS
1921  GC_API void GC_CALL GC_alloc_lock(void);
1922  GC_API void GC_CALL GC_alloc_unlock(void);
1923  #else
1924  /* No need for real locking if the client is single-threaded. */
1925  #  define GC_alloc_lock() (void)0
1926  #  define GC_alloc_unlock() (void)0
1927  #endif /* !GC_THREADS */
1928  
1929  typedef void *(GC_CALLBACK *GC_fn_type)(void * /* `client_data` */);
1930  
1931  /**
1932   * Execute given function with the allocator lock held (in the exclusive
1933   * mode).
1934   */
1935  GC_API void *GC_CALL GC_call_with_alloc_lock(GC_fn_type /* `fn` */,
1936                                               void * /* `client_data` */)
1937      GC_ATTR_NONNULL(1);
1938  
1939  /*
1940   * Execute given function with the allocator lock held in the reader
1941   * (shared) mode.  The 3rd argument (`release`), if nonzero, indicates
1942   * that `fn` might write some data that should be made visible to the
1943   * thread which acquires the allocator lock in the exclusive mode later.
1944   */
1945  #ifdef GC_THREADS
1946  GC_API void *GC_CALL GC_call_with_reader_lock(GC_fn_type /* `fn` */,
1947                                                void * /* `client_data` */,
1948                                                int /* `release` */)
1949      GC_ATTR_NONNULL(1);
1950  #else
1951  #  define GC_call_with_reader_lock(fn, cd, r) ((void)(r), (fn)(cd))
1952  #endif
1953  
1954  /*
1955   * These routines are intended to explicitly notify the collector
1956   * of new threads.  Often this is unnecessary because thread creation
1957   * is implicitly intercepted by the collector, using header-file defines,
1958   * or linker-based interception.  In the long run, the intent is to
1959   * always make redundant registration safe.  In the short run, this is
1960   * being implemented a platform at a time.  The interface is complicated
1961   * by the fact that we probably will not ever be able to automatically
1962   * determine the stack bottom for thread stacks on all platforms.
1963   */
1964  
1965  /**
1966   * Structure representing the bottom (cold end) of a thread stack.
1967   * On most platforms this contains just a single address.
1968   */
1969  struct GC_stack_base {
1970    /* The bottom of the general-purpose stack. */
1971    void *mem_base;
1972  #if defined(__e2k__) || defined(__ia64) || defined(__ia64__) \
1973      || defined(_M_IA64)
1974    /* The bottom of the register stack. */
1975    void *reg_base;
1976  #endif
1977  };
1978  
1979  typedef void *(GC_CALLBACK *GC_stack_base_func)(
1980      struct GC_stack_base * /* `sb` */, void * /* `arg` */);
1981  
1982  /**
1983   * Call a function with a stack base structure corresponding to somewhere in
1984   * the `GC_call_with_stack_base` frame.  This often can be used to provide
1985   * a sufficiently accurate stack bottom.  And we implement it everywhere.
1986   */
1987  GC_API void *GC_CALL GC_call_with_stack_base(GC_stack_base_func /* `fn` */,
1988                                               void * /* `arg` */)
1989      GC_ATTR_NONNULL(1);
1990  
1991  #define GC_SUCCESS 0
1992  
1993  /** Means was already registered. */
1994  #define GC_DUPLICATE 1
1995  
1996  /* Deprecated.  No thread support in the collector. */
1997  #define GC_NO_THREADS 2
1998  
1999  /** Not yet implemented on this platform. */
2000  #define GC_UNIMPLEMENTED 3
2001  
2002  /** Requested `link` not found (returned by `GC_move_disappearing_link()`). */
2003  #define GC_NOT_FOUND 4
2004  
2005  /**
2006   * Start the parallel marker threads, if available.  Useful, e.g.,
2007   * after POSIX `fork` in a child process (provided not followed by
2008   * `exec()`) or in single-threaded clients (provided it is OK for the
2009   * client to perform marking in parallel).  Acquires the allocator lock
2010   * to avoid a race.
2011   */
2012  GC_API void GC_CALL GC_start_mark_threads(void);
2013  
2014  #if defined(GC_DARWIN_THREADS) || defined(GC_WIN32_THREADS)
2015  /**
2016   * Use implicit thread registration and processing (via Win32 `DllMain`
2017   * or Darwin `task_threads`).  Deprecated.  Must be called before
2018   * `GC_INIT()` and other GC routines (or, at least, before going
2019   * multi-threaded).  Performs the collector initialization.  Should be
2020   * avoided if `GC_pthread_create`, `GC_beginthreadex` (or `GC_CreateThread`),
2021   * or `GC_register_my_thread` could be used instead.  Disables parallelized
2022   * garbage collection on Win32.
2023   */
2024  GC_API void GC_CALL GC_use_threads_discovery(void);
2025  #endif
2026  
2027  #ifdef GC_THREADS
2028  /**
2029   * Suggest the collector to use the specific signal to suspend threads.
2030   * Has no effect after the collector initialization and on non-POSIX systems.
2031   */
2032  GC_API void GC_CALL GC_set_suspend_signal(int);
2033  
2034  /**
2035   * Return the signal number (which is a constant after the collector
2036   * initialization) used by the collector to suspend threads on POSIX systems.
2037   * Return -1 otherwise.
2038   */
2039  GC_API int GC_CALL GC_get_suspend_signal(void);
2040  
2041  /**
2042   * Suggest the collector to use the specific signal to resume threads.
2043   * Has no effect after the collector initialization and on non-POSIX systems.
2044   * The same signal might be used for threads suspension and restart.
2045   */
2046  GC_API void GC_CALL GC_set_thr_restart_signal(int);
2047  
2048  /**
2049   * Return the signal number (which is a constant after the collector
2050   * initialization) used by the collector to restart (resume) threads on
2051   * POSIX systems.  Return -1 otherwise.
2052   */
2053  GC_API int GC_CALL GC_get_thr_restart_signal(void);
2054  
2055  /**
2056   * Explicitly enable `GC_register_my_thread()` invocation.
2057   * Done implicitly if a GC thread-creation function is called
2058   * (or implicit thread registration is activated, or the collector is
2059   * compiled with `GC_ALWAYS_MULTITHREADED` macro defined).  Otherwise,
2060   * it must be called from the main (or any previously registered)
2061   * thread between the collector initialization and the first explicit
2062   * registering of a thread (it should be called as late as possible).
2063   * Includes a `GC_start_mark_threads()` call.
2064   */
2065  GC_API void GC_CALL GC_allow_register_threads(void);
2066  
2067  /**
2068   * Register the current thread, with the indicated stack bottom, as
2069   * a new thread whose stack(s) should be traced by the collector.
2070   * If it is not implicitly called by the collector, this must be called
2071   * before a thread can allocate garbage-collected memory, or assign
2072   * pointers to the garbage-collected heap.  Once registered, a thread
2073   * will be stopped during garbage collections.  This call must be
2074   * previously enabled (see `GC_allow_register_threads`).  This should
2075   * never be called from the main thread, where it is always done
2076   * implicitly.  This is normally done implicitly if `GC_` functions are
2077   * called to create the thread, e.g. by including the `gc.h` file (which
2078   * redefines some system functions) before calling the system thread
2079   * creation function.  Nonetheless, thread cleanup routines (e.g.,
2080   * `pthreads` key destructor) typically require manual thread
2081   * registering (and unregistering) if pointers to GC-allocated objects
2082   * are manipulated inside.  It is also always done implicitly on some
2083   * platforms if `GC_use_threads_discovery()` is called at start-up.
2084   * Except for the latter case, the explicit call is normally required
2085   * for threads created by third-party libraries.  A manually registered
2086   * thread requires manual unregistering.  Returns `GC_SUCCESS` on
2087   * success, `GC_DUPLICATE` if already registered.
2088   */
2089  GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *)
2090      GC_ATTR_NONNULL(1);
2091  
2092  /**
2093   * Return 1 (true) if the calling (current) thread is registered with the
2094   * garbage collector, 0 otherwise.  Acquires the allocator lock in the
2095   * reader mode.  If the thread is finished (e.g. running in a destructor
2096   * and not registered manually again), then it is considered as not
2097   * registered.
2098   */
2099  GC_API int GC_CALL GC_thread_is_registered(void);
2100  
2101  /**
2102   * Notify the collector about the stack and the alt-stack of the current
2103   * thread.  `normstack` and `normstack_size` are used to determine the
2104   * "normal" stack boundaries when a thread is suspended while it is on
2105   * an alt-stack.  Acquires the allocator lock in the reader mode.
2106   */
2107  GC_API void GC_CALL GC_register_altstack(void * /* `normstack` */,
2108                                           size_t /* `normstack_size` */,
2109                                           void * /* `altstack` */,
2110                                           size_t /* `altstack_size` */);
2111  
2112  /**
2113   * Unregister the current thread.  Only an explicitly registered thread
2114   * (i.e. for which `GC_register_my_thread()` returns `GC_SUCCESS`)
2115   * is allowed (and required) to call this function.  (As a special
2116   * exception, it is also allowed to once unregister the main thread.)
2117   * The thread may no longer allocate garbage-collected memory or
2118   * manipulate pointers to the garbage-collected heap after making this
2119   * call.  Specifically, if it wants to return or otherwise communicate
2120   * a pointer to the garbage-collected heap to another thread, it must
2121   * do this before calling `GC_unregister_my_thread`, most probably by
2122   * saving it in a global data structure.  Must not be called inside
2123   * a GC callback function (except for `GC_call_with_stack_base()` one).
2124   * Always returns `GC_SUCCESS`.
2125   */
2126  GC_API int GC_CALL GC_unregister_my_thread(void);
2127  
2128  /** Stop/start the world explicitly.  Not recommended for general use. */
2129  GC_API void GC_CALL GC_stop_world_external(void);
2130  GC_API void GC_CALL GC_start_world_external(void);
2131  
2132  /**
2133   * Provide a verifier/modifier of the stack pointer when pushing the
2134   * thread stacks.  This might be useful for a crude integration
2135   * with certain coroutine implementations.  `*sp_ptr` is the captured
2136   * stack pointer of the suspended thread with `pthread_id` (the latter
2137   * is actually of `pthread_t` type).  The functionality is unsupported
2138   * on some targets (the getter always returns 0 in such a case).
2139   * Both the setter and the getter acquire the allocator lock (in the reader
2140   * mode in case of the getter).  The client function (if provided) is called
2141   * with the allocator lock held and, possibly, with the world stopped.
2142   */
2143  typedef void(GC_CALLBACK *GC_sp_corrector_proc)(void ** /* `sp_ptr` */,
2144                                                  void * /* `pthread_id` */);
2145  GC_API void GC_CALL GC_set_sp_corrector(GC_sp_corrector_proc);
2146  GC_API GC_sp_corrector_proc GC_CALL GC_get_sp_corrector(void);
2147  #endif /* GC_THREADS */
2148  
2149  /**
2150   * Wrapper for functions that are likely to block (or, at least, do not
2151   * allocate garbage-collected memory and/or manipulate pointers to the
2152   * garbage-collected heap) for an appreciable length of time.
2153   * While `fn` is running, the collector is said to be in the "inactive"
2154   * state for the current thread (this means that the thread is not
2155   * suspended and the thread's stack frames "belonging" to the functions
2156   * in the "inactive" state are not scanned during garbage collections).
2157   * It is assumed that the collector is already initialized and the
2158   * current thread is registered.  It is allowed for `fn` to call
2159   * `GC_call_with_gc_active()` (even recursively), thus temporarily
2160   * toggling the collector's state back to "active".  The latter
2161   * technique might be used to make stack scanning more precise (i.e.
2162   * scan only stack frames of functions that allocate garbage-collected
2163   * memory and/or manipulate pointers to the garbage-collected heap).
2164   * Acquires the allocator lock in the reader mode (but `fn` is called
2165   * not holding it).
2166   */
2167  GC_API void *GC_CALL GC_do_blocking(GC_fn_type /* `fn` */,
2168                                      void * /* `client_data` */)
2169      GC_ATTR_NONNULL(1);
2170  
2171  /**
2172   * Call a function switching to the "active" state of the collector for
2173   * the current thread (i.e. the user function is temporarily back
2174   * allowed to call any GC function and/or manipulate pointers to the
2175   * garbage-collected heap).  `GC_call_with_gc_active()` has the
2176   * functionality opposite to `GC_do_blocking()` one.  It is assumed
2177   * that the collector is already initialized and the current thread is
2178   * registered.  `fn` may toggle the collector thread's state
2179   * temporarily to "inactive" one by using `GC_do_blocking()`.
2180   * `GC_call_with_gc_active()` often can be used to provide
2181   * a sufficiently accurate stack bottom.  Acquires the allocator lock
2182   * in the reader mode (but `fn` is called not holding it).
2183   */
2184  GC_API void *GC_CALL GC_call_with_gc_active(GC_fn_type /* `fn` */,
2185                                              void * /* `client_data` */)
2186      GC_ATTR_NONNULL(1);
2187  
2188  /**
2189   * Attempt to fill in the `GC_stack_base` structure with the stack
2190   * bottom for this thread.  This appears to be required to implement
2191   * anything like the JNI (Java Native Interface) `AttachCurrentThread`
2192   * in an environment in which new threads are not automatically registered
2193   * with the collector.  It is also unfortunately hard to implement well
2194   * on many platforms.  Returns `GC_SUCCESS` or `GC_UNIMPLEMENTED`.
2195   * Acquires the allocator lock on some platforms.
2196   */
2197  GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *)
2198      GC_ATTR_NONNULL(1);
2199  
2200  /**
2201   * Fill in the `GC_stack_base` structure with the cold end (bottom) of
2202   * the stack of the current thread (or coroutine).
2203   * Unlike `GC_get_stack_base`, it retrieves the value stored in the
2204   * collector (which is initially set by the collector upon the thread
2205   * is started or registered manually but it could be later updated by
2206   * client using `GC_set_stackbottom`).  Returns the GC-internal
2207   * non-`NULL` handle of the thread which could be passed to
2208   * `GC_set_stackbottom()` later.  It is assumed that the collector is
2209   * already initialized and the thread is registered.  Acquires the
2210   * allocator lock in the reader mode.
2211   */
2212  GC_API void *GC_CALL GC_get_my_stackbottom(struct GC_stack_base *)
2213      GC_ATTR_NONNULL(1);
2214  
2215  /**
2216   * Set the cold end of the user (coroutine) stack of the specified thread.
2217   * The GC thread handle (`gc_thread_handle`) is either the one returned by
2218   * `GC_get_my_stackbottom()` or `NULL` (the latter designates the current
2219   * thread).  The caller should hold the allocator lock (e.g. using
2220   * `GC_call_with_reader_lock()` with `release` argument set to 1), the
2221   * reader mode should be enough typically, at least for the collector
2222   * itself (the client is responsible to avoid data race between this and
2223   * `GC_get_my_stackbottom` functions if the client acquires the allocator
2224   * lock in the reader mode).  Also, the function could be used for setting
2225   * `GC_stackbottom` value (the bottom of the primordial thread) before the
2226   * collector is initialized (the allocator lock is not needed to be
2227   * acquired at all in this case).
2228   */
2229  GC_API void GC_CALL GC_set_stackbottom(void * /* `gc_thread_handle` */,
2230                                         const struct GC_stack_base *)
2231      GC_ATTR_NONNULL(2);
2232  
2233  /*
2234   * The following routines are primarily intended for use with a preprocessor
2235   * which inserts calls to check C pointer arithmetic.  They indicate failure
2236   * by invoking the corresponding print procedure (`GC_same_obj_print_proc`,
2237   * `GC_is_valid_displacement_print_proc` or `GC_is_visible_print_proc`).
2238   */
2239  
2240  /**
2241   * Checked pointer pre- and post-increment operations.  Note that the second
2242   * argument (`how_much`) is in units of bytes, not multiples of the object
2243   * size.  This should either be invoked from a macro, or the call should be
2244   * automatically generated.
2245   */
2246  GC_API void *GC_CALL GC_pre_incr(void **, ptrdiff_t /* `how_much` */)
2247      GC_ATTR_NONNULL(1);
2248  GC_API void *GC_CALL GC_post_incr(void **, ptrdiff_t /* `how_much` */)
2249      GC_ATTR_NONNULL(1);
2250  
2251  /**
2252   * Check that `p` and `q` point to the same object.
2253   * `GC_same_obj_print_proc` is called (fail by default) if they do not.
2254   * Succeeds, as well, if neither `p` nor `q` points to the heap.
2255   * (May succeed also if both `p` and `q` point to between heap objects.)
2256   * Returns the first argument (`p`).  (The returned value may be hard to
2257   * use due to typing issues.  But if we had a suitable preprocessor...)
2258   * We assume this is somewhat performance critical (it should not be
2259   * called by production code, of course, but it can easily make even
2260   * debugging intolerably slow).
2261   */
2262  GC_API void *GC_CALL GC_same_obj(void * /* `p` */, void * /* `q` */);
2263  
2264  /**
2265   * Check that `p` is visible to the collector as a possibly pointer
2266   * containing location.  If it is not, call `GC_is_visible_print_proc`
2267   * (fail by default).  Always returns the argument (`p`).
2268   * May erroneously succeed in hard cases.  The function is intended for
2269   * debugging use with untyped allocations.  (The idea is that it should
2270   * be possible, though slow, to add such a call to all indirect pointer
2271   * stores.)  Currently almost useless for the multi-threaded worlds.
2272   */
2273  GC_API void *GC_CALL GC_is_visible(void * /* `p` */);
2274  
2275  /**
2276   * Check that if `p` is a pointer to a heap page, then it points to
2277   * a valid displacement within a heap object.  If it is not, invoke
2278   * `GC_is_valid_displacement_print_proc` (fail by default).
2279   * Always returns the argument (`p`).  Uninteresting in the
2280   * all-interior-pointers mode.  Note that we do not acquire the
2281   * allocator lock, since nothing relevant about the header should
2282   * change while we have a valid object pointer to the block.
2283   */
2284  GC_API void *GC_CALL GC_is_valid_displacement(void * /* `p` */);
2285  
2286  /**
2287   * Explicitly dump the collector state.  This is most often called from
2288   * the debugger, or by setting the `GC_DUMP_REGULARLY` environment
2289   * variable, but it may be useful to call it from client code during
2290   * debugging.  The current collection number is printed in the header
2291   * of the dump.  Acquires the allocator lock in the reader mode to
2292   * avoid data race.  Defined only if the library has been compiled
2293   * without `NO_DEBUGGING` macro defined.
2294   */
2295  GC_API void GC_CALL GC_dump(void);
2296  
2297  /**
2298   * The same as `GC_dump` but allows to specify the name of dump and
2299   * does not acquire the allocator lock.  If `name` is non-`NULL`, it is
2300   * printed to help identifying individual dumps.  Otherwise the current
2301   * collection number is used as the name.  Defined only if the library
2302   * has been compiled without `NO_DEBUGGING` macro defined.
2303   */
2304  GC_API void GC_CALL GC_dump_named(const char * /* `name` */);
2305  
2306  /**
2307   * Dump information about each block of every GC memory section.
2308   * Defined only if the library has been compiled without `NO_DEBUGGING`
2309   * macro defined.
2310   */
2311  GC_API void GC_CALL GC_dump_regions(void);
2312  
2313  /**
2314   * Dump information about every registered disappearing link and
2315   * finalizable object.  Defined only if the library has been compiled
2316   * without `NO_DEBUGGING` macro defined.
2317   */
2318  GC_API void GC_CALL GC_dump_finalization(void);
2319  
2320  typedef enum {
2321    GC_HEAP_SECTION_TYPE_FREE,
2322    GC_HEAP_SECTION_TYPE_PADDING,
2323    GC_HEAP_SECTION_TYPE_USED,
2324    GC_HEAP_SECTION_TYPE_UNMAPPED,
2325    GC_HEAP_SECTION_TYPE_FORWARDING,
2326    GC_HEAP_SECTION_TYPE_WHOLE_SECT
2327  } GC_heap_section_type;
2328  
2329  typedef void(GC_CALLBACK *GC_heap_section_proc)(
2330      void * /* `start` */, void * /* `finish` */,
2331      GC_heap_section_type /* `type` */, void * /* `client_data` */);
2332  
2333  /**
2334   * Apply `fn` to each heap section and each heap block inside.
2335   * Similar to `GC_apply_to_all_blocks()`.  Assumes the allocator lock
2336   * is held at least in the reader mode, but no assertion about it
2337   * by design.  Defined only if the library has been compiled without
2338   * `NO_DEBUGGING` macro defined.
2339   */
2340  GC_API void GC_CALL GC_foreach_heap_section_inner(GC_heap_section_proc fn,
2341                                                    void *client_data)
2342      GC_ATTR_NONNULL(1);
2343  
2344  /*
2345   * Safer, but slow, pointer addition.  Probably useful mainly with
2346   * a preprocessor.  Useful only for heap pointers.  Only the macros
2347   * without trailing digits are meant to be used by clients.  These are
2348   * designed to model the available C pointer arithmetic expressions.
2349   * Even then, these are probably more useful as documentation than as
2350   * a part of the API.  Note that `GC_PTR_ADD()` evaluates the first
2351   * argument more than once.
2352   */
2353  #if defined(GC_DEBUG) && (defined(__GNUC__) || defined(__clang__))
2354  #  define GC_PTR_ADD3(x, n, type_of_result) \
2355      ((type_of_result)GC_same_obj((x) + (n), (x)))
2356  #  define GC_PRE_INCR3(x, n, type_of_result) \
2357      ((type_of_result)GC_pre_incr((void **)&(x), (n) * sizeof(*(x))))
2358  #  define GC_POST_INCR3(x, n, type_of_result) \
2359      ((type_of_result)GC_post_incr((void **)&(x), (n) * sizeof(*(x))))
2360  #  define GC_PTR_ADD(x, n) GC_PTR_ADD3(x, n, __typeof__(x))
2361  #  define GC_PRE_INCR(x, n) GC_PRE_INCR3(x, n, __typeof__(x))
2362  #  define GC_POST_INCR(x) GC_POST_INCR3(x, 1, __typeof__(x))
2363  #  define GC_POST_DECR(x) GC_POST_INCR3(x, -1, __typeof__(x))
2364  #else /* !GC_DEBUG || !__GNUC__ */
2365  /*
2366   * We cannot do this right without `typeof`, which ANSI decided was not
2367   * sufficiently useful.  Without it we resort to the non-debug variant.
2368   */
2369  /* TODO: This should eventually support C++0x `decltype`. */
2370  #  define GC_PTR_ADD(x, n) ((x) + (n))
2371  #  define GC_PRE_INCR(x, n) ((x) += (n))
2372  #  define GC_POST_INCR(x) ((x)++)
2373  #  define GC_POST_DECR(x) ((x)--)
2374  #endif /* !GC_DEBUG || !__GNUC__ */
2375  
2376  /* Safer assignment of a pointer to a non-stack location. */
2377  #ifdef GC_DEBUG
2378  #  define GC_PTR_STORE(p, q)              \
2379      (*(void **)GC_is_visible((void *)(p)) \
2380       = GC_is_valid_displacement((void *)(q)))
2381  #else
2382  #  define GC_PTR_STORE(p, q) (*(void **)(p) = (void *)(q))
2383  #endif
2384  
2385  /*
2386   * `GC_PTR_STORE_AND_DIRTY(p, q)` is equivalent to `GC_PTR_STORE(p, q)`
2387   * followed by `GC_END_STUBBORN_CHANGE(p)` and `GC_reachable_here(q)`
2388   * (assuming `p` and `q` do not have side effects).
2389   */
2390  GC_API void GC_CALL GC_ptr_store_and_dirty(void * /* `p` */,
2391                                             const void * /* `q` */);
2392  GC_API void GC_CALL GC_debug_ptr_store_and_dirty(void * /* `p` */,
2393                                                   const void * /* `q` */);
2394  
2395  #ifdef GC_PTHREADS
2396  /*
2397   * For `pthreads` support, we generally need to intercept a number of
2398   * thread library calls.  We do that here by macro defining them.
2399   */
2400  #  ifdef __cplusplus
2401  } /* extern "C" */
2402  #  endif
2403  #  include "gc_pthread_redirects.h"
2404  #  ifdef __cplusplus
2405  extern "C" {
2406  #  endif
2407  #endif
2408  
2409  /**
2410   * This returns a list of objects with the link pointer located at the
2411   * beginning of each object.  The use of such list can greatly reduce
2412   * lock contention problems, since the allocator lock can be acquired
2413   * and released many fewer times.  Note that there is no "atomic"
2414   * variant of this function, as otherwise the links would not be seen
2415   * by the collector.  If the argument (`lb`) is zero, then it is
2416   * treated as 1.  The function is guaranteed never to return `NULL`
2417   * unless `GC_oom_fn()` returns `NULL`.
2418   */
2419  GC_API GC_ATTR_MALLOC void *GC_CALL GC_malloc_many(size_t /* `lb` */);
2420  
2421  /* Retrieve the next element in the list returned by `GC_malloc_many()`. */
2422  #define GC_NEXT(p) (*(void **)(p))
2423  
2424  /**
2425   * A filter function to control the scanning of dynamic libraries.
2426   * If implemented, called by the collector before registering a dynamic
2427   * library (discovered by the collector) section as a static data root
2428   * (called only as a last reason not to register).  The filename
2429   * (`dlpi_name`) of the library, the address and the length of the
2430   * memory region (section) are passed.  This routine should return
2431   * a nonzero value if that region should be scanned.  Always called
2432   * with the allocator lock held.  Depending on the platform, might be
2433   * called with the world stopped.
2434   */
2435  typedef int(GC_CALLBACK *GC_has_static_roots_func)(
2436      const char * /* `dlpi_name` */, void * /* `section_start` */,
2437      size_t /* `section_size` */);
2438  
2439  /**
2440   * Register a new callback (a user-supplied filter) to control the
2441   * scanning of dynamic libraries.  Replaces any previously registered
2442   * callback.  May be 0 (means no filtering).  May be unused on some
2443   * platforms (if the filtering is unimplemented or inappropriate).
2444   */
2445  GC_API void
2446      GC_CALL GC_register_has_static_roots_callback(GC_has_static_roots_func);
2447  
2448  #if !defined(CPPCHECK) && !defined(GC_WINDOWS_H_INCLUDED) && defined(WINAPI)
2449  /* Platform `windows.h` file is included before `gc.h` file. */
2450  #  define GC_WINDOWS_H_INCLUDED
2451  #endif
2452  
2453  #if defined(GC_WIN32_THREADS)                      \
2454      && (!defined(GC_PTHREADS) || defined(GC_BUILD) \
2455          || defined(GC_WINDOWS_H_INCLUDED))
2456  /*
2457   * Note: for Cygwin and pthreads-win32, this is skipped unless platform
2458   * `windows.h` file is included before `gc.h` file.
2459   */
2460  
2461  #  if (!defined(GC_NO_THREAD_DECLS) || defined(GC_BUILD)) \
2462        && !defined(GC_DONT_INCL_WINDOWS_H) /*< NOTE(review): near-duplicate of `GC_DONT_INCLUDE_WINDOWS_H` tested below (it skips only the `windows.h` include, while this one skips the whole declarations section); confirm the two distinct spellings are intentional */
2463  
2464  /*
2465   * Including platform `windows.h` file in an `extern "C"` context
2466   * no longer works.
2467   */
2468  #    ifdef __cplusplus
2469  }
2470  #    endif
2471  
2472  #    if !defined(_WIN32_WCE) && !defined(__CEGCC__)
2473  #      include <process.h> /* for `_beginthreadex`, `_endthreadex` */
2474  #    endif
2475  
2476  #    if defined(GC_BUILD) || !defined(GC_DONT_INCLUDE_WINDOWS_H)
2477  #      include <windows.h>
2478  #      define GC_WINDOWS_H_INCLUDED
2479  #    endif
2480  
2481  #    ifdef __cplusplus
2482  extern "C" {
2483  #    endif
2484  
2485  #    ifdef GC_UNDERSCORE_STDCALL
2486  /*
2487   * Explicitly prefix exported/imported WINAPI (`__stdcall`) symbols
2488   * with "_" (underscore).  Might be useful if MinGW/x86 is used.
2489   */
2490  #      define GC_CreateThread _GC_CreateThread
2491  #      define GC_ExitThread _GC_ExitThread
2492  #    endif
2493  
2494  #    ifndef DECLSPEC_NORETURN
2495  /* Typically defined in platform `winnt.h` file. */
2496  #      ifdef GC_WINDOWS_H_INCLUDED
2497  #        define DECLSPEC_NORETURN /*< empty */
2498  #      else
2499  #        define DECLSPEC_NORETURN GC_ATTR_NORETURN
2500  #      endif
2501  #    endif
2502  
2503  #    ifdef _WIN64
2504  #      define GC_WIN32_SIZE_T GC_uintptr_t
2505  #    elif defined(GC_WINDOWS_H_INCLUDED)
2506  #      define GC_WIN32_SIZE_T DWORD
2507  #    else
2508  #      define GC_WIN32_SIZE_T unsigned long
2509  #    endif
2510  
2511  #    ifdef GC_INSIDE_DLL
2512  /* Export GC `DllMain()` to be invoked from the client `DllMain`. */
2513  #      ifdef GC_UNDERSCORE_STDCALL
2514  #        define GC_DllMain _GC_DllMain
2515  #      endif
2516  #      ifdef GC_WINDOWS_H_INCLUDED
2517  GC_API BOOL WINAPI GC_DllMain(HINSTANCE /* `inst` */, ULONG /* `reason` */,
2518                                LPVOID /* `reserved` */);
2519  #      else
2520  GC_API int __stdcall GC_DllMain(void *, unsigned long, void *);
2521  #      endif
2522  #    endif /* GC_INSIDE_DLL */
2523  
2524  /*
2525   * All threads must be created using `GC_CreateThread` or
2526   * `GC_beginthreadex`, or must explicitly call `GC_register_my_thread`
2527   * (and call `GC_unregister_my_thread` before thread termination), so
2528   * that they will be recorded in the thread table.  For backward
2529   * compatibility, it is possible to build the collector with `GC_DLL`
2530   * macro defined, and to call `GC_use_threads_discovery()`.
2531   * This implicitly registers all created threads, but appears to be
2532   * less robust.  Currently the collector expects all threads to fall
2533   * through and terminate normally, or call `GC_endthreadex` or
2534   * `GC_ExitThread`, so that the thread is properly unregistered.
2535   */
2536  #    ifdef GC_WINDOWS_H_INCLUDED
2537  GC_API HANDLE WINAPI GC_CreateThread(
2538      LPSECURITY_ATTRIBUTES /* `lpThreadAttributes` */,
2539      GC_WIN32_SIZE_T /* `dwStackSize` */,
2540      LPTHREAD_START_ROUTINE /* `lpStartAddress` */, LPVOID /* `lpParameter` */,
2541      DWORD /* `dwCreationFlags` */, LPDWORD /* `lpThreadId` */);
2542  
2543  GC_API DECLSPEC_NORETURN void WINAPI GC_ExitThread(DWORD /* `dwExitCode` */);
2544  #    else /* !GC_WINDOWS_H_INCLUDED */
2545  struct _SECURITY_ATTRIBUTES; /*< opaque forward declaration (no `windows.h`) */
2546  GC_API void *__stdcall GC_CreateThread(struct _SECURITY_ATTRIBUTES *,
2547                                         GC_WIN32_SIZE_T,
2548                                         unsigned long(__stdcall *)(void *),
2549                                         void *, unsigned long, unsigned long *);
2550  GC_API DECLSPEC_NORETURN void __stdcall GC_ExitThread(unsigned long);
2551  #    endif
2552  
2553  #    if !defined(_WIN32_WCE) && !defined(__CEGCC__)
2554  GC_API GC_uintptr_t GC_CALL GC_beginthreadex(void * /* `security` */,
2555                                               unsigned /* `stack_size` */,
2556                                               unsigned(__stdcall *)(void *),
2557                                               void * /* `arglist` */,
2558                                               unsigned /* `initflag` */,
2559                                               unsigned * /* `thrdaddr` */);
2560  
2561  /*
2562   * Note: `_endthreadex()` is not currently marked as `noreturn` in VC++
2563   * and MinGW headers, so we do not mark it either.
2564   */
2565  GC_API void GC_CALL GC_endthreadex(unsigned /* `retval` */);
2566  #    endif /* !_WIN32_WCE */
2567  
2568  #  endif /* !GC_NO_THREAD_DECLS */
2569  
2570  #  ifdef GC_WINMAIN_REDIRECT
2571  /*
2572   * The collector provides the real `WinMain()`, which starts a new thread
2573   * to call `GC_WinMain()` after initializing the collector.
2574   */
2575  #    define WinMain GC_WinMain
2576  #  endif
2577  
2578  /* For backward compatibility only (the old name of the function). */
2579  #  define GC_use_DllMain GC_use_threads_discovery
2580  
2581  #  ifndef GC_NO_THREAD_REDIRECTS
2582  #    define CreateThread GC_CreateThread
2583  #    define ExitThread GC_ExitThread
2584  #    undef _beginthreadex
2585  #    define _beginthreadex GC_beginthreadex
2586  #    undef _endthreadex
2587  #    define _endthreadex GC_endthreadex
2588  /* `#define _beginthread { "Use _beginthreadex instead of _beginthread" }` */
2589  #  endif /* !GC_NO_THREAD_REDIRECTS */
2590  
2591  #endif /* GC_WIN32_THREADS */
2592  
2593  /**
2594   * The setter and the getter for switching "unmap as much as possible"
2595   * mode on (1) and off (0).  Has no effect unless unmapping is turned on.
2596   * Initial value is controlled by `GC_FORCE_UNMAP_ON_GCOLLECT` macro.
2597   * The setter and the getter are unsynchronized.
2598   */
2599  GC_API void GC_CALL GC_set_force_unmap_on_gcollect(int);
2600  GC_API int GC_CALL GC_get_force_unmap_on_gcollect(void);
2601  
2602  /*
2603   * Fully portable code should call `GC_INIT()` from the main program
2604   * before making any other `GC_` calls.  On most platforms this is
2605   * a no-op and the collector self-initializes.  But a number of
2606   * platforms make that too hard.  A `GC_INIT()` call is required if the
2607   * collector is built with `THREAD_LOCAL_ALLOC` macro defined and the
2608   * initial allocation call is not to `GC_malloc` or `GC_malloc_atomic`.
2609   */
2610  
2611  #if defined(__CYGWIN32__) || defined(__CYGWIN__)
2612  /*
2613   * Similarly gnu-win32 DLLs need explicit initialization from the
2614   * main program, as does AIX.
2615   */
2616  #  ifdef __x86_64__
2617  /* Cygwin/x86_64 does not add leading underscore to symbols anymore. */
2618  extern int __data_start__[], __data_end__[];
2619  extern int __bss_start__[], __bss_end__[];
/* Pick the lower of the two section starts and the higher of the two ends. */
2620  #    define GC_DATASTART                                         \
2621        (GC_ADDR_LT((char *)__data_start__, (char *)__bss_start__) \
2622             ? (void *)__data_start__                              \
2623             : (void *)__bss_start__)
2624  #    define GC_DATAEND                                       \
2625        (GC_ADDR_LT((char *)__bss_end__, (char *)__data_end__) \
2626             ? (void *)__data_end__                            \
2627             : (void *)__bss_end__)
2628  #  else
2629  extern int _data_start__[], _data_end__[], _bss_start__[], _bss_end__[];
2630  #    define GC_DATASTART                                       \
2631        (GC_ADDR_LT((char *)_data_start__, (char *)_bss_start__) \
2632             ? (void *)_data_start__                             \
2633             : (void *)_bss_start__)
2634  #    define GC_DATAEND                                     \
2635        (GC_ADDR_LT((char *)_bss_end__, (char *)_data_end__) \
2636             ? (void *)_data_end__                           \
2637             : (void *)_bss_end__)
2638  #  endif /* !__x86_64__ */
2639  /*
2640   * This is required at least if the collector is in a DLL; and does not
2641   * hurt.
2642   */
2643  #  define GC_INIT_CONF_ROOTS                \
2644      GC_add_roots(GC_DATASTART, GC_DATAEND); \
2645      GC_gcollect() /*< for black-listing */
2646  #elif defined(_AIX)
2647  extern int _data[], _end[];
2648  #  define GC_DATASTART ((void *)_data)
2649  #  define GC_DATAEND ((void *)_end)
2650  #  define GC_INIT_CONF_ROOTS GC_add_roots(GC_DATASTART, GC_DATAEND)
2651  #elif (defined(HOST_ANDROID) || defined(__ANDROID__)) \
2652      && defined(IGNORE_DYNAMIC_LOADING)
2653  /*
2654   * This is ugly but seems the only way to register data roots of the
2655   * client shared library if the GC dynamic loading support is off.
2656   */
2657  #  pragma weak __dso_handle /*< declared weak: may be null if absent */
2658  extern int __dso_handle[];
2659  GC_API void *GC_CALL GC_find_limit(void * /* `start` */, int /* `up` */);
2660  #  define GC_INIT_CONF_ROOTS                                               \
2661      (void)(__dso_handle != 0                                               \
2662                 ? (GC_add_roots(__dso_handle,                               \
2663                                 GC_find_limit(__dso_handle, 1 /* `up` */)), \
2664                    0)                                                       \
2665                 : 0)
2666  #else
2667  #  define GC_INIT_CONF_ROOTS (void)0
2668  #endif
2669  
2670  #ifdef GC_DONT_EXPAND
2671  /* Set `GC_dont_expand` to true at start-up. */
2672  #  define GC_INIT_CONF_DONT_EXPAND GC_set_dont_expand(1)
2673  #else
2674  #  define GC_INIT_CONF_DONT_EXPAND (void)0
2675  #endif
2676  
2677  #ifdef GC_FORCE_UNMAP_ON_GCOLLECT
2678  /* Turn on "unmap as much as possible on explicit GC" mode at start-up. */
2679  #  define GC_INIT_CONF_FORCE_UNMAP_ON_GCOLLECT \
2680      GC_set_force_unmap_on_gcollect(1)
2681  #else
2682  #  define GC_INIT_CONF_FORCE_UNMAP_ON_GCOLLECT (void)0
2683  #endif
2684  
2685  #ifdef GC_DONT_GC
2686  /*
2687   * This is for debugging only (useful if environment variables are
2688   * unsupported); cannot call `GC_disable()` as this goes before the
2689   * collector initialization.
2690   */
2691  #  define GC_INIT_CONF_MAX_RETRIES (void)(GC_dont_gc = 1)
2692  #elif defined(GC_MAX_RETRIES) && !defined(CPPCHECK)
2693  /* Set `GC_max_retries` to the desired value at start-up. */
2694  #  define GC_INIT_CONF_MAX_RETRIES GC_set_max_retries(GC_MAX_RETRIES)
2695  #else
2696  #  define GC_INIT_CONF_MAX_RETRIES (void)0
2697  #endif
2698  
2699  #if defined(GC_ALLOCD_BYTES_PER_FINALIZER) && !defined(CPPCHECK)
2700  /* Set `GC_allocd_bytes_per_finalizer` to the desired value at start-up. */
2701  #  define GC_INIT_CONF_ALLOCD_BYTES_PER_FINALIZER \
2702      GC_set_allocd_bytes_per_finalizer(GC_ALLOCD_BYTES_PER_FINALIZER)
2703  #else
2704  #  define GC_INIT_CONF_ALLOCD_BYTES_PER_FINALIZER (void)0
2705  #endif
2706  
2707  #if defined(GC_FREE_SPACE_DIVISOR) && !defined(CPPCHECK)
2708  /* Set `GC_free_space_divisor` to the desired value at start-up. */
2709  #  define GC_INIT_CONF_FREE_SPACE_DIVISOR \
2710      GC_set_free_space_divisor(GC_FREE_SPACE_DIVISOR)
2711  #else
2712  #  define GC_INIT_CONF_FREE_SPACE_DIVISOR (void)0
2713  #endif
2714  
2715  #if defined(GC_FULL_FREQ) && !defined(CPPCHECK)
2716  /* Set `GC_full_freq` to the desired value at start-up. */
2717  #  define GC_INIT_CONF_FULL_FREQ GC_set_full_freq(GC_FULL_FREQ)
2718  #else
2719  #  define GC_INIT_CONF_FULL_FREQ (void)0
2720  #endif
2721  
2722  #if defined(GC_TIME_LIMIT) && !defined(CPPCHECK)
2723  /* Set `GC_time_limit` (in milliseconds) to the desired value at start-up. */
2724  #  define GC_INIT_CONF_TIME_LIMIT GC_set_time_limit(GC_TIME_LIMIT)
2725  #else
2726  #  define GC_INIT_CONF_TIME_LIMIT (void)0
2727  #endif
2728  
2729  #if defined(GC_MARKERS) && defined(GC_THREADS) && !defined(CPPCHECK)
2730  /*
2731   * Set the number of marker threads (including the initiating one) to
2732   * the desired value at start-up.
2733   */
2734  #  define GC_INIT_CONF_MARKERS GC_set_markers_count(GC_MARKERS)
2735  #else
2736  #  define GC_INIT_CONF_MARKERS (void)0
2737  #endif
2738  
2739  #if defined(GC_SIG_SUSPEND) && defined(GC_THREADS) && !defined(CPPCHECK)
/* Set the signal used to suspend threads to the desired value at start-up. */
2740  #  define GC_INIT_CONF_SUSPEND_SIGNAL GC_set_suspend_signal(GC_SIG_SUSPEND)
2741  #else
2742  #  define GC_INIT_CONF_SUSPEND_SIGNAL (void)0
2743  #endif
2744  
2745  #if defined(GC_SIG_THR_RESTART) && defined(GC_THREADS) && !defined(CPPCHECK)
/* Set the signal used to restart threads to the desired value at start-up. */
2746  #  define GC_INIT_CONF_THR_RESTART_SIGNAL \
2747      GC_set_thr_restart_signal(GC_SIG_THR_RESTART)
2748  #else
2749  #  define GC_INIT_CONF_THR_RESTART_SIGNAL (void)0
2750  #endif
2751  
2752  #if defined(GC_MAXIMUM_HEAP_SIZE) && !defined(CPPCHECK)
2753  /*
2754   * Limit the heap size to the desired value (useful for debugging).
2755   * The limit could be overridden either at the program start-up by
2756   * the similar environment variable or anytime later by the corresponding
2757   * API function call.
2758   */
2759  #  define GC_INIT_CONF_MAXIMUM_HEAP_SIZE \
2760      GC_set_max_heap_size(GC_MAXIMUM_HEAP_SIZE)
2761  #else
2762  #  define GC_INIT_CONF_MAXIMUM_HEAP_SIZE (void)0
2763  #endif
2764  
2765  #ifdef GC_IGNORE_WARN
2766  /* Turn off all warnings at start-up (after the collector initialization). */
2767  #  define GC_INIT_CONF_IGNORE_WARN GC_set_warn_proc(GC_ignore_warn_proc)
2768  #else
2769  #  define GC_INIT_CONF_IGNORE_WARN (void)0
2770  #endif
2771  
2772  #if defined(GC_INITIAL_HEAP_SIZE) && !defined(CPPCHECK)
2773  /* Set heap size to the desired value at start-up (expands to a block). */
2774  #  define GC_INIT_CONF_INITIAL_HEAP_SIZE                                  \
2775      {                                                                     \
2776        size_t heap_size = GC_get_heap_size();                              \
2777        if (heap_size < (size_t)(GC_INITIAL_HEAP_SIZE))                     \
2778          (void)GC_expand_hp(((size_t)(GC_INITIAL_HEAP_SIZE)) - heap_size); \
2779      }
2780  #else
2781  #  define GC_INIT_CONF_INITIAL_HEAP_SIZE (void)0
2782  #endif
2783  
2784  /**
2785   * Portable clients should call this at the program start-up.
2786   * Moreover, some platforms require this call to be done strictly from
2787   * the primordial thread.  Multiple invocations are harmless.
2788   */
2789  #define GC_INIT()                                     \
2790    {                                                   \
2791      GC_INIT_CONF_DONT_EXPAND; /*< pre-init */         \
2792      GC_INIT_CONF_FORCE_UNMAP_ON_GCOLLECT;             \
2793      GC_INIT_CONF_MAX_RETRIES;                         \
2794      GC_INIT_CONF_ALLOCD_BYTES_PER_FINALIZER;          \
2795      GC_INIT_CONF_FREE_SPACE_DIVISOR;                  \
2796      GC_INIT_CONF_FULL_FREQ;                           \
2797      GC_INIT_CONF_TIME_LIMIT;                          \
2798      GC_INIT_CONF_MARKERS;                             \
2799      GC_INIT_CONF_SUSPEND_SIGNAL;                      \
2800      GC_INIT_CONF_THR_RESTART_SIGNAL;                  \
2801      GC_INIT_CONF_MAXIMUM_HEAP_SIZE;                   \
2802      GC_init();          /*< real GC initialization */ \
2803      GC_INIT_CONF_ROOTS; /*< post-init */              \
2804      GC_INIT_CONF_IGNORE_WARN;                         \
2805      GC_INIT_CONF_INITIAL_HEAP_SIZE;                   \
2806    }
2807  
2808  /**
2809   * win32s may not free all resources on process exit.
2810   * This explicitly deallocates the heap.  Defined only for Windows.
2811   */
2812  GC_API void GC_CALL GC_win32_free_heap(void);
2813  
2814  #if defined(__SYMBIAN32__)
/* Symbian-specific explicit registration of the global static data roots. */
2815  GC_API void GC_CALL GC_init_global_static_roots(void);
2816  #endif
2817  
2818  #ifdef __cplusplus
2819  } /* extern "C" */
2820  #endif
2821  
2822  #endif /* GC_H */
2823