alloc.c

   1  /*
   2   * Copyright (c) 1988-1989 Hans-J. Boehm, Alan J. Demers
   3   * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
   4   * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
   5   * Copyright (c) 1999-2011 Hewlett-Packard Development Company, L.P.
   6   * Copyright (c) 2008-2022 Ivan Maidanski
   7   *
   8   * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
   9   * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
  10   *
  11   * Permission is hereby granted to use or copy this program
  12   * for any purpose, provided the above notices are retained on all copies.
  13   * Permission to modify the code and to distribute modified code is granted,
  14   * provided the above notices are retained, and a notice that the code was
  15   * modified is included with the above copyright notice.
  16   */
  17  
  18  #include "private/gc_priv.h"
  19  
  20  /*
  21   * Separate free lists are maintained for different sized objects up
  22   * to `MAXOBJBYTES`.
  23   * The call `GC_allocobj(lg, kind)` ensures that the free list for the given
  24   * kind of objects of the given size in granules is a nonempty one.
  25   * It returns a pointer to the first entry on the free list.
  26   * In a single-threaded world, `GC_allocobj` may be called to allocate
  27   * an object of small size `lb` (and `NORMAL` kind) as follows
  28   * (`GC_generic_malloc_inner` is a wrapper over `GC_allocobj` which also
  29   * fills in `GC_size_map` if needed):
  30   *
  31   * ```
  32   *   lg = GC_size_map[lb];
  33   *   op = GC_objfreelist[lg];
  34   *   if (NULL == op) {
  35   *     op = GC_generic_malloc_inner(lb, NORMAL, 0);
  36   *   } else {
  37   *     GC_objfreelist[lg] = obj_link(op);
  38   *     GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
  39   *   }
  40   * ```
  41   *
  42   * Note that this is very fast if the free list is not empty; it should
  43   * only involve the execution of 4 or 5 simple instructions.
  44   * All composite objects on freelists are cleared, except for
  45   * their first "pointer-sized" word.
  46   */
  47  
  48  /*
  49   * The allocator uses `GC_allochblk` to allocate large chunks of objects.
  50   * These chunks all start on addresses that are multiples of `HBLKSIZE`.
  51   * Each allocated chunk has an associated header, which can be located
  52   * quickly based on the address of the chunk.  This makes it possible
  53   * to check quickly whether an arbitrary address corresponds to an object
  54   * administered by the allocator.  (See `headers.c` file for details.)
  55   */
  56  
  57  /* Number of bytes not intended to be collected. */
  58  word GC_non_gc_bytes = 0;
  59  
/* Number of completed garbage collections (incremented after a
   successful world-stopped marking phase). */
word GC_gc_no = 0;
  61  
  62  #ifndef NO_CLOCK
  63  
  64  static unsigned long full_gc_total_time = 0; /*< in ms, may wrap */
  65  static unsigned long stopped_mark_total_time = 0;
  66  static unsigned32 full_gc_total_ns_frac = 0; /*< fraction of 1 ms */
  67  static unsigned32 stopped_mark_total_ns_frac = 0;
  68  
  69  /*
  70   * Do performance measurements if set to `TRUE` (e.g., accumulation of
  71   * the total time of full collections).
  72   */
  73  static GC_bool measure_performance = FALSE;
  74  
/* Enable accumulation of collection timing statistics
   (see `measure_performance`). */
GC_API void GC_CALL
GC_start_performance_measurement(void)
{
  measure_performance = TRUE;
}
  80  
/* Return the total time of full collections, in ms (may wrap). */
GC_API unsigned long GC_CALL
GC_get_full_gc_total_time(void)
{
  return full_gc_total_time;
}
  86  
/* Return the total time of stopped-world marking, in ms (may wrap). */
GC_API unsigned long GC_CALL
GC_get_stopped_mark_total_time(void)
{
  return stopped_mark_total_time;
}
  92  
  93  /*
  94   * Variables for world-stop average delay time statistic computation.
  95   * `world_stopped_total_divisor` is incremented every world stop and
  96   * halved when reached its maximum (or upon `world_stopped_total_time`
  97   * overflow).  In milliseconds.
  98   */
  99  /* TODO: Store the nanosecond part. */
 100  static unsigned world_stopped_total_time = 0;
 101  static unsigned world_stopped_total_divisor = 0;
 102  
 103  #  ifndef MAX_TOTAL_TIME_DIVISOR
 104  /*
 105   * We shall not use big values here (so "outdated" delay time values would
 106   * have less impact on "average" delay time value than newer ones).
 107   */
 108  #    define MAX_TOTAL_TIME_DIVISOR 1000
 109  #  endif
 110  
 111  GC_API unsigned long GC_CALL
 112  GC_get_avg_stopped_mark_time_ns(void)
 113  {
 114    unsigned long total_time;
 115    unsigned divisor;
 116  
 117    READER_LOCK();
 118    total_time = (unsigned long)world_stopped_total_time;
 119    divisor = world_stopped_total_divisor;
 120    READER_UNLOCK();
 121    if (0 == divisor) {
 122      GC_ASSERT(0 == total_time);
 123      /*
 124       * No world-stopped collection has occurred since the start of
 125       * performance measurements.
 126       */
 127      return 0;
 128    }
 129  
 130    /* Halve values to prevent overflow during the multiplication. */
 131    for (; total_time > ~0UL / (1000UL * 1000); total_time >>= 1) {
 132      divisor >>= 1;
 133      if (UNLIKELY(0 == divisor)) {
 134        /* The actual result is larger than representable value. */
 135        return ~0UL;
 136      }
 137    }
 138  
 139    return total_time * (1000UL * 1000) / divisor;
 140  }
 141  
 142  #endif /* !NO_CLOCK */
 143  
 144  #ifndef GC_DISABLE_INCREMENTAL
 145  GC_INNER GC_bool GC_incremental = FALSE; /*< by default, stop the world */
 146  STATIC GC_bool GC_should_start_incremental_collection = FALSE;
 147  #endif
 148  
/* Return nonzero if incremental collection mode is on. */
GC_API int GC_CALL
GC_is_incremental_mode(void)
{
  return (int)GC_incremental;
}
 154  
 155  #ifdef THREADS
 156  int GC_parallel = FALSE; /*< parallel collection is off by default */
 157  #endif
 158  
 159  #if defined(GC_FULL_FREQ) && !defined(CPPCHECK)
 160  int GC_full_freq = GC_FULL_FREQ;
 161  #else
 162  /*
 163   * Every `GC_full_freq + 1` collection is a full collection, whether we
 164   * need it or not.
 165   */
 166  int GC_full_freq = 19;
 167  #endif
 168  
 169  /* Indicate whether a full collection due to heap growth is needed. */
 170  STATIC GC_bool GC_need_full_gc = FALSE;
 171  
 172  #ifdef THREAD_LOCAL_ALLOC
 173  GC_INNER GC_bool GC_world_stopped = FALSE;
 174  #endif
 175  
 176  STATIC GC_bool GC_disable_automatic_collection = FALSE;
 177  
/* Enable or disable automatically-triggered collections
   (see `GC_should_collect`).  Acquires the allocator lock. */
GC_API void GC_CALL
GC_set_disable_automatic_collection(int value)
{
  LOCK();
  GC_disable_automatic_collection = (GC_bool)value;
  UNLOCK();
}
 185  
/* Return the current value of the automatic-collection-disabled flag. */
GC_API int GC_CALL
GC_get_disable_automatic_collection(void)
{
  int value;

  READER_LOCK();
  value = (int)GC_disable_automatic_collection;
  READER_UNLOCK();
  return value;
}
 196  
 197  STATIC word GC_used_heap_size_after_full = 0;
 198  
 199  /*
 200   * The version macros are now defined in `gc_version.h` file, which is
 201   * included by `gc.h` file, which, in turn, is included by `gc_priv.h` file.
 202   */
 203  #ifndef GC_NO_VERSION_VAR
 204  EXTERN_C_BEGIN
 205  extern const GC_VERSION_VAL_T GC_version;
 206  EXTERN_C_END
 207  
 208  const GC_VERSION_VAL_T GC_version = ((GC_VERSION_VAL_T)GC_VERSION_MAJOR << 16)
 209                                      | (GC_VERSION_MINOR << 8)
 210                                      | GC_VERSION_MICRO;
 211  #endif
 212  
 213  GC_API GC_VERSION_VAL_T GC_CALL
 214  GC_get_version(void)
 215  {
 216    return ((GC_VERSION_VAL_T)GC_VERSION_MAJOR << 16) | (GC_VERSION_MINOR << 8)
 217           | GC_VERSION_MICRO;
 218  }
 219  
/* Return 1 if the collector was built with `DONT_ADD_BYTE_AT_END`
   defined, otherwise 0. */
GC_API int GC_CALL
GC_get_dont_add_byte_at_end(void)
{
#ifdef DONT_ADD_BYTE_AT_END
  return 1;
#else
  /* This is meaningful only if `GC_all_interior_pointers`. */
  return 0;
#endif
}
 230  
 231  /* Some more variables. */
 232  
 233  #ifdef GC_DONT_EXPAND
 234  int GC_dont_expand = TRUE;
 235  #else
 236  int GC_dont_expand = FALSE;
 237  #endif
 238  
 239  #if defined(GC_FREE_SPACE_DIVISOR) && !defined(CPPCHECK)
 240  word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR; /*< must be positive */
 241  #else
 242  word GC_free_space_divisor = 3;
 243  #endif
 244  
/* A stop function that never requests abandoning the collection. */
GC_INNER int GC_CALLBACK
GC_never_stop_func(void)
{
  return FALSE;
}
 250  
 251  #if defined(GC_TIME_LIMIT) && !defined(CPPCHECK)
 252  /*
 253   * We try to keep pause times from exceeding this by much.
 254   * In milliseconds.
 255   */
 256  unsigned long GC_time_limit = GC_TIME_LIMIT;
 257  #elif defined(PARALLEL_MARK)
 258  /*
 259   * The parallel marker cannot be interrupted for now, so the time limit
 260   * is absent by default.
 261   */
 262  unsigned long GC_time_limit = GC_TIME_UNLIMITED;
 263  #else
 264  unsigned long GC_time_limit = 15;
 265  #endif
 266  
 267  #ifndef NO_CLOCK
 268  /*
 269   * The nanoseconds add-on to `GC_time_limit` value.  Not updated by
 270   * `GC_set_time_limit()`.  Ignored if the value of `GC_time_limit` is
 271   * `GC_TIME_UNLIMITED`.
 272   */
 273  STATIC unsigned long GC_time_lim_nsec = 0;
 274  
 275  #  define TV_NSEC_LIMIT (1000UL * 1000) /*< amount of nanoseconds in 1 ms */
 276  
/* Set the collection time limit from a (ms, ns-fraction) pair.
   The nanoseconds part must be below 1 ms (`TV_NSEC_LIMIT`). */
GC_API void GC_CALL
GC_set_time_limit_tv(struct GC_timeval_s tv)
{
  GC_ASSERT(tv.tv_ms <= GC_TIME_UNLIMITED);
  GC_ASSERT(tv.tv_nsec < TV_NSEC_LIMIT);
  GC_time_limit = tv.tv_ms;
  GC_time_lim_nsec = tv.tv_nsec;
}
 285  
 286  GC_API struct GC_timeval_s GC_CALL
 287  GC_get_time_limit_tv(void)
 288  {
 289    struct GC_timeval_s tv;
 290  
 291    tv.tv_ms = GC_time_limit;
 292    tv.tv_nsec = GC_time_lim_nsec;
 293    return tv;
 294  }
 295  
 296  /* Time at which we stopped world.  Used only by `GC_timeout_stop_func()`. */
 297  STATIC CLOCK_TYPE GC_start_time = CLOCK_TYPE_INITIALIZER;
 298  #endif /* !NO_CLOCK */
 299  
 300  /* Number of attempts at finishing collection within `GC_time_limit`. */
 301  STATIC int GC_n_attempts = 0;
 302  
 303  /* Note: accessed holding the allocator lock. */
 304  STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;
 305  
/* Install the client-supplied default stop function (must be non-null).
   Acquires the allocator lock. */
GC_API void GC_CALL
GC_set_stop_func(GC_stop_func stop_func)
{
  GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
  LOCK();
  GC_default_stop_func = stop_func;
  UNLOCK();
}
 314  
/* Return the currently installed default stop function. */
GC_API GC_stop_func GC_CALL
GC_get_stop_func(void)
{
  GC_stop_func stop_func;

  READER_LOCK();
  stop_func = GC_default_stop_func;
  READER_UNLOCK();
  return stop_func;
}
 325  
 326  #if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
 327  #  define GC_timeout_stop_func GC_default_stop_func
 328  #else
/*
 * Stop function used for time-limited stopped marking: returns `TRUE`
 * (i.e. abandon marking) either if the client default stop function
 * says so, or once the time elapsed since `GC_start_time` exceeds
 * `GC_time_limit` ms (plus the `GC_time_lim_nsec` fraction).
 */
STATIC int GC_CALLBACK
GC_timeout_stop_func(void)
{
  CLOCK_TYPE current_time;
  static unsigned count = 0;
  unsigned long time_diff, nsec_diff;

  GC_ASSERT(I_HOLD_LOCK());
  if (GC_default_stop_func())
    return TRUE;

  /* Query the clock only on every 4th call to reduce overhead. */
  if (GC_time_limit == GC_TIME_UNLIMITED || (count++ & 3) != 0)
    return FALSE;

  GET_TIME(current_time);
  time_diff = MS_TIME_DIFF(current_time, GC_start_time);
  nsec_diff = NS_FRAC_TIME_DIFF(current_time, GC_start_time);
#  if defined(CPPCHECK)
  GC_noop1_ptr(&nsec_diff);
#  endif
  /* Limit exceeded if strictly over in ms, or equal in ms and the
     ns fraction has also been reached. */
  if (time_diff >= GC_time_limit
      && (time_diff > GC_time_limit || nsec_diff >= GC_time_lim_nsec)) {
    GC_COND_LOG_PRINTF("Abandoning stopped marking after %lu ms %lu ns"
                       " (attempt %d)\n",
                       time_diff, nsec_diff, GC_n_attempts);
    return TRUE;
  }

  return FALSE;
}
 359  #endif /* !GC_DISABLE_INCREMENTAL */
 360  
 361  #ifdef THREADS
 362  GC_INNER word GC_total_stacksize = 0;
 363  #endif
 364  
 365  /* The lowest value returned by `min_bytes_allocd()`. */
 366  static size_t min_bytes_allocd_minimum = 1;
 367  
/* Set the lower bound returned by `min_bytes_allocd()` (must be positive). */
GC_API void GC_CALL
GC_set_min_bytes_allocd(size_t value)
{
  GC_ASSERT(value > 0);
  min_bytes_allocd_minimum = value;
}
 374  
/* Return the lower bound used by `min_bytes_allocd()`. */
GC_API size_t GC_CALL
GC_get_min_bytes_allocd(void)
{
  return min_bytes_allocd_minimum;
}
 380  
 381  /*
 382   * Return the minimum number of bytes that must be allocated between
 383   * collections to amortize the cost of the latter.  Should be nonzero.
 384   */
static word
min_bytes_allocd(void)
{
  word result;
  word stack_size;
  /*
   * Total size of roots, it includes double stack size, since the stack
   * is expensive to scan.
   */
  word total_root_size;
  /* Estimate of memory to be scanned during normal collection. */
  word scan_size;

  GC_ASSERT(I_HOLD_LOCK());
#ifdef THREADS
  if (GC_need_to_lock) {
    /* We are multi-threaded... */
    stack_size = GC_total_stacksize;
    /*
     * For now, we just use the value computed during the latest garbage
     * collection.
     */
#  ifdef DEBUG_THREADS
    GC_log_printf("Total stacks size: %lu\n", (unsigned long)stack_size);
#  endif
  } else
#endif
  /* else */ {
    /* Single-threaded: measure the current stack extent directly. */
#ifdef STACK_NOT_SCANNED
    stack_size = 0;
#elif defined(STACK_GROWS_UP)
    stack_size = (word)(GC_approx_sp() - GC_stackbottom);
#else
    stack_size = (word)(GC_stackbottom - GC_approx_sp());
#endif
  }

  total_root_size = 2 * stack_size + GC_root_size;
  /* Heuristic weights: composite (pointer-containing) data counted
     twice, atomic data counted at 1/4. */
  scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4 + total_root_size;
  result = scan_size / GC_free_space_divisor;
  if (GC_incremental) {
    /* Halve the threshold in incremental mode. */
    result /= 2;
  }
  return result > min_bytes_allocd_minimum ? result : min_bytes_allocd_minimum;
}
 430  
 431  /* Number of explicitly managed bytes of storage at last collection. */
 432  STATIC word GC_non_gc_bytes_at_gc = 0;
 433  
 434  /*
 435   * Return the number of bytes allocated, adjusted for explicit storage
 436   * management, etc.  This number is used in deciding when to trigger
 437   * collections.
 438   */
 439  STATIC word
 440  GC_adj_bytes_allocd(void)
 441  {
 442    GC_signed_word result;
 443    GC_signed_word expl_managed = (GC_signed_word)GC_non_gc_bytes
 444                                  - (GC_signed_word)GC_non_gc_bytes_at_gc;
 445  
 446    /*
 447     * Do not count what was explicitly freed, or newly allocated for
 448     * explicit management.  Note that deallocating an explicitly managed
 449     * object should not alter result, assuming the client is playing by
 450     * the rules.
 451     */
 452    result = (GC_signed_word)GC_bytes_allocd + (GC_signed_word)GC_bytes_dropped
 453             - (GC_signed_word)GC_bytes_freed
 454             + (GC_signed_word)GC_finalizer_bytes_freed - expl_managed;
 455    if (result > (GC_signed_word)GC_bytes_allocd) {
 456      /* Probably a client bug or unfortunate scheduling. */
 457      result = (GC_signed_word)GC_bytes_allocd;
 458    }
 459    /*
 460     * We count objects enqueued for finalization as though they had been
 461     * reallocated this round.  Finalization is visible to user.
 462     * And if we do not count this, we have stability problems for programs
 463     * that finalize all objects.
 464     */
 465    result += (GC_signed_word)GC_bytes_finalized;
 466    if (result < (GC_signed_word)(GC_bytes_allocd >> 3)) {
 467      /*
 468       * Always count at least 1/8 of the allocations.  We do not want to
 469       * collect too infrequently, since that would inhibit coalescing of
 470       * free storage blocks.  This also makes us partially robust against
 471       * client bugs.
 472       */
 473      result = (GC_signed_word)(GC_bytes_allocd >> 3);
 474    }
 475    return (word)result;
 476  }
 477  
 478  /*
 479   * Clear up a few frames worth of garbage left at the top of the stack.
 480   * This is used to prevent us from accidentally treating garbage left
 481   * on the stack by other parts of the collector as roots.
 482   * This differs from the code in `misc.c` file, which actually tries
 483   * to keep the stack clear of long-lived, client-generated garbage.
 484   */
STATIC void
GC_clear_a_few_frames(void)
{
#ifndef CLEAR_STACK_NPTRS
#  define CLEAR_STACK_NPTRS 64 /*< pointers */
#endif
  /* `volatile` so the compiler cannot elide the clearing as dead stores. */
  volatile ptr_t frames[CLEAR_STACK_NPTRS];

  BZERO(CAST_AWAY_VOLATILE_PVOID(frames), sizeof(frames));
}
 495  
/*
 * Request that an incremental collection be started.  A no-op unless the
 * collector is in incremental mode; if garbage collection is not
 * disabled, also performs one unit of collection work immediately.
 */
GC_API void GC_CALL
GC_start_incremental_collection(void)
{
#ifndef GC_DISABLE_INCREMENTAL
  LOCK();
  if (GC_incremental) {
    GC_should_start_incremental_collection = TRUE;
    if (!GC_dont_gc) {
      GC_collect_a_little_inner(1);
    }
  }
  UNLOCK();
#endif
}
 510  
/*
 * Decide whether a collection should be triggered now.  Called with the
 * allocator lock held.  An explicitly requested incremental collection
 * (see `GC_start_incremental_collection`) always wins; otherwise, the
 * decision is based on the adjusted allocation count reaching the
 * `min_bytes_allocd()` threshold, or on recent heap growth.
 */
GC_INNER GC_bool
GC_should_collect(void)
{
  /* Cache of min_bytes_allocd(), recomputed once per collection cycle. */
  static word last_min_bytes_allocd;
  static word last_gc_no;

  GC_ASSERT(I_HOLD_LOCK());
  if (last_gc_no != GC_gc_no) {
    last_min_bytes_allocd = min_bytes_allocd();
    last_gc_no = GC_gc_no;
  }
#ifndef GC_DISABLE_INCREMENTAL
  if (GC_should_start_incremental_collection) {
    /* Consume the one-shot request. */
    GC_should_start_incremental_collection = FALSE;
    return TRUE;
  }
#endif
  if (GC_disable_automatic_collection)
    return FALSE;

  if (GC_last_heap_growth_gc_no == GC_gc_no) {
    /* Avoid expanding past limits used by black-listing. */
    return TRUE;
  }

  return GC_adj_bytes_allocd() >= last_min_bytes_allocd;
}
 538  
 539  /*
 540   * Called at start of full collections.  Not called if 0.  Called with
 541   * the allocator lock held.  Not used by the collector itself.
 542   */
 543  /* `STATIC` */ GC_start_callback_proc GC_start_call_back = 0;
 544  
/* Install the start-of-full-collection callback (`fn` may be 0 to
   disable the notification).  Acquires the allocator lock. */
GC_API void GC_CALL
GC_set_start_callback(GC_start_callback_proc fn)
{
  LOCK();
  GC_start_call_back = fn;
  UNLOCK();
}
 552  
/* Return the currently installed start-of-full-collection callback. */
GC_API GC_start_callback_proc GC_CALL
GC_get_start_callback(void)
{
  GC_start_callback_proc fn;

  READER_LOCK();
  fn = GC_start_call_back;
  READER_UNLOCK();
  return fn;
}
 563  
 564  GC_INLINE void
 565  GC_notify_full_gc(void)
 566  {
 567    if (GC_start_call_back != 0) {
 568      (*GC_start_call_back)();
 569    }
 570  }
 571  
 572  STATIC GC_bool GC_is_full_gc = FALSE;
 573  
 574  STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
 575  STATIC void GC_finish_collection(void);
 576  
 577  /*
 578   * Initiate a garbage collection if appropriate.  Choose judiciously
 579   * between partial, full, and stop-world collections.
 580   */
STATIC void
GC_maybe_gc(void)
{
  /* Number of partial collections since the last full one. */
  static int n_partial_gcs = 0;

  GC_ASSERT(I_HOLD_LOCK());
  ASSERT_CANCEL_DISABLED();
  if (!GC_should_collect())
    return;

  if (!GC_incremental) {
    /* Non-incremental mode: do a complete stop-world collection. */
    GC_gcollect_inner();
    return;
  }

  GC_ASSERT(!GC_collection_in_progress());
#ifdef PARALLEL_MARK
  if (GC_parallel)
    GC_wait_for_reclaim();
#endif
  /* Every `GC_full_freq + 1`-th collection (or one forced by heap
     growth) is promoted to a full collection. */
  if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
    GC_COND_LOG_PRINTF(
        "***>Full mark for collection #%lu after %lu allocd bytes\n",
        (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
    GC_notify_full_gc();
    ENTER_GC();
    GC_promote_black_lists();
    (void)GC_reclaim_all((GC_stop_func)0, TRUE);
    GC_clear_marks();
    EXIT_GC();
    n_partial_gcs = 0;
    GC_is_full_gc = TRUE;
  } else {
    n_partial_gcs++;
  }

  /*
   * Try to mark with the world stopped.  If we run out of time, then this
   * turns into an incremental marking.
   */
#ifndef NO_CLOCK
  if (GC_time_limit != GC_TIME_UNLIMITED)
    GET_TIME(GC_start_time);
#endif
  if (GC_stopped_mark(GC_timeout_stop_func)) {
    SAVE_CALLERS_TO_LAST_STACK();
    GC_finish_collection();
  } else if (!GC_is_full_gc) {
    /* Count this as the first attempt. */
    GC_n_attempts++;
  }
}
 633  
 634  STATIC GC_on_collection_event_proc GC_on_collection_event = 0;
 635  
/* Install the collection-event notifier.  Acquires the allocator lock. */
GC_API void GC_CALL
GC_set_on_collection_event(GC_on_collection_event_proc fn)
{
  /* `fn` may be 0 (means no event notifier). */
  LOCK();
  GC_on_collection_event = fn;
  UNLOCK();
}
 644  
/* Return the currently installed collection-event notifier (may be 0). */
GC_API GC_on_collection_event_proc GC_CALL
GC_get_on_collection_event(void)
{
  GC_on_collection_event_proc fn;

  READER_LOCK();
  fn = GC_on_collection_event;
  READER_UNLOCK();
  return fn;
}
 655  
/*
 * Attempt a complete (full, world-stop) collection.  Called with the
 * allocator lock held.  Returns `TRUE` on success; returns `FALSE` if
 * garbage collection is disabled or if `stop_func` requested abandoning
 * the collection at any point.  Any collection already in progress is
 * finished first.  Notifies `GC_EVENT_START`/`GC_EVENT_END` and the
 * full-collection start callback, and accumulates timing statistics
 * when performance measurement is on.
 */
GC_INNER GC_bool
GC_try_to_collect_inner(GC_stop_func stop_func)
{
#ifndef NO_CLOCK
  CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
  GC_bool start_time_valid;
#endif

  ASSERT_CANCEL_DISABLED();
  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_is_initialized);
  if (GC_dont_gc || (*stop_func)())
    return FALSE;
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_START);
  if (GC_incremental && GC_collection_in_progress()) {
    GC_COND_LOG_PRINTF(
        "GC_try_to_collect_inner: finishing collection in progress\n");
    /* Just finish collection already in progress. */
    do {
      if ((*stop_func)()) {
        /* TODO: Notify `GC_EVENT_ABANDON`. */
        return FALSE;
      }
      GC_collect_a_little_inner(1);
    } while (GC_collection_in_progress());
  }
  GC_notify_full_gc();
#ifndef NO_CLOCK
  start_time_valid = FALSE;
  if ((GC_print_stats | (int)measure_performance) != 0) {
    if (GC_print_stats)
      GC_log_printf("Initiating full world-stop collection!\n");
    start_time_valid = TRUE;
    GET_TIME(start_time);
  }
#endif
  GC_promote_black_lists();
  /*
   * Make sure all blocks have been reclaimed, so sweep routines do not
   * see cleared mark bits.  If we are guaranteed to finish, then this
   * is unnecessary.  In the find-leak case, we have to finish to
   * guarantee that previously unmarked objects are not reported as leaks.
   */
#ifdef PARALLEL_MARK
  if (GC_parallel)
    GC_wait_for_reclaim();
#endif
  ENTER_GC();
  if ((GC_find_leak_inner || stop_func != GC_never_stop_func)
      && !GC_reclaim_all(stop_func, FALSE)) {
    /* Aborted.  So far everything is still consistent. */
    EXIT_GC();
    /* TODO: Notify `GC_EVENT_ABANDON`. */
    return FALSE;
  }
  GC_invalidate_mark_state(); /*< flush mark stack */
  GC_clear_marks();
  SAVE_CALLERS_TO_LAST_STACK();
  GC_is_full_gc = TRUE;
  EXIT_GC();
  if (!GC_stopped_mark(stop_func)) {
    if (!GC_incremental) {
      /*
       * We are partially done and have no way to complete or use
       * current work.  Reestablish invariants as cheaply as possible.
       */
      GC_invalidate_mark_state();
      GC_unpromote_black_lists();
    } else {
      /*
       * We claim the world is already (or still) consistent.
       * We will finish incrementally.
       */
    }
    /* TODO: Notify `GC_EVENT_ABANDON`. */
    return FALSE;
  }
  GC_finish_collection();
#ifndef NO_CLOCK
  if (start_time_valid) {
    CLOCK_TYPE current_time;
    unsigned long time_diff, ns_frac_diff;

    GET_TIME(current_time);
    time_diff = MS_TIME_DIFF(current_time, start_time);
    ns_frac_diff = NS_FRAC_TIME_DIFF(current_time, start_time);
    if (measure_performance) {
      full_gc_total_time += time_diff; /*< may wrap */
      full_gc_total_ns_frac += (unsigned32)ns_frac_diff;
      if (full_gc_total_ns_frac >= (unsigned32)1000000UL) {
        /* Overflow of the nanoseconds part. */
        full_gc_total_ns_frac -= (unsigned32)1000000UL;
        full_gc_total_time++;
      }
    }
    if (GC_print_stats)
      GC_log_printf("Complete collection took %lu ms %lu ns\n", time_diff,
                    ns_frac_diff);
  }
#endif
  if (GC_on_collection_event)
    GC_on_collection_event(GC_EVENT_END);
  return TRUE;
}
 761  
 762  /* The number of extra calls to `GC_mark_some` that we have made. */
 763  STATIC size_t GC_deficit = 0;
 764  
 765  /* The default value of `GC_rate`. */
 766  #ifndef GC_RATE
 767  #  define GC_RATE 10
 768  #endif
 769  
 770  /*
 771   * When `GC_collect_a_little_inner()` performs `n_blocks` units of
 772   * garbage collection work, a unit is intended to touch roughly
 773   * `GC_rate` pages.  (But, every once in a while, we do more than that.)
 774   * This needs to be a fairly large number with our current incremental
 775   * collection strategy, since otherwise we allocate too much during
 776   * garbage collection, and the cleanup gets expensive.
 777   */
 778  STATIC unsigned GC_rate = GC_RATE;
 779  
/* Set `GC_rate`, the number of pages a unit of incremental collection
   work is intended to touch (must be positive). */
GC_API void GC_CALL
GC_set_rate(int value)
{
  GC_ASSERT(value > 0);
  GC_rate = (unsigned)value;
}
 786  
/* Return the current value of `GC_rate`. */
GC_API int GC_CALL
GC_get_rate(void)
{
  return (int)GC_rate;
}
 792  
 793  /* The default maximum number of prior attempts at world stop marking. */
 794  #ifndef MAX_PRIOR_ATTEMPTS
 795  #  define MAX_PRIOR_ATTEMPTS 3
 796  #endif
 797  
 798  /*
 799   * The maximum number of prior attempts at world stop marking.
 800   * A value of 1 means that we finish the second time, no matter how long
 801   * it takes.  Does not count the initial root scan for a full collection.
 802   */
 803  static int max_prior_attempts = MAX_PRIOR_ATTEMPTS;
 804  
/* Set the maximum number of prior attempts at world-stop marking
   (must be non-negative). */
GC_API void GC_CALL
GC_set_max_prior_attempts(int value)
{
  GC_ASSERT(value >= 0);
  max_prior_attempts = value;
}
 811  
/* Return the maximum number of prior attempts at world-stop marking. */
GC_API int GC_CALL
GC_get_max_prior_attempts(void)
{
  return max_prior_attempts;
}
 817  
/*
 * Perform `n_blocks` units of incremental collection work (roughly
 * `GC_rate` pages of marking per unit), or start a collection via
 * `GC_maybe_gc()` if none is in progress.  Called with the allocator
 * lock held.  If marking completes within the budget, follows up with
 * a world-stop marking and finishes the collection.
 */
GC_INNER void
GC_collect_a_little_inner(size_t n_blocks)
{
  IF_CANCEL(int cancel_state;)

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_is_initialized);
  DISABLE_CANCEL(cancel_state);
  if (GC_incremental && GC_collection_in_progress()) {
    size_t i;
    size_t max_deficit = GC_rate * n_blocks;

    ENTER_GC();
#ifdef PARALLEL_MARK
    if (GC_time_limit != GC_TIME_UNLIMITED)
      GC_parallel_mark_disabled = TRUE;
#endif
    /* Start from `GC_deficit`: extra work already done is credited. */
    for (i = GC_deficit; i < max_deficit; i++) {
      if (GC_mark_some(NULL))
        break;
    }
#ifdef PARALLEL_MARK
    GC_parallel_mark_disabled = FALSE;
#endif
    EXIT_GC();

    if (i < max_deficit && !GC_dont_gc) {
      GC_ASSERT(!GC_collection_in_progress());
      /* Need to follow up with a full collection. */
      SAVE_CALLERS_TO_LAST_STACK();
#ifdef PARALLEL_MARK
      if (GC_parallel)
        GC_wait_for_reclaim();
#endif
#ifndef NO_CLOCK
      if (GC_time_limit != GC_TIME_UNLIMITED
          && GC_n_attempts < max_prior_attempts)
        GET_TIME(GC_start_time);
#endif
      /* After `max_prior_attempts` failures, marking is no longer
         abandonable: use `GC_never_stop_func` so it must finish. */
      if (GC_stopped_mark(GC_n_attempts < max_prior_attempts
                              ? GC_timeout_stop_func
                              : GC_never_stop_func)) {
        GC_finish_collection();
      } else {
        GC_n_attempts++;
      }
    }
    if (GC_deficit > 0) {
      /* Pay off the accumulated deficit by the work budget just spent. */
      GC_deficit = GC_deficit > max_deficit ? GC_deficit - max_deficit : 0;
    }
  } else if (!GC_dont_gc) {
    GC_maybe_gc();
  }
  RESTORE_CANCEL(cancel_state);
}
 873  
 874  #if !defined(NO_FIND_LEAK) || !defined(SHORT_DBG_HDRS)
 875  GC_INNER void (*GC_check_heap)(void) = 0;
 876  GC_INNER void (*GC_print_all_smashed)(void) = 0;
 877  #endif
 878  
/*
 * Public entry point: perform one unit of collection work.
 * Returns nonzero while a collection remains in progress.
 * Initializes the collector on first use.
 */
GC_API int GC_CALL
GC_collect_a_little(void)
{
  int result;

  if (UNLIKELY(!GC_is_initialized))
    GC_init();
  LOCK();
  /*
   * Note: if the collection is in progress, this may do marking (not
   * stopping the world) even in case of disabled garbage collection.
   */
  GC_collect_a_little_inner(1);
  result = (int)GC_collection_in_progress();
  UNLOCK();
  if (GC_debugging_started && !result)
    GC_print_all_smashed();
  return result;
}
 898  
 899  #ifdef THREADS
/*
 * Stop the world on behalf of the client.  Note the asymmetry: this
 * acquires the allocator lock and leaves it held; the matching
 * `GC_start_world_external()` releases it.
 */
GC_API void GC_CALL
GC_stop_world_external(void)
{
  GC_ASSERT(GC_is_initialized);
  LOCK();
#  ifdef THREAD_LOCAL_ALLOC
  GC_ASSERT(!GC_world_stopped);
#  endif
  ENTER_GC();
  STOP_WORLD();
#  ifdef THREAD_LOCAL_ALLOC
  GC_world_stopped = TRUE;
#  endif
}
 914  
/*
 * Restart the world after `GC_stop_world_external()`.  Releases the
 * allocator lock that the stop call left held.
 */
GC_API void GC_CALL
GC_start_world_external(void)
{
#  ifdef THREAD_LOCAL_ALLOC
  GC_ASSERT(GC_world_stopped);
  GC_world_stopped = FALSE;
#  else
  GC_ASSERT(GC_is_initialized);
#  endif
  START_WORLD();
  EXIT_GC();
  UNLOCK();
}
 928  #endif /* THREADS */
 929  
 930  #ifdef USE_MUNMAP
 931  #  ifndef MUNMAP_THRESHOLD
 932  #    define MUNMAP_THRESHOLD 7
 933  #  endif
 934  GC_INNER unsigned GC_unmap_threshold = MUNMAP_THRESHOLD;
 935  
 936  #  define IF_USE_MUNMAP(x) x
 937  #  define COMMA_IF_USE_MUNMAP(x) /* comma */ , x
 938  #else
 939  #  define IF_USE_MUNMAP(x)
 940  #  define COMMA_IF_USE_MUNMAP(x)
 941  #endif /* !USE_MUNMAP */
 942  
 943  /*
 944   * We stop the world and mark from all roots.  If `stop_func()` ever
 945   * returns `TRUE`, we may fail and return `FALSE`.  Increment `GC_gc_no`
 946   * if we succeed.
 947   */
 948  STATIC GC_bool
 949  GC_stopped_mark(GC_stop_func stop_func)
 950  {
 951    ptr_t cold_gc_frame = GC_approx_sp();
 952    unsigned abandoned_at;
 953  #ifndef NO_CLOCK
 954    CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
 955    GC_bool start_time_valid = FALSE;
 956  #endif
 957  
 958    GC_ASSERT(I_HOLD_LOCK());
 959    GC_ASSERT(GC_is_initialized);
 960    ENTER_GC();
 961  #if !defined(REDIRECT_MALLOC) && defined(USE_WINALLOC)
 962    GC_add_current_malloc_heap();
 963  #endif
 964  #if defined(REGISTER_LIBRARIES_EARLY)
 965    GC_cond_register_dynamic_libraries();
 966  #endif
 967  
 968  #if !defined(GC_NO_FINALIZATION) && !defined(GC_TOGGLE_REFS_NOT_NEEDED)
 969    GC_process_togglerefs();
 970  #endif
 971  
 972    /* Output blank line for convenience here. */
 973    GC_COND_LOG_PRINTF(
 974        "\n--> Marking for collection #%lu after %lu allocated bytes\n",
 975        (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
 976  #ifndef NO_CLOCK
 977    if (GC_PRINT_STATS_FLAG || measure_performance) {
 978      GET_TIME(start_time);
 979      start_time_valid = TRUE;
 980    }
 981  #endif
 982  #ifdef THREADS
 983    if (GC_on_collection_event)
 984      GC_on_collection_event(GC_EVENT_PRE_STOP_WORLD);
 985  #endif
 986    STOP_WORLD();
 987  #ifdef THREADS
 988    if (GC_on_collection_event)
 989      GC_on_collection_event(GC_EVENT_POST_STOP_WORLD);
 990  #  ifdef THREAD_LOCAL_ALLOC
 991    GC_world_stopped = TRUE;
 992  #  elif defined(CPPCHECK)
 993    /* Workaround a warning about adjacent same `if` condition. */
 994    (void)0;
 995  #  endif
 996  #endif
 997  
 998  #ifdef MAKE_BACK_GRAPH
 999    if (GC_print_back_height) {
1000      GC_build_back_graph();
1001    }
1002  #endif
1003  
1004    /* Notify about marking from all roots. */
1005    if (GC_on_collection_event)
1006      GC_on_collection_event(GC_EVENT_MARK_START);
1007  
1008    /* Minimize junk left in my registers and on the stack. */
1009    GC_clear_a_few_frames();
1010    GC_noop6(0, 0, 0, 0, 0, 0);
1011  
1012    GC_initiate_gc();
1013  #ifdef PARALLEL_MARK
1014    if (stop_func != GC_never_stop_func)
1015      GC_parallel_mark_disabled = TRUE;
1016  #endif
1017    for (abandoned_at = 1; !(*stop_func)(); abandoned_at++) {
1018      if (GC_mark_some(cold_gc_frame)) {
1019  #ifdef PARALLEL_MARK
1020        if (GC_parallel && GC_parallel_mark_disabled) {
1021          GC_COND_LOG_PRINTF("Stopped marking done after %u iterations"
1022                             " with disabled parallel marker\n",
1023                             abandoned_at - 1);
1024        }
1025  #endif
1026        abandoned_at = 0;
1027        break;
1028      }
1029    }
1030  #ifdef PARALLEL_MARK
1031    GC_parallel_mark_disabled = FALSE;
1032  #endif
1033  
1034    if (abandoned_at > 0) {
1035      /* Give the mutator a chance. */
1036      GC_deficit = abandoned_at - 1;
1037      /* TODO: Notify `GC_EVENT_MARK_ABANDON`. */
1038    } else {
1039      GC_gc_no++;
1040      /* Check all debugged objects for consistency. */
1041      if (GC_debugging_started)
1042        GC_check_heap();
1043      if (GC_on_collection_event)
1044        GC_on_collection_event(GC_EVENT_MARK_END);
1045    }
1046  
1047  #ifdef THREADS
1048    if (GC_on_collection_event)
1049      GC_on_collection_event(GC_EVENT_PRE_START_WORLD);
1050  #endif
1051  #ifdef THREAD_LOCAL_ALLOC
1052    GC_world_stopped = FALSE;
1053  #endif
1054    START_WORLD();
1055  #ifdef THREADS
1056    if (GC_on_collection_event)
1057      GC_on_collection_event(GC_EVENT_POST_START_WORLD);
1058  #endif
1059  
1060  #ifndef NO_CLOCK
1061    if (start_time_valid) {
1062      CLOCK_TYPE current_time;
1063      unsigned long time_diff, ns_frac_diff;
1064  
1065      /* TODO: Avoid code duplication from `GC_try_to_collect_inner`. */
1066      GET_TIME(current_time);
1067      time_diff = MS_TIME_DIFF(current_time, start_time);
1068      ns_frac_diff = NS_FRAC_TIME_DIFF(current_time, start_time);
1069      if (measure_performance) {
1070        stopped_mark_total_time += time_diff; /*< may wrap */
1071        stopped_mark_total_ns_frac += (unsigned32)ns_frac_diff;
1072        if (stopped_mark_total_ns_frac >= (unsigned32)1000000UL) {
1073          stopped_mark_total_ns_frac -= (unsigned32)1000000UL;
1074          stopped_mark_total_time++;
1075        }
1076      }
1077  
1078      if (GC_PRINT_STATS_FLAG || measure_performance) {
1079        unsigned total_time = world_stopped_total_time;
1080        unsigned divisor = world_stopped_total_divisor;
1081  
1082        /* Compute new world-stop delay total time. */
1083        if (total_time > (((unsigned)-1) >> 1)
1084            || divisor >= MAX_TOTAL_TIME_DIVISOR) {
1085          /* Halve values if overflow occurs. */
1086          total_time >>= 1;
1087          divisor >>= 1;
1088        }
1089        total_time += time_diff < (((unsigned)-1) >> 1) ? (unsigned)time_diff
1090                                                        : ((unsigned)-1) >> 1;
1091        /* Update old `world_stopped_total_time` and its divisor. */
1092        world_stopped_total_time = total_time;
1093        world_stopped_total_divisor = ++divisor;
1094        if (GC_PRINT_STATS_FLAG && 0 == abandoned_at) {
1095          GC_ASSERT(divisor != 0);
1096          GC_log_printf("World-stopped marking took %lu ms %lu ns"
1097                        " (%u ms in average)\n",
1098                        time_diff, ns_frac_diff, total_time / divisor);
1099        }
1100      }
1101    }
1102  #endif
1103  
1104    EXIT_GC();
1105    if (0 == abandoned_at)
1106      return TRUE;
1107    GC_COND_LOG_PRINTF("Abandoned stopped marking after %u iterations\n",
1108                       abandoned_at - 1);
1109    return FALSE;
1110  }
1111  
/*
 * Set the mark bit for every object on the free list whose first entry
 * is `q` (which must be non-`NULL`).  The block header (and, if mark
 * bits are per-object, the object size) is re-fetched whenever the
 * traversal crosses into a different heap block.
 */
GC_INNER void
GC_set_fl_marks(ptr_t q)
{
#ifdef GC_ASSERTIONS
  /* A "twice faster" iterator; used only to detect free-list cycles. */
  ptr_t q2;
#endif
  struct hblk *h = HBLKPTR(q);
  const struct hblk *last_h = h;
  hdr *hhdr;
#ifdef MARK_BIT_PER_OBJ
  size_t sz;
#endif

  GC_ASSERT(q != NULL);
  hhdr = HDR(h);
#ifdef MARK_BIT_PER_OBJ
  sz = hhdr->hb_sz;
#endif
#ifdef GC_ASSERTIONS
  q2 = (ptr_t)obj_link(q);
#endif
  for (;;) {
    /*
     * Note: `sz` is only a macro argument here; `MARK_BIT_NO` cannot
     * expand it unless `MARK_BIT_PER_OBJ` is defined (otherwise `sz`
     * is not even declared in this function).
     */
    size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)q - (ptr_t)h), sz);

    if (!mark_bit_from_hdr(hhdr, bit_no)) {
      set_mark_bit_from_hdr(hhdr, bit_no);
      INCR_MARKS(hhdr);
    }
    q = (ptr_t)obj_link(q);
    if (NULL == q)
      break;
#ifdef GC_ASSERTIONS
    /*
     * Detect a cycle in the free list.  The algorithm is to have
     * a "twice faster" iterator over the list which meets the first
     * one in case of a cycle existing in the list.
     */
    if (q2 != NULL) {
      q2 = (ptr_t)obj_link(q2);
      GC_ASSERT(q2 != q);
      if (q2 != NULL) {
        q2 = (ptr_t)obj_link(q2);
        GC_ASSERT(q2 != q);
      }
    }
#endif

    h = HBLKPTR(q);
    if (UNLIKELY(h != last_h)) {
      /* Crossed into a different heap block. */
      last_h = h;
      /* Update `hhdr` and `sz`. */
      hhdr = HDR(h);
#ifdef MARK_BIT_PER_OBJ
      sz = hhdr->hb_sz;
#endif
    }
  }
}
1170  
#if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
/*
 * Check that all mark bits for the free list, whose first entry is
 * `*pfreelist`, are set.  The check is skipped if `*pfreelist` points to
 * a special value (i.e. its address is not greater than `HBLKSIZE`).
 * Aborts if an unmarked entry is found.
 */
void
GC_check_fl_marks(void **pfreelist)
{
  /*
   * TODO: There is a data race with `GC_FAST_MALLOC_GRANS` (which does
   * not do atomic updates to the free-list).  The race seems to be
   * harmless, and for now we just skip this check in case of TSan.
   */
#  if defined(AO_HAVE_load_acquire_read) && !defined(THREAD_SANITIZER)
  ptr_t list = GC_cptr_load_acquire_read((volatile ptr_t *)pfreelist);
  /* Atomic operations are used because the world is running. */
  ptr_t p, prev, next;

  if (ADDR(list) <= HBLKSIZE)
    return;

  prev = (ptr_t)pfreelist;
  for (p = list; p != NULL; p = next) {
    if (!GC_is_marked(p)) {
      ABORT_ARG2("Unmarked local free-list entry", ": object %p on list %p",
                 (void *)p, (void *)list);
    }

    /*
     * While traversing the free-list, it re-reads the pointer to the
     * current node before accepting its next pointer and bails out
     * if the latter has changed.  That way, it will not try to follow
     * the pointer which might have been modified after the object was
     * returned to the client.  It might perform the mark-check on the
     * just allocated object but that should be harmless.
     */
    next = GC_cptr_load_acquire_read((volatile ptr_t *)p);
    if (GC_cptr_load((volatile ptr_t *)prev) != p)
      break;
    prev = p;
  }
#  else
  /* FIXME: Not implemented (just skipped). */
  (void)pfreelist;
#  endif
}
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
1219  
1220  /*
1221   * Clear all mark bits for the free list (specified by the first entry).
1222   * Decrement `GC_bytes_found` by number of bytes on free list.
1223   */
1224  STATIC void
1225  GC_clear_fl_marks(ptr_t q)
1226  {
1227    struct hblk *h = HBLKPTR(q);
1228    const struct hblk *last_h = h;
1229    hdr *hhdr = HDR(h);
1230    size_t sz = hhdr->hb_sz; /*< normally set only once */
1231  
1232    for (;;) {
1233      size_t bit_no = MARK_BIT_NO((size_t)((ptr_t)q - (ptr_t)h), sz);
1234  
1235      if (mark_bit_from_hdr(hhdr, bit_no)) {
1236        size_t n_marks = hhdr->hb_n_marks;
1237  
1238  #ifdef LINT2
1239        if (0 == n_marks)
1240          ABORT("hhdr->hb_n_marks cannot be zero");
1241  #else
1242        GC_ASSERT(n_marks != 0);
1243  #endif
1244        clear_mark_bit_from_hdr(hhdr, bit_no);
1245        n_marks--;
1246  #ifdef PARALLEL_MARK
1247        /* Approximate count, do not decrement to zero! */
1248        if (n_marks != 0 || !GC_parallel) {
1249          hhdr->hb_n_marks = n_marks;
1250        }
1251  #else
1252        hhdr->hb_n_marks = n_marks;
1253  #endif
1254      }
1255      GC_bytes_found -= (GC_signed_word)sz;
1256  
1257      q = (ptr_t)obj_link(q);
1258      if (NULL == q)
1259        break;
1260  
1261      h = HBLKPTR(q);
1262      if (UNLIKELY(h != last_h)) {
1263        last_h = h;
1264        /* Update `hhdr` and `sz`. */
1265        hhdr = HDR(h);
1266        sz = hhdr->hb_sz;
1267      }
1268    }
1269  }
1270  
1271  /* Mark all objects on the free lists for every object kind. */
1272  static void
1273  set_all_fl_marks(void)
1274  {
1275    unsigned kind;
1276  
1277    for (kind = 0; kind < GC_n_kinds; kind++) {
1278      word size; /*< current object size */
1279  
1280      for (size = 1; size <= MAXOBJGRANULES; size++) {
1281        ptr_t q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
1282  
1283        if (q != NULL)
1284          GC_set_fl_marks(q);
1285      }
1286    }
1287  }
1288  
1289  /*
1290   * Clear free-list mark bits.  Also subtract memory remaining from
1291   * `GC_bytes_found` count.
1292   */
1293  static void
1294  clear_all_fl_marks(void)
1295  {
1296    unsigned kind;
1297  
1298    for (kind = 0; kind < GC_n_kinds; kind++) {
1299      word size; /*< current object size */
1300  
1301      for (size = 1; size <= MAXOBJGRANULES; size++) {
1302        ptr_t q = (ptr_t)GC_obj_kinds[kind].ok_freelist[size];
1303  
1304        if (q != NULL)
1305          GC_clear_fl_marks(q);
1306      }
1307    }
1308  }
1309  
1310  #if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
1311  void GC_check_tls(void);
1312  #endif
1313  
1314  GC_on_heap_resize_proc GC_on_heap_resize = 0;
1315  
1316  /* Used for logging only. */
1317  GC_INLINE int
1318  GC_compute_heap_usage_percent(void)
1319  {
1320    word used = GC_composite_in_use + GC_atomic_in_use + GC_bytes_allocd;
1321    word heap_sz = GC_heapsize - GC_unmapped_bytes;
1322  #if defined(CPPCHECK)
1323    word limit = (GC_WORD_MAX >> 1) / 50; /*< to avoid a false positive */
1324  #else
1325    const word limit = GC_WORD_MAX / 100;
1326  #endif
1327  
1328    return used >= heap_sz ? 0
1329           : used < limit  ? (int)((used * 100) / heap_sz)
1330                           : (int)(used / (heap_sz / 100));
1331  }
1332  
1333  #define GC_DBGLOG_PRINT_HEAP_IN_USE()                                        \
1334    GC_DBGLOG_PRINTF("In-use heap: %d%% (%lu KiB pointers + %lu KiB other)\n", \
1335                     GC_compute_heap_usage_percent(),                          \
1336                     TO_KiB_UL(GC_composite_in_use),                           \
1337                     TO_KiB_UL(GC_atomic_in_use + GC_bytes_allocd))
1338  
1339  /*
1340   * Finish up a collection.  Assumes mark bits are consistent, but the
1341   * world is otherwise running.
1342   */
1343  STATIC void
1344  GC_finish_collection(void)
1345  {
1346  #ifndef NO_CLOCK
1347    CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
1348    CLOCK_TYPE finalize_time = CLOCK_TYPE_INITIALIZER;
1349  #endif
1350  
1351    GC_ASSERT(I_HOLD_LOCK());
1352  #if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC) \
1353      && !defined(DBG_HDRS_ALL)
1354    /* Check that we marked some of our own data. */
1355    GC_check_tls();
1356    /* TODO: Add more checks. */
1357  #endif
1358  
1359  #ifndef NO_CLOCK
1360    if (GC_print_stats)
1361      GET_TIME(start_time);
1362  #endif
1363    if (GC_on_collection_event)
1364      GC_on_collection_event(GC_EVENT_RECLAIM_START);
1365  
1366  #ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
1367    if (GC_bytes_found > 0)
1368      GC_reclaimed_bytes_before_gc += (word)GC_bytes_found;
1369  #endif
1370    GC_bytes_found = 0;
1371  #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
1372    if (GETENV("GC_PRINT_ADDRESS_MAP") != NULL) {
1373      GC_print_address_map();
1374    }
1375  #endif
1376    COND_DUMP;
1377    if (GC_find_leak_inner) {
1378      set_all_fl_marks();
1379      /* This just checks; it does not really reclaim anything. */
1380      GC_start_reclaim(TRUE);
1381    }
1382  
1383  #ifndef GC_NO_FINALIZATION
1384    GC_finalize();
1385  #endif
1386  #ifndef NO_CLOCK
1387    if (GC_print_stats)
1388      GET_TIME(finalize_time);
1389  #endif
1390  #ifdef MAKE_BACK_GRAPH
1391    if (GC_print_back_height) {
1392      GC_traverse_back_graph();
1393    }
1394  #endif
1395  
1396    /*
1397     * Clear free-list mark bits, in case they got accidentally marked
1398     * (or `GC_find_leak` is set and they were intentionally marked).
1399     * Note that composite objects on free list are cleared, thus
1400     * accidentally marking a free list is not a problem; but some objects
1401     * on the list itself might be marked, and the given function call
1402     * fixes it.
1403     */
1404    clear_all_fl_marks();
1405  
1406    GC_VERBOSE_LOG_PRINTF("Bytes recovered before sweep - f.l. count = %ld\n",
1407                          (long)GC_bytes_found);
1408  
1409    /* Reconstruct free lists to contain everything not marked. */
1410    GC_start_reclaim(FALSE);
1411  
1412  #ifdef USE_MUNMAP
1413    if (GC_unmap_threshold > 0    /*< memory unmapping enabled? */
1414        && LIKELY(GC_gc_no != 1)) /*< do not unmap during `GC_init` */
1415      GC_unmap_old(GC_unmap_threshold);
1416  
1417    GC_ASSERT(GC_heapsize >= GC_unmapped_bytes);
1418  #endif
1419    GC_ASSERT(GC_our_mem_bytes >= GC_heapsize);
1420    GC_DBGLOG_PRINTF(
1421        "GC #%lu freed %ld bytes, heap %lu KiB (" IF_USE_MUNMAP(
1422            "+ %lu KiB unmapped ") "+ %lu KiB internal)\n",
1423        (unsigned long)GC_gc_no, (long)GC_bytes_found,
1424        TO_KiB_UL(GC_heapsize - GC_unmapped_bytes) /*, */
1425        COMMA_IF_USE_MUNMAP(TO_KiB_UL(GC_unmapped_bytes)),
1426        TO_KiB_UL(GC_our_mem_bytes - GC_heapsize + sizeof(GC_arrays)));
1427    GC_DBGLOG_PRINT_HEAP_IN_USE();
1428    if (GC_is_full_gc) {
1429      GC_used_heap_size_after_full = GC_heapsize - GC_large_free_bytes;
1430      GC_need_full_gc = FALSE;
1431    } else {
1432      GC_need_full_gc = GC_heapsize - GC_used_heap_size_after_full
1433                        > min_bytes_allocd() + GC_large_free_bytes;
1434    }
1435  
1436    /* Reset or increment counters for next cycle. */
1437    GC_n_attempts = 0;
1438    GC_is_full_gc = FALSE;
1439    GC_bytes_allocd_before_gc += GC_bytes_allocd;
1440    GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
1441    GC_bytes_allocd = 0;
1442    GC_bytes_dropped = 0;
1443    GC_bytes_freed = 0;
1444    GC_finalizer_bytes_freed = 0;
1445  
1446    if (GC_on_collection_event)
1447      GC_on_collection_event(GC_EVENT_RECLAIM_END);
1448  #ifndef NO_CLOCK
1449    if (GC_print_stats) {
1450      CLOCK_TYPE done_time;
1451  
1452      GET_TIME(done_time);
1453  #  if !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1454      /* A convenient place to output finalization statistics. */
1455      GC_print_finalization_stats();
1456  #  endif
1457      GC_log_printf("Finalize and initiate sweep took %lu ms %lu ns"
1458                    " + %lu ms %lu ns\n",
1459                    MS_TIME_DIFF(finalize_time, start_time),
1460                    NS_FRAC_TIME_DIFF(finalize_time, start_time),
1461                    MS_TIME_DIFF(done_time, finalize_time),
1462                    NS_FRAC_TIME_DIFF(done_time, finalize_time));
1463    }
1464  #elif !defined(SMALL_CONFIG) && !defined(GC_NO_FINALIZATION)
1465    if (GC_print_stats)
1466      GC_print_finalization_stats();
1467  #endif
1468  }
1469  
1470  /* Note: accessed with the allocator lock held. */
1471  STATIC word GC_heapsize_at_forced_unmap = 0;
1472  
/*
 * Try to perform a full collection; returns `TRUE` on success.
 * If `force_unmap`, unmap as much memory to the OS as possible.
 * Note: if `stop_func` is 0, then `GC_default_stop_func` is used instead.
 */
STATIC GC_bool
GC_try_to_collect_general(GC_stop_func stop_func, GC_bool force_unmap)
{
  GC_bool result;
#ifdef USE_MUNMAP
  /* Saved unmap threshold; restored after the collection. */
  unsigned old_unmap_threshold;
#endif
  IF_CANCEL(int cancel_state;)

  if (UNLIKELY(!GC_is_initialized))
    GC_init();
  if (GC_debugging_started)
    GC_print_all_smashed();
  /* Run (or notify about) pending finalizers before taking the lock. */
  GC_notify_or_invoke_finalizers();
  LOCK();
  if (force_unmap) {
    /*
     * Record current heap size to make heap growth more conservative
     * afterwards (as if the heap is growing from zero size again).
     */
    GC_heapsize_at_forced_unmap = GC_heapsize;
  }
  DISABLE_CANCEL(cancel_state);
#ifdef USE_MUNMAP
  old_unmap_threshold = GC_unmap_threshold;
  if (force_unmap || (GC_force_unmap_on_gcollect && old_unmap_threshold > 0))
    GC_unmap_threshold = 1; /*< unmap as much as possible */
#endif
  /* Minimize junk left in my registers. */
  GC_noop6(0, 0, 0, 0, 0, 0);
  result = GC_try_to_collect_inner(stop_func != 0 ? stop_func
                                                  : GC_default_stop_func);
#ifdef USE_MUNMAP
  /* Restore it. */
  GC_unmap_threshold = old_unmap_threshold;
#endif
  RESTORE_CANCEL(cancel_state);
  UNLOCK();
  if (result) {
    if (GC_debugging_started)
      GC_print_all_smashed();
    /* The collection may have made more objects ready to finalize. */
    GC_notify_or_invoke_finalizers();
  }
  return result;
}
1519  
1520  /* Externally callable routines to invoke full, stop-the-world collection. */
1521  
1522  GC_API int GC_CALL
1523  GC_try_to_collect(GC_stop_func stop_func)
1524  {
1525    GC_ASSERT(NONNULL_ARG_NOT_NULL(stop_func));
1526    return (int)GC_try_to_collect_general(stop_func, FALSE);
1527  }
1528  
1529  GC_API void GC_CALL
1530  GC_gcollect(void)
1531  {
1532    /*
1533     * Zero is passed as stop_func to get `GC_default_stop_func` value
1534     * while holding the allocator lock (to prevent data race).
1535     */
1536    (void)GC_try_to_collect_general(0, FALSE);
1537    if (get_have_errors())
1538      GC_print_all_errors();
1539  }
1540  
GC_API void GC_CALL
GC_gcollect_and_unmap(void)
{
  /* Collect and force memory unmapping to OS. */
  /* `GC_never_stop_func` means the collection cannot be abandoned. */
  (void)GC_try_to_collect_general(GC_never_stop_func, TRUE);
}
1547  
/*
 * Obtain `bytes` of `HBLKSIZE`-aligned memory from the OS and account
 * for it in `GC_our_mem_bytes` (and in `GC_our_memory`, if the latter
 * is maintained).  Returns `NULL` on failure.  The allocator lock must
 * be held.
 */
GC_INNER ptr_t
GC_os_get_mem(size_t bytes)
{
  ptr_t space;

  GC_ASSERT(I_HOLD_LOCK());
  space = (ptr_t)GET_MEM(bytes); /*< `HBLKSIZE`-aligned */
  if (UNLIKELY(NULL == space))
    return NULL;
#ifdef USE_PROC_FOR_LIBRARIES
  /* Add `HBLKSIZE`-aligned `GET_MEM`-generated block to `GC_our_memory`. */
  if (GC_n_memory >= MAX_HEAP_SECTS)
    ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
  GC_our_memory[GC_n_memory].hs_start = space;
  GC_our_memory[GC_n_memory].hs_bytes = bytes;
  GC_n_memory++;
#endif
  GC_our_mem_bytes += bytes;
  GC_VERBOSE_LOG_PRINTF("Got %lu bytes from OS\n", (unsigned long)bytes);
  return space;
}
1569  
1570  /*
1571   * Use the chunk of memory starting at `h` of size `sz` as part of the heap.
1572   * Assumes `h` is `HBLKSIZE`-aligned, `sz` is a multiple of `HBLKSIZE`.
1573   */
1574  STATIC void
1575  GC_add_to_heap(struct hblk *h, size_t sz)
1576  {
1577    hdr *hhdr;
1578    ptr_t endp;
1579    size_t old_capacity = 0;
1580    void *old_heap_sects = NULL;
1581  #ifdef GC_ASSERTIONS
1582    size_t i;
1583  #endif
1584  
1585    GC_ASSERT(I_HOLD_LOCK());
1586    GC_ASSERT(ADDR(h) % HBLKSIZE == 0);
1587    GC_ASSERT(sz % HBLKSIZE == 0);
1588    GC_ASSERT(sz > 0);
1589    GC_ASSERT(GC_all_nils != NULL);
1590  
1591    if (UNLIKELY(GC_n_heap_sects == GC_capacity_heap_sects)) {
1592      /* Allocate new `GC_heap_sects` with sufficient capacity. */
1593  #ifndef INITIAL_HEAP_SECTS
1594  #  define INITIAL_HEAP_SECTS 32
1595  #endif
1596      size_t new_capacity
1597          = GC_n_heap_sects > 0 ? GC_n_heap_sects * 2 : INITIAL_HEAP_SECTS;
1598      void *new_heap_sects
1599          = GC_scratch_alloc(new_capacity * sizeof(struct HeapSect));
1600  
1601      if (NULL == new_heap_sects) {
1602        /* Retry with smaller yet sufficient capacity. */
1603        new_capacity = GC_n_heap_sects + INITIAL_HEAP_SECTS;
1604        new_heap_sects
1605            = GC_scratch_alloc(new_capacity * sizeof(struct HeapSect));
1606        if (NULL == new_heap_sects)
1607          ABORT("Insufficient memory for heap sections");
1608      }
1609      old_capacity = GC_capacity_heap_sects;
1610      old_heap_sects = GC_heap_sects;
1611      /* Transfer `GC_heap_sects` contents to the newly allocated array. */
1612      if (GC_n_heap_sects > 0)
1613        BCOPY(old_heap_sects, new_heap_sects,
1614              GC_n_heap_sects * sizeof(struct HeapSect));
1615      GC_capacity_heap_sects = new_capacity;
1616      GC_heap_sects = (struct HeapSect *)new_heap_sects;
1617      GC_COND_LOG_PRINTF("Grew heap sections array to %lu elements\n",
1618                         (unsigned long)new_capacity);
1619    }
1620  
1621    while (UNLIKELY(ADDR(h) <= HBLKSIZE)) {
1622      /* Cannot handle memory near address zero. */
1623      ++h;
1624      sz -= HBLKSIZE;
1625      if (0 == sz)
1626        return;
1627    }
1628    while (UNLIKELY(ADDR(h) >= GC_WORD_MAX - sz)) {
1629      /* Prevent overflow when calculating `endp`. */
1630      sz -= HBLKSIZE;
1631      if (0 == sz)
1632        return;
1633    }
1634    endp = (ptr_t)h + sz;
1635  
1636    hhdr = GC_install_header(h);
1637    if (UNLIKELY(NULL == hhdr)) {
1638      /*
1639       * This is extremely unlikely. Cannot add it.  This will almost
1640       * certainly result in a `NULL` returned from the allocator, which
1641       * is entirely appropriate.
1642       */
1643      return;
1644    }
1645  #ifdef GC_ASSERTIONS
1646    /* Ensure no intersection between sections. */
1647    for (i = 0; i < GC_n_heap_sects; i++) {
1648      ptr_t hs_start = GC_heap_sects[i].hs_start;
1649      ptr_t hs_end = hs_start + GC_heap_sects[i].hs_bytes;
1650  
1651      GC_ASSERT(!(ADDR_INSIDE((ptr_t)h, hs_start, hs_end)
1652                  || (ADDR_LT(hs_start, endp) && ADDR_GE(hs_end, endp))
1653                  || (ADDR_LT((ptr_t)h, hs_start) && ADDR_LT(hs_end, endp))));
1654    }
1655  #endif
1656    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)h;
1657    GC_heap_sects[GC_n_heap_sects].hs_bytes = sz;
1658    GC_n_heap_sects++;
1659    hhdr->hb_block = h;
1660    hhdr->hb_sz = sz;
1661    hhdr->hb_flags = 0;
1662    GC_freehblk(h);
1663    GC_heapsize += sz;
1664  
1665    if (ADDR_GE((ptr_t)GC_least_plausible_heap_addr, (ptr_t)h)
1666        || UNLIKELY(NULL == GC_least_plausible_heap_addr)) {
1667      /*
1668       * Making it a little smaller than necessary prevents us from
1669       * getting a false hit from the variable itself.  There is some
1670       * unintentional reflection here.
1671       */
1672      GC_least_plausible_heap_addr = (ptr_t)h - sizeof(ptr_t);
1673    }
1674    if (ADDR_LT((ptr_t)GC_greatest_plausible_heap_addr, endp)) {
1675      GC_greatest_plausible_heap_addr = endp;
1676    }
1677  #ifdef SET_REAL_HEAP_BOUNDS
1678    if (ADDR(h) < GC_least_real_heap_addr
1679        || UNLIKELY(0 == GC_least_real_heap_addr))
1680      GC_least_real_heap_addr = ADDR(h) - sizeof(ptr_t);
1681    if (GC_greatest_real_heap_addr < ADDR(endp)) {
1682  #  ifdef INCLUDE_LINUX_THREAD_DESCR
1683      /* Avoid heap intersection with the static data roots. */
1684      GC_exclude_static_roots_inner((ptr_t)h, endp);
1685  #  endif
1686      GC_greatest_real_heap_addr = ADDR(endp);
1687    }
1688  #endif
1689    GC_handle_protected_regions_limit();
1690    if (UNLIKELY(old_capacity > 0)) {
1691  #ifndef GWW_VDB
1692      /*
1693       * Recycling may call `GC_add_to_heap()` again but should not cause
1694       * resizing of `GC_heap_sects`.
1695       */
1696      GC_scratch_recycle_no_gww(old_heap_sects,
1697                                old_capacity * sizeof(struct HeapSect));
1698  #else
1699      /* TODO: Implement GWW-aware recycling as in `alloc_mark_stack`. */
1700      GC_noop1_ptr(old_heap_sects);
1701  #endif
1702    }
1703  }
1704  
#ifndef NO_DEBUGGING
/*
 * Print the total heap size and every heap section; unless black-listing
 * is compiled out, also report how many blocks of each section are
 * black-listed.
 */
void
GC_print_heap_sects(void)
{
  size_t i;

  GC_printf("Total heap size: %lu" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
            (unsigned long)GC_heapsize /*, */
                COMMA_IF_USE_MUNMAP((unsigned long)GC_unmapped_bytes));

  for (i = 0; i < GC_n_heap_sects; i++) {
    ptr_t start = GC_heap_sects[i].hs_start;
    size_t len = GC_heap_sects[i].hs_bytes;
    unsigned nbl = 0;
#  ifndef NO_BLACK_LISTING
    struct hblk *h;

    /* Count the black-listed blocks within this section. */
    for (h = (struct hblk *)start; ADDR_LT((ptr_t)h, start + len); h++) {
      if (GC_is_black_listed(h, HBLKSIZE))
        nbl++;
    }
#  endif
    GC_printf("Section %u from %p to %p %u/%lu blacklisted\n", (unsigned)i,
              (void *)start, (void *)&start[len], nbl,
              (unsigned long)divHBLKSZ(len));
  }
}
#endif /* !NO_DEBUGGING */
1733  
1734  void *GC_least_plausible_heap_addr = MAKE_CPTR(GC_WORD_MAX);
1735  void *GC_greatest_plausible_heap_addr = NULL;
1736  
1737  STATIC word GC_max_heapsize = 0;
1738  
/*
 * Set the self-imposed maximum heap size (in bytes); zero (the initial
 * value of `GC_max_heapsize`) means no limit is enforced.
 */
GC_API void GC_CALL
GC_set_max_heap_size(GC_word n)
{
  GC_max_heapsize = n;
}
1744  
1745  word GC_max_retries = 0;
1746  
1747  GC_INNER void
1748  GC_scratch_recycle_inner(void *ptr, size_t sz)
1749  {
1750    size_t page_offset;
1751    size_t displ = 0;
1752    size_t recycled_bytes;
1753  
1754    GC_ASSERT(I_HOLD_LOCK());
1755    if (NULL == ptr)
1756      return;
1757  
1758    GC_ASSERT(sz != 0);
1759    GC_ASSERT(GC_page_size != 0);
1760    /* TODO: Assert correct memory flags if `GWW_VDB`. */
1761    page_offset = ADDR(ptr) & (GC_page_size - 1);
1762    if (page_offset != 0)
1763      displ = GC_page_size - page_offset;
1764    recycled_bytes = sz > displ ? (sz - displ) & ~(GC_page_size - 1) : 0;
1765    GC_COND_LOG_PRINTF("Recycle %lu/%lu scratch-allocated bytes at %p\n",
1766                       (unsigned long)recycled_bytes, (unsigned long)sz, ptr);
1767    if (recycled_bytes > 0)
1768      GC_add_to_heap((struct hblk *)((ptr_t)ptr + displ), recycled_bytes);
1769  }
1770  
/*
 * Grow the heap by `n` heap blocks (the request is rounded up to the OS
 * page size).  Returns `FALSE` if the self-imposed heap limit would be
 * exceeded or the OS refuses memory.  The allocator lock must be held.
 */
GC_INNER GC_bool
GC_expand_hp_inner(word n)
{
  size_t sz;
  struct hblk *space;
  /* Number of bytes by which we expect the heap to expand soon. */
  word expansion_slop;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_page_size != 0);
  if (0 == n)
    n = 1;
  sz = ROUNDUP_PAGESIZE((size_t)n * HBLKSIZE);
  GC_DBGLOG_PRINT_HEAP_IN_USE();
  if (GC_max_heapsize != 0
      && (GC_max_heapsize < (word)sz
          || GC_heapsize > GC_max_heapsize - (word)sz)) {
    /* Exceeded the self-imposed limit. */
    return FALSE;
  }
  space = (struct hblk *)GC_os_get_mem(sz);
  if (UNLIKELY(NULL == space)) {
    WARN("Failed to expand heap by %" WARN_PRIuPTR " KiB\n", sz >> 10);
    return FALSE;
  }
  GC_last_heap_growth_gc_no = GC_gc_no;
  GC_INFOLOG_PRINTF("Grow heap to %lu KiB after %lu bytes allocated\n",
                    TO_KiB_UL(GC_heapsize + sz),
                    (unsigned long)GC_bytes_allocd);

  /*
   * Adjust heap limits generously for black-listing to work better.
   * `GC_add_to_heap()` performs minimal adjustment needed for correctness.
   */
  expansion_slop = min_bytes_allocd() + 4 * MAXHINCR * HBLKSIZE;
  if ((0 == GC_last_heap_addr && (ADDR(space) & SIGNB) == 0)
      || (GC_last_heap_addr != 0 && GC_last_heap_addr < ADDR(space))) {
    /* Assume the heap is growing up. */
    if (LIKELY(ADDR(space) < GC_WORD_MAX - (sz + expansion_slop))) {
      ptr_t new_limit = (ptr_t)space + sz + expansion_slop;

      if (ADDR_LT((ptr_t)GC_greatest_plausible_heap_addr, new_limit))
        GC_greatest_plausible_heap_addr = new_limit;
    }
  } else {
    /* Heap is growing down. */
    if (LIKELY(ADDR(space) > expansion_slop + sizeof(ptr_t))) {
      ptr_t new_limit = (ptr_t)space - expansion_slop - sizeof(ptr_t);

      if (ADDR_LT(new_limit, (ptr_t)GC_least_plausible_heap_addr))
        GC_least_plausible_heap_addr = new_limit;
    }
  }
  GC_last_heap_addr = ADDR(space);

  GC_add_to_heap(space, sz);
  /* Notify the client about the heap size change, if requested. */
  if (GC_on_heap_resize)
    (*GC_on_heap_resize)(GC_heapsize);

  return TRUE;
}
1832  
/*
 * Explicitly expand the heap by (at least) `bytes`, expressed as the
 * corresponding number of heap blocks.  Returns nonzero on success.
 */
GC_API int GC_CALL
GC_expand_hp(size_t bytes)
{
  size_t n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(bytes);
  word old_heapsize;
  GC_bool result;

  if (UNLIKELY(!GC_is_initialized))
    GC_init();
  LOCK();
  old_heapsize = GC_heapsize;
  result = GC_expand_hp_inner(n_blocks);
  if (result) {
    GC_requested_heapsize += bytes;
    if (GC_dont_gc) {
      /* Do not call `WARN()` if the heap growth is intentional. */
      GC_ASSERT(GC_heapsize >= old_heapsize);
      GC_heapsize_on_gc_disable += GC_heapsize - old_heapsize;
    }
  }
  UNLOCK();
  /*
   * Really returns a `GC_bool` value, but the function is externally
   * visible, so that is clumsy.
   */
  return (int)result;
}
1860  
1861  GC_INNER unsigned GC_fail_count = 0;
1862  
1863  /*
1864   * The minimum value of the ratio of allocated bytes since the latest
1865   * collection to the amount of finalizers created since that collection
1866   * which triggers the collection instead heap expansion.  Has no effect
1867   * in the incremental mode.
1868   */
1869  #if defined(GC_ALLOCD_BYTES_PER_FINALIZER) && !defined(CPPCHECK)
1870  STATIC word GC_allocd_bytes_per_finalizer = GC_ALLOCD_BYTES_PER_FINALIZER;
1871  #else
1872  STATIC word GC_allocd_bytes_per_finalizer = 10000;
1873  #endif
1874  
/* Setter for `GC_allocd_bytes_per_finalizer` (described above). */
GC_API void GC_CALL
GC_set_allocd_bytes_per_finalizer(GC_word value)
{
  GC_allocd_bytes_per_finalizer = value;
}
1880  
/* Getter for `GC_allocd_bytes_per_finalizer`. */
GC_API GC_word GC_CALL
GC_get_allocd_bytes_per_finalizer(void)
{
  return GC_allocd_bytes_per_finalizer;
}
1886  
1887  static word last_fo_entries = 0;
1888  static word last_bytes_finalized = 0;
1889  
/*
 * Make space for a request of `needed_blocks` heap blocks, either by
 * performing a full collection or by expanding the heap (or both, on
 * retries).  `flags` may include `IGNORE_OFF_PAGE`, which reduces the
 * extra space requested to work around black-listing; `retry` is `TRUE`
 * if this is not the first attempt for the same allocation request.
 * Returns `FALSE` only when we are truly out of memory (the expansion
 * failed and `GC_max_retries` has been exceeded), otherwise `TRUE`.
 * The allocator lock must be held; the caller should re-examine the
 * relevant free lists after a `TRUE` result.
 */
GC_INNER GC_bool
GC_collect_or_expand(word needed_blocks, unsigned flags, GC_bool retry)
{
  GC_bool gc_not_stopped = TRUE;
  word blocks_to_get;
  IF_CANCEL(int cancel_state;)

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_is_initialized);
  DISABLE_CANCEL(cancel_state);
  /*
   * Prefer a collection over an expansion if collections are enabled
   * (non-incremental mode only) and either heap growth is disallowed,
   * or sufficiently many finalizers have been registered since the last
   * collection (relative to the bytes allocated), or the normal GC
   * trigger (`GC_should_collect`) fires.
   */
  if (!GC_incremental && !GC_dont_gc
      && ((GC_dont_expand && GC_bytes_allocd > 0)
          || (GC_fo_entries > last_fo_entries
              && (last_bytes_finalized | GC_bytes_finalized) != 0
              && (GC_fo_entries - last_fo_entries)
                         * GC_allocd_bytes_per_finalizer
                     > GC_bytes_allocd)
          || GC_should_collect())) {
    /*
     * Try to do a full collection using "default" `stop_func` (unless
     * nothing has been allocated since the latest collection or heap
     * expansion is disabled).
     */
    gc_not_stopped = GC_try_to_collect_inner(
        GC_bytes_allocd > 0 && (!GC_dont_expand || !retry)
            ? GC_default_stop_func
            : GC_never_stop_func);
    if (gc_not_stopped || !retry) {
      /*
       * Either the collection has not been aborted or this is the
       * first attempt (in a loop).  Record the finalization snapshot
       * and let the caller retry the allocation.
       */
      last_fo_entries = GC_fo_entries;
      last_bytes_finalized = GC_bytes_finalized;
      RESTORE_CANCEL(cancel_state);
      return TRUE;
    }
  }

  /*
   * Expand the heap.  Request an amount proportional to the current
   * heap size (since the latest forced unmap), scaled down by
   * `GC_free_space_divisor`, plus what the caller actually needs.
   */
  blocks_to_get = (GC_heapsize - GC_heapsize_at_forced_unmap)
                      / (HBLKSIZE * GC_free_space_divisor)
                  + needed_blocks;
  if (blocks_to_get > MAXHINCR) {
#ifdef NO_BLACK_LISTING
    UNUSED_ARG(flags);
    blocks_to_get = needed_blocks > MAXHINCR ? needed_blocks : MAXHINCR;
#else
    word slop;

    /*
     * Get the minimum required to make it likely that we can satisfy
     * the current request in the presence of black-listing.  This will
     * probably be bigger than `MAXHINCR`.
     */
    if ((flags & IGNORE_OFF_PAGE) != 0) {
      slop = 4;
    } else {
      slop = 2 * divHBLKSZ(BL_LIMIT);
      if (slop > needed_blocks)
        slop = needed_blocks;
    }
    if (needed_blocks + slop > MAXHINCR) {
      blocks_to_get = needed_blocks + slop;
    } else {
      blocks_to_get = MAXHINCR;
    }
#endif
    /* Avoid an overflow when the block count is converted to bytes. */
    if (blocks_to_get > divHBLKSZ(GC_WORD_MAX))
      blocks_to_get = divHBLKSZ(GC_WORD_MAX);
  } else if (blocks_to_get < MINHINCR) {
    blocks_to_get = MINHINCR;
  }

  /*
   * Respect `GC_max_heapsize` if set, but never request less than
   * `needed_blocks` (the expansion attempt below will just fail if
   * that still exceeds the limit).
   */
  if (GC_max_heapsize > GC_heapsize) {
    word max_get_blocks = divHBLKSZ(GC_max_heapsize - GC_heapsize);
    if (blocks_to_get > max_get_blocks)
      blocks_to_get
          = max_get_blocks > needed_blocks ? max_get_blocks : needed_blocks;
  }

#ifdef USE_MUNMAP
  if (GC_unmap_threshold > 1) {
    /*
     * Return as much memory to the OS as possible before trying to
     * get memory from it.
     */
    GC_unmap_old(0);
  }
#endif
  /*
   * Try the full-sized expansion first; fall back to the minimal
   * (`needed_blocks`) one before declaring the expansion failed.
   */
  if (!GC_expand_hp_inner(blocks_to_get)
      && (blocks_to_get == needed_blocks
          || !GC_expand_hp_inner(needed_blocks))) {
    if (!gc_not_stopped) {
      /* Do not increment `GC_fail_count` here (and no warning). */
      GC_gcollect_inner();
      GC_ASSERT(0 == GC_bytes_allocd);
    } else if (GC_fail_count++ < GC_max_retries) {
      WARN("Out of Memory!  Trying to continue...\n", 0);
      GC_gcollect_inner();
    } else {
      /* Give up: warn with the current (mapped) heap size and fail. */
#ifdef USE_MUNMAP
      GC_ASSERT(GC_heapsize >= GC_unmapped_bytes);
#endif
#if !defined(SMALL_CONFIG) && (CPP_WORDSZ >= 32)
#  define MAX_HEAPSIZE_WARNED_IN_BYTES (5 << 20) /*< 5 MB */

      if (GC_heapsize > (word)MAX_HEAPSIZE_WARNED_IN_BYTES) {
        WARN("Out of Memory! Heap size: %" WARN_PRIuPTR " MiB."
             " Returning NULL!\n",
             (GC_heapsize - GC_unmapped_bytes) >> 20);
      } else
#endif
      /* else */ {
        WARN("Out of Memory! Heap size: %" WARN_PRIuPTR " bytes."
             " Returning NULL!\n",
             GC_heapsize - GC_unmapped_bytes);
      }
      RESTORE_CANCEL(cancel_state);
      return FALSE;
    }
  } else if (GC_fail_count) {
    GC_COND_LOG_PRINTF("Memory available again...\n");
  }
  RESTORE_CANCEL(cancel_state);
  return TRUE;
}
2016  
/*
 * Ensure that the free list of the given `kind` for objects of `lg`
 * granules is nonempty, and return its first entry (without removing
 * it from the list).  Returns `NULL` if `lg` is zero or if memory
 * cannot be obtained even after collection/expansion attempts.
 * The allocator lock must be held.  Each loop iteration tries, in
 * order: an incremental mark step (true incremental mode), sweeping
 * existing blocks for this size, allocating a fresh heap block, a
 * one-shot minor collection (generational mode), and, finally, a full
 * collection or heap expansion via `GC_collect_or_expand`.
 */
GC_INNER ptr_t
GC_allocobj(size_t lg, int kind)
{
#define MAX_ALLOCOBJ_RETRIES 3
  int retry_cnt = 0;
  void **flh = &GC_obj_kinds[kind].ok_freelist[lg];
#ifndef GC_DISABLE_INCREMENTAL
  /* Set once a minor collection has been tried for this request. */
  GC_bool tried_minor = FALSE;
#endif

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_is_initialized);
  if (UNLIKELY(0 == lg))
    return NULL;

  while (NULL == *flh) {
    /*
     * Only a few iterations are expected at most, otherwise something
     * is wrong in one of the functions called below.
     */
    if (retry_cnt > MAX_ALLOCOBJ_RETRIES)
      ABORT("Too many retries in GC_allocobj");
#ifndef GC_DISABLE_INCREMENTAL
    if (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED && !GC_dont_gc) {
      /*
       * True incremental mode, not just generational.
       * Do our share of marking work.
       */
      GC_collect_a_little_inner(1);
    }
#endif
    /* Sweep blocks for objects of this size. */
    GC_ASSERT(!GC_is_full_gc || NULL == GC_obj_kinds[kind].ok_reclaim_list
              || NULL == GC_obj_kinds[kind].ok_reclaim_list[lg]);
    GC_continue_reclaim(lg, kind);
#if defined(CPPCHECK)
    GC_noop1_ptr(&flh);
#endif
    if (*flh != NULL)
      break;

    /* Sweeping did not help; allocate a new heap block for this size. */
    GC_new_hblk(lg, kind);
#if defined(CPPCHECK)
    GC_noop1_ptr(&flh);
#endif
    if (*flh != NULL)
      break;

#ifndef GC_DISABLE_INCREMENTAL
    /* Generational mode: try a single minor collection once. */
    if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED && !tried_minor
        && !GC_dont_gc) {
      GC_collect_a_little_inner(1);
      tried_minor = TRUE;
      continue;
    }
#endif
    /* Last resort: a full collection or a heap expansion. */
    if (UNLIKELY(!GC_collect_or_expand(1, 0 /* flags */, retry_cnt > 0)))
      return NULL;
    retry_cnt++;
  }
  /* Successful allocation; reset failure count. */
  GC_fail_count = 0;
  return (ptr_t)(*flh);
}
2081