/* finalize.c */

   1  /*
   2   * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
   3   * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
   4   * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
   5   * Copyright (c) 2007 Free Software Foundation, Inc.
   6   * Copyright (c) 2008-2022 Ivan Maidanski
   7   *
   8   * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
   9   * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
  10   *
  11   * Permission is hereby granted to use or copy this program
  12   * for any purpose, provided the above notices are retained on all copies.
  13   * Permission to modify the code and to distribute modified code is granted,
  14   * provided the above notices are retained, and a notice that the code was
  15   * modified is included with the above copyright notice.
  16   */
  17  
  18  #include "private/gc_pmark.h"
  19  
  20  #ifndef GC_NO_FINALIZATION
  21  #  include "gc/javaxfc.h" /*< to get `GC_finalize_all()` as `extern "C"` */
  22  
/*
 * Type of mark procedure used for marking from finalizable object.
 * This procedure normally does not mark the object, only its descendants.
 */
typedef void (*finalization_mark_proc)(ptr_t /* `finalizable_obj_ptr` */);

/*
 * Bucket index for address `addr` in a chained hash table of `size`
 * entries (`size` is `2^log_size`).  Mixes two shifted copies of the
 * address; the low 3 bits are dropped as they carry no entropy for
 * aligned pointers.
 */
#  define HASH3(addr, size, log_size)                               \
    ((size_t)((ADDR(addr) >> 3) ^ (ADDR(addr) >> (3 + (log_size)))) \
     & ((size) - (size_t)1))
/* Same as `HASH3` but derives the table size from its log. */
#  define HASH2(addr, log_size) HASH3(addr, (size_t)1 << (log_size), log_size)
  33  
/*
 * Common prefix of all hash-table entries in this file: the hidden
 * (disguised, so the collector does not trace it) key and the link to
 * the next entry in the same bucket's collision chain.
 */
struct hash_chain_entry {
  GC_hidden_pointer hidden_key;
  struct hash_chain_entry *next;
};
  38  
/*
 * A registered disappearing link.  The hash key (`dl_hidden_link`) is
 * the hidden address of the client's pointer location; `dl_hidden_obj`
 * is the hidden base of the object whose death clears that location.
 */
struct disappearing_link {
  struct hash_chain_entry prolog;
#  define dl_hidden_link prolog.hidden_key /*< field to be cleared */
#  define dl_next(x) (struct disappearing_link *)((x)->prolog.next)
#  define dl_set_next(x, y) \
    (void)((x)->prolog.next = (struct hash_chain_entry *)(y))
  GC_hidden_pointer dl_hidden_obj; /*< pointer to object base */
};
  47  
/* A registered finalizable object, keyed by its hidden base address. */
struct finalizable_object {
  struct hash_chain_entry prolog;
  /*
   * Pointer to object base.  No longer hidden once object is on
   * `finalize_now` queue.
   */
#  define fo_hidden_base prolog.hidden_key
#  define fo_next(x) (struct finalizable_object *)((x)->prolog.next)
#  define fo_set_next(x, y) ((x)->prolog.next = (struct hash_chain_entry *)(y))
  GC_finalization_proc fo_fn;          /*< the finalizer */
  finalization_mark_proc fo_mark_proc; /*< mark-through procedure */
  ptr_t fo_client_data;                /*< opaque data passed to `fo_fn` */
  size_t fo_object_sz; /*< in bytes */
};
  62  
#  ifdef AO_HAVE_store
/*
 * Update `finalize_now` atomically as `GC_should_invoke_finalizers`
 * does not acquire the allocator lock.
 */
#    define SET_FINALIZE_NOW(fo) \
      GC_cptr_store((volatile ptr_t *)&GC_fnlz_roots.finalize_now, (ptr_t)(fo))
#  else
/* No atomic store available; fall back to a plain assignment. */
#    define SET_FINALIZE_NOW(fo) (void)(GC_fnlz_roots.finalize_now = (fo))
#  endif /* !AO_HAVE_store */
  73  
  74  GC_API void GC_CALL
  75  GC_push_finalizer_structures(void)
  76  {
  77    GC_ASSERT(ADDR(&GC_dl_hashtbl.head) % ALIGNMENT == 0);
  78    GC_ASSERT(ADDR(&GC_fnlz_roots) % ALIGNMENT == 0);
  79  #  ifndef GC_LONG_REFS_NOT_NEEDED
  80    GC_ASSERT(ADDR(&GC_ll_hashtbl.head) % ALIGNMENT == 0);
  81    GC_PUSH_ALL_SYM(GC_ll_hashtbl.head);
  82  #  endif
  83    GC_PUSH_ALL_SYM(GC_dl_hashtbl.head);
  84    GC_PUSH_ALL_SYM(GC_fnlz_roots);
  85    /* `GC_toggleref_arr` is pushed specially by `GC_mark_togglerefs`. */
  86  }
  87  
/*
 * Threshold of `log_size` to initiate full collection before growing
 * a hash table.
 */
#  ifndef GC_ON_GROW_LOG_SIZE_MIN
#    define GC_ON_GROW_LOG_SIZE_MIN LOG_HBLKSIZE
#  endif

/*
 * Ensure the hash table has enough capacity.  `*table_ptr` is a pointer
 * to an array of hash headers.  `*log_size_ptr` is the log of its current
 * size.  We update both `*table_ptr` and `*log_size_ptr` on success.
 * `*entries_ptr` is the current number of entries (read-only here, but
 * may be decreased by the collection we trigger below).
 * Must be called with the allocator lock held.
 */
STATIC void
GC_grow_table(struct hash_chain_entry ***table_ptr, unsigned *log_size_ptr,
              const size_t *entries_ptr)
{
  size_t i;
  struct hash_chain_entry *p;
  unsigned log_old_size = *log_size_ptr;
  unsigned log_new_size = log_old_size + 1;
  size_t old_size = NULL == *table_ptr ? 0 : (size_t)1 << log_old_size;
  size_t new_size = (size_t)1 << log_new_size;
  /* FIXME: Power-of-two size often gets rounded up to one more page. */
  struct hash_chain_entry **new_table;

  GC_ASSERT(I_HOLD_LOCK());
  /*
   * Avoid growing the table in case at least 25% of entries can
   * be deleted by enforcing a collection.  Ignored for small tables.
   * In the incremental mode we skip this optimization, as we want to
   * avoid triggering a full collection whenever possible.
   */
  if (log_old_size >= (unsigned)GC_ON_GROW_LOG_SIZE_MIN && !GC_incremental) {
    IF_CANCEL(int cancel_state;)

    DISABLE_CANCEL(cancel_state);
    GC_gcollect_inner();
    RESTORE_CANCEL(cancel_state);
    /* `GC_finalize` might decrease entries value. */
    if (*entries_ptr < ((size_t)1 << log_old_size) - (*entries_ptr >> 2))
      return;
    /* The collection freed enough entries; no need to grow. */
  }

  /* NOTE(review): new buckets are assumed zero-initialized by the     */
  /* internal allocator, so every chain starts empty — confirm if the  */
  /* allocator's clearing policy for `NORMAL` objects ever changes.    */
  new_table = (struct hash_chain_entry **)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
      new_size * sizeof(struct hash_chain_entry *), NORMAL);
  if (NULL == new_table) {
    if (NULL == *table_ptr) {
      ABORT("Insufficient space for initial table allocation");
    } else {
      /* Keep using the (full but valid) old table. */
      return;
    }
  }
  /* Rehash every entry of the old table into the doubled one. */
  for (i = 0; i < old_size; i++) {
    for (p = (*table_ptr)[i]; p != NULL;) {
      ptr_t real_key = (ptr_t)GC_REVEAL_POINTER(p->hidden_key);
      struct hash_chain_entry *next = p->next;
      size_t new_hash = HASH3(real_key, new_size, log_new_size);

      p->next = new_table[new_hash];
      GC_dirty(p);
      new_table[new_hash] = p;
      p = next;
    }
  }
  *log_size_ptr = log_new_size;
  *table_ptr = new_table;
  GC_dirty(new_table); /*< entire object */
}
 157  
 158  GC_API int GC_CALL
 159  GC_register_disappearing_link(void **link)
 160  {
 161    ptr_t base;
 162  
 163    base = (ptr_t)GC_base(link);
 164    if (base == 0)
 165      ABORT("Bad arg to GC_register_disappearing_link");
 166    return GC_general_register_disappearing_link(link, base);
 167  }
 168  
/*
 * Insert (`link`, `obj`) into the given disappearing-link hash table.
 * Returns `GC_SUCCESS` on insertion, `GC_DUPLICATE` if `link` was
 * already registered (in which case its object is updated),
 * `GC_NO_MEMORY` on allocation failure, or `GC_UNIMPLEMENTED` in the
 * find-leak mode.  `tbl_log_name` is used only for logging.
 */
STATIC int
GC_register_disappearing_link_inner(struct dl_hashtbl_s *dl_hashtbl,
                                    void **link, const void *obj,
                                    const char *tbl_log_name)
{
  struct disappearing_link *curr_dl;
  size_t index;
  struct disappearing_link *new_dl;

  GC_ASSERT(GC_is_initialized);
  if (UNLIKELY(GC_find_leak_inner))
    return GC_UNIMPLEMENTED;
#  ifdef GC_ASSERTIONS
  /* Just check accessibility. */
  GC_noop1_ptr(*link);
#  endif
  LOCK();
  GC_ASSERT(obj != NULL && GC_base_C(obj) == obj);
  /* Grow (or create) the table when the load factor exceeds 1. */
  if (UNLIKELY(NULL == dl_hashtbl->head)
      || UNLIKELY(dl_hashtbl->entries > ((size_t)1 << dl_hashtbl->log_size))) {
    GC_grow_table((struct hash_chain_entry ***)&dl_hashtbl->head,
                  &dl_hashtbl->log_size, &dl_hashtbl->entries);
    GC_COND_LOG_PRINTF("Grew %s table to %u entries\n", tbl_log_name,
                       1U << dl_hashtbl->log_size);
  }
  index = HASH2(link, dl_hashtbl->log_size);
  for (curr_dl = dl_hashtbl->head[index]; curr_dl != 0;
       curr_dl = dl_next(curr_dl)) {
    if (curr_dl->dl_hidden_link == GC_HIDE_POINTER(link)) {
      /* Alternatively, `GC_HIDE_NZ_POINTER()` could be used instead. */
      curr_dl->dl_hidden_obj = GC_HIDE_POINTER(obj);
      UNLOCK();
      return GC_DUPLICATE;
    }
  }
  new_dl = (struct disappearing_link *)GC_INTERNAL_MALLOC(
      sizeof(struct disappearing_link), NORMAL);
  if (UNLIKELY(NULL == new_dl)) {
    /*
     * Out of memory: retry through the client OOM handler, dropping
     * the lock around the call.
     */
    GC_oom_func oom_fn = GC_oom_fn;
    UNLOCK();
    new_dl = (struct disappearing_link *)(*oom_fn)(
        sizeof(struct disappearing_link));
    if (0 == new_dl) {
      return GC_NO_MEMORY;
    }
    /* It is not likely we will make it here, but... */
    LOCK();
    /* Recalculate `index` since the table may grow. */
    index = HASH2(link, dl_hashtbl->log_size);
    /* Check again that our disappearing link is not in the table. */
    for (curr_dl = dl_hashtbl->head[index]; curr_dl != 0;
         curr_dl = dl_next(curr_dl)) {
      if (curr_dl->dl_hidden_link == GC_HIDE_POINTER(link)) {
        curr_dl->dl_hidden_obj = GC_HIDE_POINTER(obj);
        UNLOCK();
#  ifndef DBG_HDRS_ALL
        /* Free unused `new_dl` returned by `GC_oom_fn()`. */
        GC_free(new_dl);
#  endif
        return GC_DUPLICATE;
      }
    }
  }
  /* Link the new entry at the head of its bucket. */
  new_dl->dl_hidden_obj = GC_HIDE_POINTER(obj);
  new_dl->dl_hidden_link = GC_HIDE_POINTER(link);
  dl_set_next(new_dl, dl_hashtbl->head[index]);
  GC_dirty(new_dl);
  dl_hashtbl->head[index] = new_dl;
  dl_hashtbl->entries++;
  GC_dirty(dl_hashtbl->head + index);
  UNLOCK();
  return GC_SUCCESS;
}
 242  
 243  GC_API int GC_CALL
 244  GC_general_register_disappearing_link(void **link, const void *obj)
 245  {
 246    if ((ADDR(link) & (ALIGNMENT - 1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
 247      ABORT("Bad arg to GC_general_register_disappearing_link");
 248    return GC_register_disappearing_link_inner(&GC_dl_hashtbl, link, obj, "dl");
 249  }
 250  
#  ifdef DBG_HDRS_ALL
/* With all-debug headers, just unlink the entry instead of freeing it. */
#    define FREE_DL_ENTRY(curr_dl) dl_set_next(curr_dl, NULL)
#  else
#    define FREE_DL_ENTRY(curr_dl) GC_free(curr_dl)
#  endif
 256  
/* Unregisters given `link` and returns the link entry to free. */
/* Returns `NULL` if `link` is not in the table.  Caller holds the lock. */
GC_INLINE struct disappearing_link *
GC_unregister_disappearing_link_inner(struct dl_hashtbl_s *dl_hashtbl,
                                      void **link)
{
  struct disappearing_link *curr_dl;
  struct disappearing_link *prev_dl = NULL;
  size_t index;

  GC_ASSERT(I_HOLD_LOCK());
  if (UNLIKELY(NULL == dl_hashtbl->head))
    return NULL;

  index = HASH2(link, dl_hashtbl->log_size);
  for (curr_dl = dl_hashtbl->head[index]; curr_dl;
       curr_dl = dl_next(curr_dl)) {
    if (curr_dl->dl_hidden_link == GC_HIDE_POINTER(link)) {
      /* Remove found entry from the table. */
      if (NULL == prev_dl) {
        /* Entry is the bucket head. */
        dl_hashtbl->head[index] = dl_next(curr_dl);
        GC_dirty(dl_hashtbl->head + index);
      } else {
        dl_set_next(prev_dl, dl_next(curr_dl));
        GC_dirty(prev_dl);
      }
      dl_hashtbl->entries--;
      break;
    }
    prev_dl = curr_dl;
  }
  /* `curr_dl` is `NULL` here if the loop ran off the chain end. */
  return curr_dl;
}
 289  
 290  GC_API int GC_CALL
 291  GC_unregister_disappearing_link(void **link)
 292  {
 293    struct disappearing_link *curr_dl;
 294  
 295    if ((ADDR(link) & (ALIGNMENT - 1)) != 0) {
 296      /* Nothing to do. */
 297      return 0;
 298    }
 299  
 300    LOCK();
 301    curr_dl = GC_unregister_disappearing_link_inner(&GC_dl_hashtbl, link);
 302    UNLOCK();
 303    if (NULL == curr_dl)
 304      return 0;
 305    FREE_DL_ENTRY(curr_dl);
 306    return 1;
 307  }
 308  
/*
 * Mark from one finalizable object using the specified mark procedure.
 * May not mark the object pointed to by `real_ptr` (i.e, it is the job
 * of the caller, if appropriate).  Note that this is called with the
 * mutator running.  This is safe only if the mutator (client) gets
 * the allocator lock to reveal hidden pointers.
 */
GC_INLINE void
GC_mark_fo(ptr_t real_ptr, finalization_mark_proc fo_mark_proc)
{
  GC_ASSERT(I_HOLD_LOCK());
  fo_mark_proc(real_ptr);
  /* Process objects pushed by the mark procedure. */
  while (!GC_mark_stack_empty())
    MARK_FROM_MARK_STACK();
}
 325  
 326  /* Complete a collection in progress, if any. */
 327  GC_INLINE void
 328  GC_complete_ongoing_collection(void)
 329  {
 330    if (UNLIKELY(GC_collection_in_progress())) {
 331      while (!GC_mark_some(NULL)) {
 332        /* Empty. */
 333      }
 334    }
 335  }
 336  
/* Toggle-refs support. */

#  ifndef GC_TOGGLE_REFS_NOT_NEEDED
typedef union toggle_ref_u GCToggleRef;

/* Client-supplied callback deciding each toggle-ref's fate; 0 if unset. */
STATIC GC_toggleref_func GC_toggleref_callback = 0;
 343  
/*
 * Invoke the toggle-ref callback for every tracked object and compact
 * the array in place: entries the callback drops are removed, the rest
 * are stored back as strong or (hidden) weak references.  An entry with
 * the low address bit set holds a hidden weak pointer; otherwise it
 * holds a plain strong pointer.  Must be called with the lock held.
 */
GC_INNER void
GC_process_togglerefs(void)
{
  size_t i;
  size_t new_size = 0; /*< number of entries kept so far */
  GC_bool needs_barrier = FALSE;

  GC_ASSERT(I_HOLD_LOCK());
  for (i = 0; i < GC_toggleref_array_size; ++i) {
    GCToggleRef *r = &GC_toggleref_arr[i];
    void *obj = r->strong_ref;

    if ((ADDR(obj) & 1) != 0) {
      /* Weak form: recover the real pointer. */
      obj = GC_REVEAL_POINTER(r->weak_ref);
      GC_ASSERT((ADDR(obj) & 1) == 0);
    }
    if (NULL == obj)
      continue; /*< cleared weak reference; drop silently */

    switch (GC_toggleref_callback(obj)) {
    case GC_TOGGLE_REF_DROP:
      /* Do not copy the entry; it is squeezed out by compaction. */
      break;
    case GC_TOGGLE_REF_STRONG:
      GC_toggleref_arr[new_size++].strong_ref = obj;
      needs_barrier = TRUE;
      break;
    case GC_TOGGLE_REF_WEAK:
      GC_toggleref_arr[new_size++].weak_ref = GC_HIDE_POINTER(obj);
      break;
    default:
      ABORT("Bad toggle-ref status returned by callback");
    }
  }

  if (new_size < GC_toggleref_array_size) {
    /* Zero the tail left over from dropped entries. */
    BZERO(&GC_toggleref_arr[new_size],
          (GC_toggleref_array_size - new_size) * sizeof(GCToggleRef));
    GC_toggleref_array_size = new_size;
  }
  if (needs_barrier)
    GC_dirty(GC_toggleref_arr); /*< entire object */
}
 386  
STATIC void GC_normal_finalize_mark_proc(ptr_t);

/*
 * Mark the toggle-ref array itself plus every object held by a strong
 * toggle-ref (weak entries, low bit set, are intentionally skipped).
 * Must be called with the lock held.
 */
STATIC void
GC_mark_togglerefs(void)
{
  size_t i;

  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == GC_toggleref_arr)
    return;

  /* Keep the array storage itself alive. */
  GC_set_mark_bit(GC_toggleref_arr);
  for (i = 0; i < GC_toggleref_array_size; ++i) {
    void *obj = GC_toggleref_arr[i].strong_ref;
    if (obj != NULL && (ADDR(obj) & 1) == 0) {
      /* Push and mark the object. */
      GC_mark_fo((ptr_t)obj, GC_normal_finalize_mark_proc);
      GC_set_mark_bit(obj);
      GC_complete_ongoing_collection();
    }
  }
}
 409  
 410  STATIC void
 411  GC_clear_togglerefs(void)
 412  {
 413    size_t i;
 414  
 415    GC_ASSERT(I_HOLD_LOCK());
 416    for (i = 0; i < GC_toggleref_array_size; ++i) {
 417      GCToggleRef *r = &GC_toggleref_arr[i];
 418  
 419      if ((ADDR(r->strong_ref) & 1) != 0) {
 420        if (!GC_is_marked(GC_REVEAL_POINTER(r->weak_ref))) {
 421          r->weak_ref = 0;
 422        } else {
 423          /* No need to copy, this garbage collector is a non-moving one. */
 424        }
 425      }
 426    }
 427  }
 428  
/* Install the client toggle-ref callback (takes the allocator lock). */
GC_API void GC_CALL
GC_set_toggleref_func(GC_toggleref_func fn)
{
  LOCK();
  GC_toggleref_callback = fn;
  UNLOCK();
}

/* Return the currently installed toggle-ref callback (or 0 if none). */
GC_API GC_toggleref_func GC_CALL
GC_get_toggleref_func(void)
{
  GC_toggleref_func fn;

  READER_LOCK();
  fn = GC_toggleref_callback;
  READER_UNLOCK();
  return fn;
}
 447  
/*
 * Ensure `GC_toggleref_arr` can hold `capacity_inc` more entries,
 * allocating or growing (doubling) the array as needed.  Returns
 * `FALSE` on allocation failure or capacity overflow.  Must be
 * called with the lock held.
 */
static GC_bool
ensure_toggleref_capacity(size_t capacity_inc)
{
  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == GC_toggleref_arr) {
    /* Set the initial capacity. */
    GC_toggleref_array_capacity = 32;

    GC_toggleref_arr = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
        GC_toggleref_array_capacity * sizeof(GCToggleRef), NORMAL);
    if (NULL == GC_toggleref_arr)
      return FALSE;
  }
  if (GC_toggleref_array_size + capacity_inc >= GC_toggleref_array_capacity) {
    GCToggleRef *new_array;
    /* Double the capacity until the requested size fits. */
    while (GC_toggleref_array_capacity
           < GC_toggleref_array_size + capacity_inc) {
      GC_toggleref_array_capacity *= 2;
      /* Reaching the top bit means the next doubling would wrap. */
      if ((GC_toggleref_array_capacity
           & ((size_t)1 << (sizeof(size_t) * 8 - 1)))
          != 0) {
        /* An overflow. */
        return FALSE;
      }
    }

    new_array = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
        GC_toggleref_array_capacity * sizeof(GCToggleRef), NORMAL);
    if (UNLIKELY(NULL == new_array))
      return FALSE;
    if (LIKELY(GC_toggleref_array_size > 0))
      BCOPY(GC_toggleref_arr, new_array,
            GC_toggleref_array_size * sizeof(GCToggleRef));
    GC_INTERNAL_FREE(GC_toggleref_arr);
    GC_toggleref_arr = new_array;
  }
  return TRUE;
}
 486  
/*
 * Track `obj` as a toggle-ref, initially strong or weak as requested.
 * A no-op (still returning `GC_SUCCESS`) if no toggle-ref callback is
 * installed.  Returns `GC_NO_MEMORY` if the array cannot grow.
 */
GC_API int GC_CALL
GC_toggleref_add(void *obj, int is_strong_ref)
{
  int res = GC_SUCCESS;

  GC_ASSERT(NONNULL_ARG_NOT_NULL(obj));
  LOCK();
  GC_ASSERT((ADDR(obj) & 1) == 0 && obj == GC_base(obj));
  if (GC_toggleref_callback != 0) {
    if (!ensure_toggleref_capacity(1)) {
      res = GC_NO_MEMORY;
    } else {
      GCToggleRef *r = &GC_toggleref_arr[GC_toggleref_array_size];

      if (is_strong_ref) {
        r->strong_ref = obj;
        /* Only a strong (traced) pointer store needs the barrier. */
        GC_dirty(GC_toggleref_arr + GC_toggleref_array_size);
      } else {
        r->weak_ref = GC_HIDE_POINTER(obj);
        /* The hidden form must carry the low tag bit. */
        GC_ASSERT((r->weak_ref & 1) != 0);
      }
      GC_toggleref_array_size++;
    }
  }
  UNLOCK();
  return res;
}
 514  #  endif /* !GC_TOGGLE_REFS_NOT_NEEDED */
 515  
/* Finalizer callback support. */

/* Client hook invoked for each object about to be finalized; 0 if unset. */
STATIC GC_await_finalize_proc GC_object_finalized_proc = 0;

/* Install the await-finalize hook (takes the allocator lock). */
GC_API void GC_CALL
GC_set_await_finalize_proc(GC_await_finalize_proc fn)
{
  LOCK();
  GC_object_finalized_proc = fn;
  UNLOCK();
}

/* Return the currently installed await-finalize hook (or 0 if none). */
GC_API GC_await_finalize_proc GC_CALL
GC_get_await_finalize_proc(void)
{
  GC_await_finalize_proc fn;

  READER_LOCK();
  fn = GC_object_finalized_proc;
  READER_UNLOCK();
  return fn;
}
 538  
 539  #  ifndef GC_LONG_REFS_NOT_NEEDED
 540  GC_API int GC_CALL
 541  GC_register_long_link(void **link, const void *obj)
 542  {
 543    if ((ADDR(link) & (ALIGNMENT - 1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
 544      ABORT("Bad arg to GC_register_long_link");
 545    return GC_register_disappearing_link_inner(&GC_ll_hashtbl, link, obj,
 546                                               "long dl");
 547  }
 548  
 549  GC_API int GC_CALL
 550  GC_unregister_long_link(void **link)
 551  {
 552    struct disappearing_link *curr_dl;
 553  
 554    if ((ADDR(link) & (ALIGNMENT - 1)) != 0) {
 555      /* Nothing to do. */
 556      return 0;
 557    }
 558    LOCK();
 559    curr_dl = GC_unregister_disappearing_link_inner(&GC_ll_hashtbl, link);
 560    UNLOCK();
 561    if (NULL == curr_dl)
 562      return 0;
 563    FREE_DL_ENTRY(curr_dl);
 564    return 1;
 565  }
 566  #  endif /* !GC_LONG_REFS_NOT_NEEDED */
 567  
 568  #  ifndef GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED
/*
 * Move a registered link from location `link` to `new_link` in the
 * given table.  Returns `GC_SUCCESS`, `GC_NOT_FOUND` (if `link` is not
 * registered), or `GC_DUPLICATE` (if `new_link` is already registered).
 * Caller holds the allocator lock.
 */
STATIC int
GC_move_disappearing_link_inner(struct dl_hashtbl_s *dl_hashtbl, void **link,
                                void **new_link)
{
  struct disappearing_link *curr_dl, *new_dl;
  struct disappearing_link *prev_dl = NULL;
  size_t curr_index, new_index;
  GC_hidden_pointer curr_hidden_link, new_hidden_link;

#    ifdef GC_ASSERTIONS
  /* Just check accessibility of `*new_link`. */
  GC_noop1_ptr(*new_link);
#    endif
  GC_ASSERT(I_HOLD_LOCK());
  if (UNLIKELY(NULL == dl_hashtbl->head))
    return GC_NOT_FOUND;

  /* Find current link. */
  curr_index = HASH2(link, dl_hashtbl->log_size);
  curr_hidden_link = GC_HIDE_POINTER(link);
  for (curr_dl = dl_hashtbl->head[curr_index]; curr_dl;
       curr_dl = dl_next(curr_dl)) {
    if (curr_dl->dl_hidden_link == curr_hidden_link)
      break;
    prev_dl = curr_dl;
  }
  if (UNLIKELY(NULL == curr_dl)) {
    return GC_NOT_FOUND;
  } else if (link == new_link) {
    /* Nothing to do. */
    return GC_SUCCESS;
  }

  /* `link` is found; now check `new_link` is not present. */
  new_index = HASH2(new_link, dl_hashtbl->log_size);
  new_hidden_link = GC_HIDE_POINTER(new_link);
  for (new_dl = dl_hashtbl->head[new_index]; new_dl;
       new_dl = dl_next(new_dl)) {
    if (new_dl->dl_hidden_link == new_hidden_link) {
      /* Target already registered; bail out. */
      return GC_DUPLICATE;
    }
  }

  /* Remove from old, add to new, update `link`. */
  if (NULL == prev_dl) {
    dl_hashtbl->head[curr_index] = dl_next(curr_dl);
  } else {
    dl_set_next(prev_dl, dl_next(curr_dl));
    GC_dirty(prev_dl);
  }
  curr_dl->dl_hidden_link = new_hidden_link;
  dl_set_next(curr_dl, dl_hashtbl->head[new_index]);
  dl_hashtbl->head[new_index] = curr_dl;
  GC_dirty(curr_dl);
  GC_dirty(dl_hashtbl->head); /*< entire object */
  return GC_SUCCESS;
}
 626  
 627  GC_API int GC_CALL
 628  GC_move_disappearing_link(void **link, void **new_link)
 629  {
 630    int result;
 631  
 632    if ((ADDR(new_link) & (ALIGNMENT - 1)) != 0
 633        || !NONNULL_ARG_NOT_NULL(new_link))
 634      ABORT("Bad new_link arg to GC_move_disappearing_link");
 635    if ((ADDR(link) & (ALIGNMENT - 1)) != 0) {
 636      /* Nothing to do. */
 637      return GC_NOT_FOUND;
 638    }
 639    LOCK();
 640    result = GC_move_disappearing_link_inner(&GC_dl_hashtbl, link, new_link);
 641    UNLOCK();
 642    return result;
 643  }
 644  
 645  #    ifndef GC_LONG_REFS_NOT_NEEDED
 646  GC_API int GC_CALL
 647  GC_move_long_link(void **link, void **new_link)
 648  {
 649    int result;
 650  
 651    if ((ADDR(new_link) & (ALIGNMENT - 1)) != 0
 652        || !NONNULL_ARG_NOT_NULL(new_link))
 653      ABORT("Bad new_link arg to GC_move_long_link");
 654    if ((ADDR(link) & (ALIGNMENT - 1)) != 0) {
 655      /* Nothing to do. */
 656      return GC_NOT_FOUND;
 657    }
 658    LOCK();
 659    result = GC_move_disappearing_link_inner(&GC_ll_hashtbl, link, new_link);
 660    UNLOCK();
 661    return result;
 662  }
 663  #    endif
 664  #  endif /* !GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED */
 665  
/*
 * Various finalization marker procedures.  Note that mark stack overflow
 * is handled by the caller, and is not a disaster.
 */

#  if defined(_MSC_VER) && defined(I386)
GC_ATTR_NOINLINE
/* Otherwise some optimizer bug is tickled in VC for x86 (v19, at least). */
#  endif
/* Push the object's contents (its descendants) onto the mark stack. */
STATIC void
GC_normal_finalize_mark_proc(ptr_t p)
{
  GC_mark_stack_top = GC_push_obj(p, HDR(p), GC_mark_stack_top,
                                  GC_mark_stack + GC_mark_stack_size);
}
 681  
/*
 * This only pays very partial attention to the mark descriptor.
 * It does the right thing for normal and atomic objects, and treats
 * most others as normal.  Pointers back into the object itself
 * (self-references) are deliberately not pushed.
 */
STATIC void
GC_ignore_self_finalize_mark_proc(ptr_t p)
{
  const hdr *hhdr = HDR(p);
  word descr = hhdr->hb_descr;
  ptr_t current_p;
  ptr_t scan_limit;
  ptr_t target_limit = p + hhdr->hb_sz - 1; /*< last byte of the object */

  if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
    /* Simple length descriptor: scan only the first `descr` bytes. */
    scan_limit = p + descr - sizeof(ptr_t);
  } else {
    /* Otherwise conservatively scan the whole object. */
    scan_limit = target_limit + 1 - sizeof(ptr_t);
  }
  for (current_p = p; ADDR_GE(scan_limit, current_p); current_p += ALIGNMENT) {
    ptr_t q;

    LOAD_PTR_OR_CONTINUE(q, current_p);
    /* Push `q` only if it points outside the object itself. */
    if (ADDR_LT(q, p) || ADDR_LT(target_limit, q)) {
      GC_PUSH_ONE_HEAP(q, current_p, GC_mark_stack_top);
    }
  }
}
 710  
/* Marker procedure that marks nothing: no finalization ordering. */
STATIC void
GC_null_finalize_mark_proc(ptr_t p)
{
  UNUSED_ARG(p);
}
 716  
/*
 * `GC_unreachable_finalize_mark_proc` is an alias for normal marking,
 * but it is explicitly tested for, and triggers different behavior.
 * Objects registered in this way are not finalized if they are reachable
 * by other finalizable objects, even if those other objects specify
 * no ordering.
 */
STATIC void
GC_unreachable_finalize_mark_proc(ptr_t p)
{
  /*
   * A dummy comparison to ensure the compiler not to optimize two
   * identical functions into a single one (thus, to ensure a unique
   * address of each).  Alternatively, `GC_noop1_ptr(p)` could be used.
   */
  if (UNLIKELY(NULL == p))
    return;

  GC_normal_finalize_mark_proc(p);
}

/* Avoid the work if unreachable finalizable objects are not used. */
/* TODO: Turn `need_unreachable_finalization` into a counter. */
static GC_bool need_unreachable_finalization = FALSE;
 741  
 742  /*
 743   * Register a finalization function.  See `gc.h` file for details.
 744   * The last parameter is a procedure that determines marking for
 745   * finalization ordering.  Any objects marked by that procedure will be
 746   * guaranteed to not have been finalized when this finalizer is invoked.
 747   */
STATIC void
GC_register_finalizer_inner(void *obj, GC_finalization_proc fn, void *cd,
                            GC_finalization_proc *ofn, void **ocd,
                            finalization_mark_proc mp)
{
  struct finalizable_object *curr_fo;
  size_t index;
  struct finalizable_object *new_fo = 0;
  const hdr *hhdr = NULL; /*< initialized to prevent warning */

  GC_ASSERT(GC_is_initialized);
  if (UNLIKELY(GC_find_leak_inner)) {
    /* No-op.  `*ocd` and `*ofn` remain unchanged. */
    return;
  }
  LOCK();
  GC_ASSERT(obj != NULL && GC_base_C(obj) == obj);
  if (mp == GC_unreachable_finalize_mark_proc)
    need_unreachable_finalization = TRUE;
  /* Grow (or create) the table when the load factor exceeds 1. */
  if (UNLIKELY(NULL == GC_fnlz_roots.fo_head)
      || UNLIKELY(GC_fo_entries > ((size_t)1 << GC_log_fo_table_size))) {
    GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
                  &GC_log_fo_table_size, &GC_fo_entries);
    GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
                       1U << GC_log_fo_table_size);
  }
  /* Retry loop: iterates only when the lock was dropped for the OOM fn. */
  for (;;) {
    struct finalizable_object *prev_fo = NULL;
    GC_oom_func oom_fn;

    index = HASH2(obj, GC_log_fo_table_size);
    curr_fo = GC_fnlz_roots.fo_head[index];
    while (curr_fo != NULL) {
      GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
      if (curr_fo->fo_hidden_base == GC_HIDE_POINTER(obj)) {
        /*
         * Interruption by a signal in the middle of this should be safe.
         * The client may see only `*ocd` updated, but we will declare that
         * to be his problem.
         */
        if (ocd)
          *ocd = curr_fo->fo_client_data;
        if (ofn)
          *ofn = curr_fo->fo_fn;
        /* Delete the structure for `obj`. */
        if (prev_fo == 0) {
          GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
        } else {
          fo_set_next(prev_fo, fo_next(curr_fo));
          GC_dirty(prev_fo);
        }
        if (fn == 0) {
          /* A null `fn` means unregister. */
          GC_fo_entries--;
          /*
           * May not happen if we get a signal.  But a high estimate will
           * only make the table larger than necessary.
           */
#  if !defined(THREADS) && !defined(DBG_HDRS_ALL)
          GC_free(curr_fo);
#  endif
        } else {
          curr_fo->fo_fn = fn;
          curr_fo->fo_client_data = (ptr_t)cd;
          curr_fo->fo_mark_proc = mp;
          GC_dirty(curr_fo);
          /*
           * Reinsert it.  We deleted it first to maintain consistency in
           * the event of a signal.
           */
          if (prev_fo == 0) {
            GC_fnlz_roots.fo_head[index] = curr_fo;
          } else {
            fo_set_next(prev_fo, curr_fo);
            GC_dirty(prev_fo);
          }
        }
        if (NULL == prev_fo)
          GC_dirty(GC_fnlz_roots.fo_head + index);
        UNLOCK();
#  ifndef DBG_HDRS_ALL
        /* Free unused `new_fo` returned by `GC_oom_fn()`. */
        GC_free(new_fo);
#  endif
        return;
      }
      prev_fo = curr_fo;
      curr_fo = fo_next(curr_fo);
    }
    if (UNLIKELY(new_fo != 0)) {
      /* `new_fo` is returned by `GC_oom_fn()`. */
      GC_ASSERT(fn != 0);
#  ifdef LINT2
      if (NULL == hhdr)
        ABORT("Bad hhdr in GC_register_finalizer_inner");
#  endif
      break;
    }
    if (fn == 0) {
      /* Unregistering an object that was never registered: no-op. */
      if (ocd)
        *ocd = 0;
      if (ofn)
        *ofn = 0;
      UNLOCK();
      return;
    }
    GET_HDR(obj, hhdr);
    if (UNLIKELY(NULL == hhdr)) {
      /* We will not collect it, hence finalizer would not be run. */
      if (ocd)
        *ocd = 0;
      if (ofn)
        *ofn = 0;
      UNLOCK();
      return;
    }
    new_fo = (struct finalizable_object *)GC_INTERNAL_MALLOC(
        sizeof(struct finalizable_object), NORMAL);
    if (LIKELY(new_fo != 0))
      break;
    /* Out of memory: retry through the client OOM handler, unlocked. */
    oom_fn = GC_oom_fn;
    UNLOCK();
    new_fo = (struct finalizable_object *)(*oom_fn)(
        sizeof(struct finalizable_object));
    if (0 == new_fo) {
      /* Not enough memory.  `*ocd` and `*ofn` remain unchanged. */
      return;
    }
    /* It is not likely we will make it here, but... */
    LOCK();
    /*
     * Recalculate index since the table may grow and check again that
     * our finalizer is not in the table.
     */
  }
  /* Fill in and insert the new entry at the head of its bucket. */
  GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
  if (ocd)
    *ocd = 0;
  if (ofn)
    *ofn = 0;
  new_fo->fo_hidden_base = GC_HIDE_POINTER(obj);
  new_fo->fo_fn = fn;
  new_fo->fo_client_data = (ptr_t)cd;
  new_fo->fo_object_sz = hhdr->hb_sz;
  new_fo->fo_mark_proc = mp;
  fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
  GC_dirty(new_fo);
  GC_fo_entries++;
  GC_fnlz_roots.fo_head[index] = new_fo;
  GC_dirty(GC_fnlz_roots.fo_head + index);
  UNLOCK();
}
 899  
/*
 * Public API: register finalizer `fn` with client data `cd` for object
 * `obj`, using the normal finalization mark procedure (descendants of
 * `obj` are marked, so objects it references outlive it until the
 * finalizer runs).  Any previously registered finalizer and client data
 * are returned via `*ofn` and `*ocd` by `GC_register_finalizer_inner`.
 */
GC_API void GC_CALL
GC_register_finalizer(void *obj, GC_finalization_proc fn, void *cd,
                      GC_finalization_proc *ofn, void **ocd)
{
  GC_register_finalizer_inner(obj, fn, cd, ofn, ocd,
                              GC_normal_finalize_mark_proc);
}
 907  
/*
 * Same as `GC_register_finalizer` but uses the "ignore self" mark
 * procedure: pointers from `obj` back to itself do not prevent
 * finalization (useful for self-referential objects).
 */
GC_API void GC_CALL
GC_register_finalizer_ignore_self(void *obj, GC_finalization_proc fn, void *cd,
                                  GC_finalization_proc *ofn, void **ocd)
{
  GC_register_finalizer_inner(obj, fn, cd, ofn, ocd,
                              GC_ignore_self_finalize_mark_proc);
}
 915  
/*
 * Same as `GC_register_finalizer` but uses the null mark procedure:
 * no objects reachable from `obj` are retained on its behalf, so no
 * finalization ordering is imposed (cycles cannot delay finalization).
 */
GC_API void GC_CALL
GC_register_finalizer_no_order(void *obj, GC_finalization_proc fn, void *cd,
                               GC_finalization_proc *ofn, void **ocd)
{
  GC_register_finalizer_inner(obj, fn, cd, ofn, ocd,
                              GC_null_finalize_mark_proc);
}
 923  
/*
 * Same as `GC_register_finalizer` but the finalizer is run only when
 * the object is truly unreachable, even from other finalizable objects
 * (see the "unreachable" handling in `GC_finalize`).  Requires Java-style
 * finalization semantics to be enabled.
 */
GC_API void GC_CALL
GC_register_finalizer_unreachable(void *obj, GC_finalization_proc fn, void *cd,
                                  GC_finalization_proc *ofn, void **ocd)
{
  GC_ASSERT(GC_java_finalization);
  GC_register_finalizer_inner(obj, fn, cd, ofn, ocd,
                              GC_unreachable_finalize_mark_proc);
}
 932  
 933  #  ifndef NO_DEBUGGING
 934  STATIC void
 935  GC_dump_finalization_links(const struct dl_hashtbl_s *dl_hashtbl)
 936  {
 937    size_t dl_size = (size_t)1 << dl_hashtbl->log_size;
 938    size_t i;
 939  
 940    if (NULL == dl_hashtbl->head) {
 941      /* The table is empty. */
 942      return;
 943    }
 944  
 945    for (i = 0; i < dl_size; i++) {
 946      struct disappearing_link *curr_dl;
 947  
 948      for (curr_dl = dl_hashtbl->head[i]; curr_dl != 0;
 949           curr_dl = dl_next(curr_dl)) {
 950        ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_obj);
 951        ptr_t real_link = (ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_link);
 952  
 953        GC_printf("Object: %p, link value: %p, link addr: %p\n",
 954                  (void *)real_ptr, *(void **)real_link, (void *)real_link);
 955      }
 956    }
 957  }
 958  
 959  GC_API void GC_CALL
 960  GC_dump_finalization(void)
 961  {
 962    struct finalizable_object *curr_fo;
 963    size_t i;
 964    size_t fo_size
 965        = GC_fnlz_roots.fo_head == NULL ? 0 : (size_t)1 << GC_log_fo_table_size;
 966  
 967    GC_printf("\n***Disappearing (short) links:\n");
 968    GC_dump_finalization_links(&GC_dl_hashtbl);
 969  #    ifndef GC_LONG_REFS_NOT_NEEDED
 970    GC_printf("\n***Disappearing long links:\n");
 971    GC_dump_finalization_links(&GC_ll_hashtbl);
 972  #    endif
 973    GC_printf("\n***Finalizers:\n");
 974    for (i = 0; i < fo_size; i++) {
 975      for (curr_fo = GC_fnlz_roots.fo_head[i]; curr_fo != NULL;
 976           curr_fo = fo_next(curr_fo)) {
 977        ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
 978  
 979        GC_printf("Finalizable object: %p\n", (void *)real_ptr);
 980      }
 981    }
 982  }
 983  #  endif /* !NO_DEBUGGING */
 984  
#  ifndef SMALL_CONFIG
STATIC size_t GC_old_dl_entries = 0; /*< for stats printing */
#    ifndef GC_LONG_REFS_NOT_NEEDED
STATIC size_t GC_old_ll_entries = 0; /*< for stats printing (long links) */
#    endif
#  endif /* !SMALL_CONFIG */
 991  
 992  #  ifndef THREADS
 993  /*
 994   * Checks and updates the level of finalizers recursion.
 995   * Returns `NULL` if `GC_invoke_finalizers()` should not be called by
 996   * the collector (to minimize the risk of a deep finalizers recursion),
 997   * otherwise returns a pointer to `GC_finalizer_nested`.
 998   */
 999  STATIC unsigned char *
1000  GC_check_finalizer_nested(void)
1001  {
1002    unsigned nesting_level = GC_finalizer_nested;
1003    if (nesting_level) {
1004      /*
1005       * We are inside another `GC_invoke_finalizers()`.  Skip some
1006       * implicitly-called `GC_invoke_finalizers()` depending on the
1007       * nesting (recursion) level.
1008       */
1009      if ((unsigned)(++GC_finalizer_skipped) < (1U << nesting_level))
1010        return NULL;
1011      GC_finalizer_skipped = 0;
1012    }
1013    GC_finalizer_nested = (unsigned char)(nesting_level + 1);
1014    return &GC_finalizer_nested;
1015  }
1016  #  endif /* !THREADS */
1017  
/*
 * Walk the given disappearing-links hash table after the mark phase.
 * With `is_remove_dangling` equal to `FALSE`: for every link whose target
 * object is unmarked (unreachable), clear the link location to `NULL` and
 * remove the entry.  With `is_remove_dangling` equal to `TRUE`: remove
 * entries whose link location itself lies inside an unmarked heap object
 * (the location is about to be reclaimed), without writing through it.
 * Called with the allocator lock held.
 */
GC_INLINE void
GC_make_disappearing_links_disappear(struct dl_hashtbl_s *dl_hashtbl,
                                     GC_bool is_remove_dangling)
{
  size_t i;
  size_t dl_size = (size_t)1 << dl_hashtbl->log_size;
  GC_bool needs_barrier = FALSE;

  GC_ASSERT(I_HOLD_LOCK());
  if (NULL == dl_hashtbl->head) {
    /* The table is empty. */
    return;
  }

  for (i = 0; i < dl_size; i++) {
    struct disappearing_link *curr_dl, *next_dl;
    struct disappearing_link *prev_dl = NULL;

    for (curr_dl = dl_hashtbl->head[i]; curr_dl != NULL; curr_dl = next_dl) {
      next_dl = dl_next(curr_dl);
#  if defined(GC_ASSERTIONS) && !defined(THREAD_SANITIZER)
      /* Check accessibility of the location pointed by the link. */
      GC_noop1_ptr(*(ptr_t *)GC_REVEAL_POINTER(curr_dl->dl_hidden_link));
#  endif
      if (is_remove_dangling) {
        /* Base address of the object containing the link location. */
        ptr_t real_link
            = (ptr_t)GC_base(GC_REVEAL_POINTER(curr_dl->dl_hidden_link));

        /* Keep the entry if the location is not in the heap or is live. */
        if (NULL == real_link || LIKELY(GC_is_marked(real_link))) {
          prev_dl = curr_dl;
          continue;
        }
      } else {
        if (LIKELY(GC_is_marked(
                (ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_obj)))) {
          prev_dl = curr_dl;
          continue;
        }
        /* The target is unreachable: make the link "disappear". */
        *(ptr_t *)GC_REVEAL_POINTER(curr_dl->dl_hidden_link) = NULL;
      }

      /* Delete `curr_dl` entry from `dl_hashtbl`. */
      if (NULL == prev_dl) {
        /* Head of the chain changed; dirty the whole table once at the end. */
        dl_hashtbl->head[i] = next_dl;
        needs_barrier = TRUE;
      } else {
        dl_set_next(prev_dl, next_dl);
        GC_dirty(prev_dl);
      }
      GC_clear_mark_bit(curr_dl);
      dl_hashtbl->entries--;
    }
  }
  if (needs_barrier)
    GC_dirty(dl_hashtbl->head); /*< entire object */
}
1074  
/*
 * The finalization pass run by the collector with the allocator lock
 * held, after marking from roots is complete: clears disappearing links
 * to unreachable objects, marks everything reachable from finalizable
 * objects, moves unreachable finalizable objects onto the
 * `finalize_now` queue, performs Java-style ordering adjustments, and
 * finally removes dangling disappearing links.
 */
GC_INNER void
GC_finalize(void)
{
  struct finalizable_object *curr_fo, *prev_fo, *next_fo;
  ptr_t real_ptr;
  size_t i;
  size_t fo_size
      = GC_fnlz_roots.fo_head == NULL ? 0 : (size_t)1 << GC_log_fo_table_size;
  GC_bool needs_barrier = FALSE;

  GC_ASSERT(I_HOLD_LOCK());
#  ifndef SMALL_CONFIG
  /* Save current `GC_dl_entries` value for stats printing. */
  GC_old_dl_entries = GC_dl_hashtbl.entries;
#    ifndef GC_LONG_REFS_NOT_NEEDED
  /* Save current `GC_ll_entries` value for stats printing. */
  GC_old_ll_entries = GC_ll_hashtbl.entries;
#    endif
#  endif

#  ifndef GC_TOGGLE_REFS_NOT_NEEDED
  GC_mark_togglerefs();
#  endif
  /* Clear short links pointing to unreachable objects. */
  GC_make_disappearing_links_disappear(&GC_dl_hashtbl, FALSE);

  /*
   * Mark all objects reachable via chains of 1 or more pointers from
   * finalizable objects.
   */
  GC_ASSERT(!GC_collection_in_progress());
  for (i = 0; i < fo_size; i++) {
    for (curr_fo = GC_fnlz_roots.fo_head[i]; curr_fo != NULL;
         curr_fo = fo_next(curr_fo)) {
      GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
      real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
      if (!GC_is_marked(real_ptr)) {
        GC_MARKED_FOR_FINALIZATION(real_ptr);
        GC_mark_fo(real_ptr, curr_fo->fo_mark_proc);
        /* If the object became marked, it is reachable from itself. */
        if (GC_is_marked(real_ptr)) {
          WARN("Finalization cycle involving %p\n", real_ptr);
        }
      }
    }
  }
  /* Enqueue for finalization all objects that are still unreachable. */
  GC_bytes_finalized = 0;
  for (i = 0; i < fo_size; i++) {
    curr_fo = GC_fnlz_roots.fo_head[i];
    prev_fo = NULL;
    while (curr_fo != NULL) {
      real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
      if (!GC_is_marked(real_ptr)) {
        if (!GC_java_finalization) {
          /* Keep the object alive until its finalizer has run. */
          GC_set_mark_bit(real_ptr);
        }
        /* Delete from hash table. */
        next_fo = fo_next(curr_fo);
        if (NULL == prev_fo) {
          GC_fnlz_roots.fo_head[i] = next_fo;
          if (GC_object_finalized_proc) {
            /*
             * The callback below may trigger further writes, so dirty
             * this slot immediately rather than deferring to the single
             * whole-table barrier at the end.
             */
            GC_dirty(GC_fnlz_roots.fo_head + i);
          } else {
            needs_barrier = TRUE;
          }
        } else {
          fo_set_next(prev_fo, next_fo);
          GC_dirty(prev_fo);
        }
        GC_fo_entries--;
        if (GC_object_finalized_proc)
          GC_object_finalized_proc(real_ptr);

        /* Add to list of objects awaiting finalization. */
        fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
        GC_dirty(curr_fo);
        SET_FINALIZE_NOW(curr_fo);
        /* Unhide object pointer so any future collections will see it. */
        curr_fo->fo_hidden_base
            = (GC_hidden_pointer)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);

        GC_bytes_finalized
            += (word)curr_fo->fo_object_sz + sizeof(struct finalizable_object);
        GC_ASSERT(GC_is_marked(GC_base(curr_fo)));
        curr_fo = next_fo;
      } else {
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
      }
    }
  }

  if (GC_java_finalization) {
    /*
     * Make sure we mark everything reachable from objects finalized
     * using the no-order `fo_mark_proc`.
     */
    for (curr_fo = GC_fnlz_roots.finalize_now; curr_fo != NULL;
         curr_fo = fo_next(curr_fo)) {
      real_ptr = (ptr_t)curr_fo->fo_hidden_base; /*< revealed */
      if (!GC_is_marked(real_ptr)) {
        if (curr_fo->fo_mark_proc == GC_null_finalize_mark_proc) {
          GC_mark_fo(real_ptr, GC_normal_finalize_mark_proc);
        }
        if (curr_fo->fo_mark_proc != GC_unreachable_finalize_mark_proc) {
          GC_set_mark_bit(real_ptr);
        }
      }
    }

    /*
     * Now revive finalize-when-unreachable objects reachable from other
     * finalizable objects.
     */
    if (need_unreachable_finalization) {
      curr_fo = GC_fnlz_roots.finalize_now;
#  if defined(GC_ASSERTIONS) || defined(LINT2)
      if (curr_fo != NULL && NULL == GC_fnlz_roots.fo_head)
        ABORT("GC_fnlz_roots.fo_head is null");
#  endif
      for (prev_fo = NULL; curr_fo != NULL;
           prev_fo = curr_fo, curr_fo = next_fo) {
        next_fo = fo_next(curr_fo);
        if (curr_fo->fo_mark_proc != GC_unreachable_finalize_mark_proc)
          continue;

        real_ptr = (ptr_t)curr_fo->fo_hidden_base; /*< revealed */
        if (!GC_is_marked(real_ptr)) {
          /* Truly unreachable: leave it queued, keep it alive. */
          GC_set_mark_bit(real_ptr);
          continue;
        }
        /* Reachable again: unlink from the `finalize_now` queue... */
        if (NULL == prev_fo) {
          SET_FINALIZE_NOW(next_fo);
        } else {
          fo_set_next(prev_fo, next_fo);
          GC_dirty(prev_fo);
        }
        curr_fo->fo_hidden_base = GC_HIDE_POINTER(real_ptr);
        GC_bytes_finalized
            -= (word)curr_fo->fo_object_sz + sizeof(struct finalizable_object);

        /* ... and reinsert it into the registered-finalizers table. */
        i = HASH2(real_ptr, GC_log_fo_table_size);
        fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
        GC_dirty(curr_fo);
        GC_fo_entries++;
        GC_fnlz_roots.fo_head[i] = curr_fo;
        /*
         * Step back so the loop-update `prev_fo = curr_fo` restores the
         * previous queue element (the current one was removed).
         */
        curr_fo = prev_fo;
        needs_barrier = TRUE;
      }
    }
  }
  if (needs_barrier)
    GC_dirty(GC_fnlz_roots.fo_head); /*< entire object */

  /* Remove dangling disappearing links. */
  GC_make_disappearing_links_disappear(&GC_dl_hashtbl, TRUE);

#  ifndef GC_TOGGLE_REFS_NOT_NEEDED
  GC_clear_togglerefs();
#  endif
#  ifndef GC_LONG_REFS_NOT_NEEDED
  GC_make_disappearing_links_disappear(&GC_ll_hashtbl, FALSE);
  GC_make_disappearing_links_disappear(&GC_ll_hashtbl, TRUE);
#  endif

  if (GC_fail_count) {
    /*
     * Do not prevent running finalizers if there has been an allocation
     * failure recently.
     */
#  ifdef THREADS
    GC_reset_finalizer_nested();
#  else
    GC_finalizer_nested = 0;
#  endif
  }
}
1251  
1252  /*
1253   * Count of finalizers to run, at most, during a single invocation
1254   * of `GC_invoke_finalizers()`; zero means no limit.  Accessed with the
1255   * allocator lock held.
1256   */
1257  STATIC unsigned GC_interrupt_finalizers = 0;
1258  
1259  #  ifndef JAVA_FINALIZATION_NOT_NEEDED
1260  
1261  /*
1262   * Enqueue all remaining finalizers to be run.  A collection in progress,
1263   * if any, is completed when the first finalizer is enqueued.
1264   */
1265  STATIC void
1266  GC_enqueue_all_finalizers(void)
1267  {
1268    size_t i;
1269    size_t fo_size
1270        = GC_fnlz_roots.fo_head == NULL ? 0 : (size_t)1 << GC_log_fo_table_size;
1271  
1272    GC_ASSERT(I_HOLD_LOCK());
1273    GC_bytes_finalized = 0;
1274    for (i = 0; i < fo_size; i++) {
1275      struct finalizable_object *curr_fo = GC_fnlz_roots.fo_head[i];
1276  
1277      GC_fnlz_roots.fo_head[i] = NULL;
1278      while (curr_fo != NULL) {
1279        struct finalizable_object *next_fo;
1280        ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
1281  
1282        GC_mark_fo(real_ptr, GC_normal_finalize_mark_proc);
1283        GC_set_mark_bit(real_ptr);
1284        GC_complete_ongoing_collection();
1285        next_fo = fo_next(curr_fo);
1286  
1287        /* Add to list of objects awaiting finalization. */
1288        fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
1289        GC_dirty(curr_fo);
1290        SET_FINALIZE_NOW(curr_fo);
1291  
1292        /* Unhide object pointer so any future collections will see it. */
1293        curr_fo->fo_hidden_base
1294            = (GC_hidden_pointer)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
1295        GC_bytes_finalized
1296            += curr_fo->fo_object_sz + sizeof(struct finalizable_object);
1297        curr_fo = next_fo;
1298      }
1299    }
1300    /* All entries are deleted from the hash table. */
1301    GC_fo_entries = 0;
1302  }
1303  
/*
 * Public API: repeatedly enqueue and run every registered finalizer
 * until none remain.  The loop is needed because running finalizers
 * may register new ones.
 */
GC_API void GC_CALL
GC_finalize_all(void)
{
  LOCK();
  while (GC_fo_entries > 0) {
    GC_enqueue_all_finalizers();
    /* Reset, so that the whole queue is drained below. */
    GC_interrupt_finalizers = 0;
    UNLOCK();
    GC_invoke_finalizers();
    /*
     * Running the finalizers in this thread is arguably not a good idea
     * when we should be notifying another thread to run them.
     * But otherwise we do not have a great way to wait for them to run.
     */
    LOCK();
  }
  UNLOCK();
}
1323  
1324  #  endif /* !JAVA_FINALIZATION_NOT_NEEDED */
1325  
/*
 * Public API: set the maximum number of finalizers to run per
 * `GC_invoke_finalizers()` call (zero means no limit); see
 * `GC_interrupt_finalizers` above.
 */
GC_API void GC_CALL
GC_set_interrupt_finalizers(unsigned value)
{
  LOCK();
  GC_interrupt_finalizers = value;
  UNLOCK();
}
1333  
1334  GC_API unsigned GC_CALL
1335  GC_get_interrupt_finalizers(void)
1336  {
1337    unsigned value;
1338  
1339    READER_LOCK();
1340    value = GC_interrupt_finalizers;
1341    READER_UNLOCK();
1342    return value;
1343  }
1344  
/*
 * Public API: quick check whether the queue of objects awaiting
 * finalization is nonempty.  May be called without the allocator lock,
 * hence the atomic load when available.
 */
GC_API int GC_CALL
GC_should_invoke_finalizers(void)
{
#  ifdef AO_HAVE_load
  return GC_cptr_load((volatile ptr_t *)&GC_fnlz_roots.finalize_now) != NULL;
#  else
  return GC_fnlz_roots.finalize_now != NULL;
#  endif /* !AO_HAVE_load */
}
1354  
/*
 * Public API: dequeue and run pending finalizers (outside the allocator
 * lock), up to the `GC_interrupt_finalizers` limit if one is set.
 * Returns the number of finalizers invoked.
 */
GC_API int GC_CALL
GC_invoke_finalizers(void)
{
  int count = 0;
  word bytes_freed_before = 0; /*< initialized to prevent warning */

  GC_ASSERT(I_DONT_HOLD_LOCK());
  while (GC_should_invoke_finalizers()) {
    struct finalizable_object *curr_fo;
    ptr_t real_ptr;

    LOCK();
    if (count == 0) {
      /* Note: we hold the allocator lock here. */
      bytes_freed_before = GC_bytes_freed;
    } else if (UNLIKELY(GC_interrupt_finalizers != 0)
               && (unsigned)count >= GC_interrupt_finalizers) {
      /* Reached the per-invocation limit; stop early. */
      UNLOCK();
      break;
    }
    curr_fo = GC_fnlz_roots.finalize_now;
#  ifdef THREADS
    /* Another thread may have drained the queue since the quick check. */
    if (UNLIKELY(NULL == curr_fo)) {
      UNLOCK();
      break;
    }
#  endif
    SET_FINALIZE_NOW(fo_next(curr_fo));
    UNLOCK();
    fo_set_next(curr_fo, 0);
    /* Run the finalizer without holding the allocator lock. */
    real_ptr = (ptr_t)curr_fo->fo_hidden_base; /*< revealed */
    curr_fo->fo_fn(real_ptr, curr_fo->fo_client_data);
    curr_fo->fo_client_data = NULL;
    ++count;
    /*
     * Explicit freeing of `curr_fo` is probably a bad idea.
     * It throws off accounting if nearly all objects are finalizable.
     * Otherwise it should not matter.
     */
  }
  /* `bytes_freed_before` is initialized whenever `count` is nonzero. */
  if (count != 0
#  if defined(THREADS) && !defined(THREAD_SANITIZER)
      /*
       * A quick check whether some memory was freed.  The race with
       * `GC_free()` is safe to be ignored because we only need to know
       * if the current thread has deallocated something.
       */
      && bytes_freed_before != GC_bytes_freed
#  endif
  ) {
    LOCK();
    GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
    UNLOCK();
  }
  return count;
}
1412  
/* `GC_gc_no` value at the time the finalizer notifier was last invoked. */
static word last_finalizer_notification = 0;
1414  
/*
 * Called by the collector when finalizers may be pending: either invoke
 * them directly (with recursion throttling), or — in on-demand mode —
 * call the client's notifier once per collection.  Also a convenient
 * hook for generating debug backtraces.
 */
GC_INNER void
GC_notify_or_invoke_finalizers(void)
{
  GC_finalizer_notifier_proc notifier_fn = 0;
#  if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
  static word last_back_trace_gc_no = 1; /*< skip first one */
#  endif

#  if defined(THREADS) && !defined(KEEP_BACK_PTRS) && !defined(MAKE_BACK_GRAPH)
  /* Quick check (while unlocked) for an empty finalization queue. */
  if (!GC_should_invoke_finalizers())
    return;
#  endif
  LOCK();

  /*
   * This is a convenient place to generate backtraces if appropriate,
   * since that code is not callable with the allocator lock.
   */
#  if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
  if (GC_gc_no != last_back_trace_gc_no) {
#    ifdef KEEP_BACK_PTRS
    static GC_bool bt_in_progress = FALSE;

    if (!bt_in_progress) {
      long i;

      /* Prevent a recursion or parallel usage. */
      bt_in_progress = TRUE;
      for (i = 0; i < GC_backtraces; ++i) {
        /*
         * FIXME: This tolerates concurrent heap mutation, which may
         * cause occasional mysterious results.  We need to release
         * the allocator lock, since `GC_print_callers()` acquires it.
         * It probably should not.
         */
        void *current = GC_generate_random_valid_address();

        UNLOCK();
        GC_printf("\n***Chosen address %p in object\n", current);
        GC_print_backtrace(current);
        LOCK();
      }
      bt_in_progress = FALSE;
    }
#    endif
    last_back_trace_gc_no = GC_gc_no;
#    ifdef MAKE_BACK_GRAPH
    if (GC_print_back_height) {
      GC_print_back_graph_stats();
    }
#    endif
  }
#  endif
  if (NULL == GC_fnlz_roots.finalize_now) {
    /* Nothing queued; no need to invoke or notify. */
    UNLOCK();
    return;
  }

  if (!GC_finalize_on_demand) {
    unsigned char *pnested;

#  ifdef THREADS
    /* Running client finalizers during thread creation is unsafe. */
    if (UNLIKELY(GC_in_thread_creation)) {
      UNLOCK();
      return;
    }
#  endif
    pnested = GC_check_finalizer_nested();
    UNLOCK();
    /* Skip `GC_invoke_finalizers()` if nested. */
    if (pnested != NULL) {
      (void)GC_invoke_finalizers();
      /* Reset since no more finalizers or interrupted. */
      *pnested = 0;
#  ifndef THREADS
      GC_ASSERT(NULL == GC_fnlz_roots.finalize_now
                || GC_interrupt_finalizers > 0);
#  else
      /*
       * Note: in the multi-threaded case GC can run concurrently and
       * add more finalizers to run.
       */
#  endif
    }
    return;
  }

  /* These variables require synchronization to avoid data race. */
  if (last_finalizer_notification != GC_gc_no) {
    /* Notify at most once per collection cycle. */
    notifier_fn = GC_finalizer_notifier;
    last_finalizer_notification = GC_gc_no;
  }
  UNLOCK();
  if (notifier_fn != 0) {
    /* Invoke the notifier. */
    (*notifier_fn)();
  }
}
1514  
1515  #  ifndef SMALL_CONFIG
/* Evaluate to `x` if long disappearing links are supported, else to `y`. */
#    ifndef GC_LONG_REFS_NOT_NEEDED
#      define IF_LONG_REFS_PRESENT_ELSE(x, y) (x)
#    else
#      define IF_LONG_REFS_PRESENT_ELSE(x, y) (y)
#    endif
1521  
/*
 * Log finalization statistics: counts of registered finalizers, live
 * short/long disappearing links, finalization-ready objects, and links
 * cleared since the start of the last collection (computed against the
 * `GC_old_*_entries` snapshots taken in `GC_finalize`).
 */
GC_INNER void
GC_print_finalization_stats(void)
{
  const struct finalizable_object *fo;
  unsigned long ready = 0;

  GC_log_printf(
      "%lu finalization entries;"
      " %lu/%lu short/long disappearing links alive\n",
      (unsigned long)GC_fo_entries, (unsigned long)GC_dl_hashtbl.entries,
      (unsigned long)IF_LONG_REFS_PRESENT_ELSE(GC_ll_hashtbl.entries, 0));

  /* Count the objects currently queued for finalization. */
  for (fo = GC_fnlz_roots.finalize_now; fo != NULL; fo = fo_next(fo))
    ++ready;
  GC_log_printf("%lu finalization-ready objects;"
                " %ld/%ld short/long links cleared\n",
                ready, (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries,
                (long)IF_LONG_REFS_PRESENT_ELSE(
                    GC_old_ll_entries - GC_ll_hashtbl.entries, 0));
}
1542  #  endif /* !SMALL_CONFIG */
1543  
1544  #endif /* !GC_NO_FINALIZATION */
1545