/* new_hblk.c */

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

#ifndef SMALL_CONFIG
  19  /*
  20   * Build a free list for two-pointer cleared objects inside the given block.
  21   * Set the last link to be `ofl`.  Return a pointer to the first free-list
  22   * entry.
  23   */
  24  STATIC ptr_t
  25  GC_build_fl_clear2(struct hblk *h, ptr_t ofl)
  26  {
  27    ptr_t *p = (ptr_t *)h->hb_body;
  28    ptr_t plim = (ptr_t)(h + 1);
  29  
  30    p[0] = ofl;
  31    p[1] = NULL;
  32    p[2] = (ptr_t)p;
  33    p[3] = NULL;
  34    for (p += 4; ADDR_LT((ptr_t)p, plim); p += 4) {
  35      p[0] = (ptr_t)(p - 2);
  36      p[1] = NULL;
  37      p[2] = (ptr_t)p;
  38      p[3] = NULL;
  39    }
  40    return (ptr_t)(p - 2);
  41  }
  43  /* The same as above but uncleared objects. */
  44  STATIC ptr_t
  45  GC_build_fl2(struct hblk *h, ptr_t ofl)
  46  {
  47    ptr_t *p = (ptr_t *)h->hb_body;
  48    ptr_t plim = (ptr_t)(h + 1);
  49  
  50    p[0] = ofl;
  51    p[2] = (ptr_t)p;
  52    for (p += 4; ADDR_LT((ptr_t)p, plim); p += 4) {
  53      p[0] = (ptr_t)(p - 2);
  54      p[2] = (ptr_t)p;
  55    }
  56    return (ptr_t)(p - 2);
  57  }
  59  /* The same as above but for four-pointer cleared objects. */
  60  STATIC ptr_t
  61  GC_build_fl_clear4(struct hblk *h, ptr_t ofl)
  62  {
  63    ptr_t *p = (ptr_t *)h->hb_body;
  64    ptr_t plim = (ptr_t)(h + 1);
  65  
  66    p[0] = ofl;
  67    p[1] = NULL;
  68    p[2] = NULL;
  69    p[3] = NULL;
  70    for (p += 4; ADDR_LT((ptr_t)p, plim); p += 4) {
  71      GC_PREFETCH_FOR_WRITE((ptr_t)(p + 64));
  72      p[0] = (ptr_t)(p - 4);
  73      p[1] = NULL;
  74      CLEAR_DOUBLE(p + 2);
  75    }
  76    return (ptr_t)(p - 4);
  77  }
  79  /* The same as `GC_build_fl_clear4()` but uncleared objects. */
  80  STATIC ptr_t
  81  GC_build_fl4(struct hblk *h, ptr_t ofl)
  82  {
  83    ptr_t *p = (ptr_t *)h->hb_body;
  84    ptr_t plim = (ptr_t)(h + 1);
  85  
  86    p[0] = ofl;
  87    p[4] = (ptr_t)p;
  88    /* Unroll the loop by 2. */
  89    for (p += 8; ADDR_LT((ptr_t)p, plim); p += 8) {
  90      GC_PREFETCH_FOR_WRITE((ptr_t)(p + 64));
  91      p[0] = (ptr_t)(p - 4);
  92      p[4] = (ptr_t)p;
  93    }
  94    return (ptr_t)(p - 4);
  95  }
#endif /* !SMALL_CONFIG */

  98  GC_INNER ptr_t
  99  GC_build_fl(struct hblk *h, ptr_t list, size_t lg, GC_bool clear)
 100  {
 101    ptr_t *p, *prev;
 102    ptr_t plim; /*< points to last object in new `hblk` entity */
 103    size_t lpw = GRANULES_TO_PTRS(lg);
 104  
 105    /*
 106     * Do a few prefetches here, just because it is cheap.
 107     * If we were more serious about it, these should go inside the loops.
 108     * But write prefetches usually do not seem to matter much.
 109     */
 110    GC_PREFETCH_FOR_WRITE((ptr_t)h);
 111    GC_PREFETCH_FOR_WRITE((ptr_t)h + 128);
 112    GC_PREFETCH_FOR_WRITE((ptr_t)h + 256);
 113    GC_PREFETCH_FOR_WRITE((ptr_t)h + 378);
 114  #ifndef SMALL_CONFIG
 115    /*
 116     * Handle small objects sizes more efficiently.  For larger objects
 117     * the difference is less significant.
 118     */
 119    switch (lpw) {
 120    case 2:
 121      if (clear) {
 122        return GC_build_fl_clear2(h, list);
 123      } else {
 124        return GC_build_fl2(h, list);
 125      }
 126    case 4:
 127      if (clear) {
 128        return GC_build_fl_clear4(h, list);
 129      } else {
 130        return GC_build_fl4(h, list);
 131      }
 132    default:
 133      break;
 134    }
 135  #endif /* !SMALL_CONFIG */
 136  
 137    /* Clear the page if necessary. */
 138    if (clear)
 139      BZERO(h, HBLKSIZE);
 140  
 141    /* Add objects to free list. */
 142    prev = (ptr_t *)h->hb_body; /*< one object behind `p` */
 143  
 144    /* The last place for the last object to start. */
 145    plim = (ptr_t)h + HBLKSIZE - lpw * sizeof(ptr_t);
 146  
 147    /* Make a list of all objects in `*h` with head as last object. */
 148    for (p = prev + lpw; ADDR_GE(plim, (ptr_t)p); p += lpw) {
 149      /* The current object's link points to last object. */
 150      obj_link(p) = (ptr_t)prev;
 151      prev = p;
 152    }
 153    p -= lpw;
 154    /* `p` now points to the last object. */
 155  
 156    /*
 157     * Put `p` (which is now head of list of objects in `*h`) as first pointer
 158     * in the appropriate free list for this size.
 159     */
 160    *(ptr_t *)h = list;
 161    return (ptr_t)p;
 162  }
 164  GC_INNER void
 165  GC_new_hblk(size_t lg, int kind)
 166  {
 167    struct hblk *h; /*< the new heap block */
 168    size_t lb_adjusted = GRANULES_TO_BYTES(lg);
 169  
 170    GC_STATIC_ASSERT(sizeof(struct hblk) == HBLKSIZE);
 171    GC_ASSERT(I_HOLD_LOCK());
 172    /* Allocate a new heap block. */
 173    h = GC_allochblk(lb_adjusted, kind, 0 /* `flags` */, 0 /* `align_m1` */);
 174    if (UNLIKELY(NULL == h))
 175      return; /*< out of memory */
 176  
 177    /* Mark all objects if appropriate. */
 178    if (IS_UNCOLLECTABLE(kind))
 179      GC_set_hdr_marks(HDR(h));
 180  
 181    /* Build the free list. */
 182    GC_obj_kinds[kind].ok_freelist[lg]
 183        = GC_build_fl(h, (ptr_t)GC_obj_kinds[kind].ok_freelist[lg], lg,
 184                      GC_debugging_started || GC_obj_kinds[kind].ok_init);
 185  }

/*
 * Routines for maintaining maps describing heap block layouts for various
 * object sizes.  Allows fast pointer validity checks and fast location of
 * object start locations on machines (such as SPARC) with slow division.
 */

 193  GC_API void GC_CALL
 194  GC_register_displacement(size_t offset)
 195  {
 196    LOCK();
 197    GC_register_displacement_inner(offset);
 198    UNLOCK();
 199  }
 201  GC_INNER void
 202  GC_register_displacement_inner(size_t offset)
 203  {
 204    GC_ASSERT(I_HOLD_LOCK());
 205    if (offset >= VALID_OFFSET_SZ) {
 206      ABORT("Bad argument to GC_register_displacement");
 207    }
 208    if (!GC_valid_offsets[offset]) {
 209      GC_valid_offsets[offset] = TRUE;
 210      GC_modws_valid_offsets[offset % sizeof(ptr_t)] = TRUE;
 211    }
 212  }

#ifndef MARK_BIT_PER_OBJ
 215  GC_INNER GC_bool
 216  GC_add_map_entry(size_t lg)
 217  {
 218    size_t displ;
 219    hb_map_entry_t *new_map;
 220  
 221    GC_ASSERT(I_HOLD_LOCK());
 222    /*
 223     * Ensure `displ % lg` fits into `hb_map_entry_t` type.
 224     * Note: the maximum value is computed in this way to avoid compiler
 225     * complains about constant truncation or expression overflow.
 226     */
 227    GC_STATIC_ASSERT(
 228        MAXOBJGRANULES - 1
 229        <= (~(size_t)0 >> ((sizeof(size_t) - sizeof(hb_map_entry_t)) * 8)));
 230  
 231    if (lg > MAXOBJGRANULES)
 232      lg = 0;
 233    if (LIKELY(GC_obj_map[lg] != NULL))
 234      return TRUE;
 235  
 236    new_map = (hb_map_entry_t *)GC_scratch_alloc(OBJ_MAP_LEN
 237                                                 * sizeof(hb_map_entry_t));
 238    if (UNLIKELY(NULL == new_map))
 239      return FALSE;
 240  
 241    GC_COND_LOG_PRINTF("Adding block map for size of %u granules (%u bytes)\n",
 242                       (unsigned)lg, (unsigned)GRANULES_TO_BYTES(lg));
 243    if (0 == lg) {
 244      for (displ = 0; displ < OBJ_MAP_LEN; displ++) {
 245        /* Set to a nonzero to get us out of the marker fast path. */
 246        new_map[displ] = 1;
 247      }
 248    } else {
 249      for (displ = 0; displ < OBJ_MAP_LEN; displ++) {
 250        new_map[displ] = (hb_map_entry_t)(displ % lg);
 251      }
 252    }
 253    GC_obj_map[lg] = new_map;
 254    return TRUE;
 255  }
#endif /* !MARK_BIT_PER_OBJ */

 258  GC_INNER void
 259  GC_initialize_offsets(void)
 260  {
 261    size_t i;
 262  
 263    if (GC_all_interior_pointers) {
 264      for (i = 0; i < VALID_OFFSET_SZ; ++i)
 265        GC_valid_offsets[i] = TRUE;
 266    } else {
 267      BZERO(GC_valid_offsets, sizeof(GC_valid_offsets));
 268      for (i = 0; i < sizeof(ptr_t); ++i)
 269        GC_modws_valid_offsets[i] = FALSE;
 270    }
 271  }