/* specific.c */

   1  /*
   2   * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
   3   *
   4   * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
   5   * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
   6   *
   7   * Permission is hereby granted to use or copy this program
   8   * for any purpose, provided the above notices are retained on all copies.
   9   * Permission to modify the code and to distribute modified code is granted,
  10   * provided the above notices are retained, and a notice that the code was
  11   * modified is included with the above copyright notice.
  12   */
  13  
  14  /*
  15   * To determine type of `tsd` implementation; includes `private/specific.h`
  16   * file if needed.
  17   */
  18  #include "private/thread_local_alloc.h"
  19  
  20  #if defined(USE_CUSTOM_SPECIFIC)
  21  
/*
 * A thread-specific data entry which will never appear valid to a reader.
 * Used to fill in empty cache entries to avoid a check for 0: readers may
 * probe a cache slot unconditionally, and a probe against this entry
 * always misses since its `qtid` and `thread` fields are invalid.
 */
static const tse invalid_tse = { INVALID_QTID, 0, 0, INVALID_THREADID };
  27  
  28  GC_INNER int
  29  GC_key_create_inner(tsd **key_ptr)
  30  {
  31    int i;
  32    int ret;
  33    tsd *result;
  34  
  35    GC_ASSERT(I_HOLD_LOCK());
  36    /* A quick alignment check, since we need atomic stores. */
  37    GC_ASSERT(ADDR(&invalid_tse.next) % ALIGNMENT == 0);
  38    result = (tsd *)MALLOC_CLEAR(sizeof(tsd));
  39    if (NULL == result)
  40      return ENOMEM;
  41    ret = pthread_mutex_init(&result->lock, NULL);
  42    if (ret != 0)
  43      return ret;
  44  
  45    for (i = 0; i < TS_CACHE_SIZE; ++i) {
  46      result->cache[i] = (tse *)GC_CAST_AWAY_CONST_PVOID(&invalid_tse);
  47    }
  48  #  ifdef GC_ASSERTIONS
  49    for (i = 0; i < TS_HASH_SIZE; ++i) {
  50      GC_ASSERT(NULL == result->hash[i]);
  51    }
  52  #  endif
  53    *key_ptr = result;
  54    return 0;
  55  }
  56  
/*
 * Associate `value` with the calling thread under `key`.  Allocates a new
 * hash-chain entry and publishes it with a release store so that lock-free
 * readers observe a fully initialized entry.  Returns 0 on success or
 * `ENOMEM` if the entry cannot be allocated.  Must be called with the
 * allocator lock held.
 */
GC_INNER int
GC_setspecific(tsd *key, void *value)
{
  pthread_t self = pthread_self();
  unsigned hash_val = TS_HASH(self);
  volatile tse *entry;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(self != INVALID_THREADID);
  /* Disable garbage collection during `GC_malloc`. */
  GC_dont_gc++;
  entry = (volatile tse *)MALLOC_CLEAR(sizeof(tse));
  GC_dont_gc--;
  if (UNLIKELY(NULL == entry))
    return ENOMEM;

  /* The lock serializes writers; readers never take it. */
  pthread_mutex_lock(&key->lock);
  /* New entry is prepended to the bucket's chain. */
  entry->next = key->hash[hash_val];
#  ifdef GC_ASSERTIONS
  {
    tse *p;

    /* Ensure no existing entry. */
    for (p = entry->next; p != NULL; p = p->next) {
      GC_ASSERT(!THREAD_EQUAL(p->thread, self));
    }
  }
#  endif
  /* Fill in all fields before the entry becomes reachable by readers. */
  entry->thread = self;
  entry->value = TS_HIDE_VALUE(value);
  /* `qtid` stays invalid until a lookup caches this entry. */
  GC_ASSERT(entry->qtid == INVALID_QTID);
  /*
   * There can only be one writer at a time, but this needs to be atomic
   * with respect to concurrent readers.
   */
  GC_cptr_store_release((volatile ptr_t *)&key->hash[hash_val],
                        (ptr_t)CAST_AWAY_VOLATILE_PVOID(entry));
  /* Notify the incremental collector of the modified locations. */
  GC_dirty(CAST_AWAY_VOLATILE_PVOID(entry));
  GC_dirty(key->hash + hash_val);
  if (pthread_mutex_unlock(&key->lock) != 0)
    ABORT("pthread_mutex_unlock failed (setspecific)");
  return 0;
}
 100  
/*
 * Unlink thread `t`'s entry (if any) from `key`'s hash table.  The entry
 * itself is not deallocated; with GC it is reclaimed once no cache slot
 * or local pointer references it (see the comment below).  Called with
 * the allocator lock held when fork handling is enabled.
 */
GC_INNER void
GC_remove_specific_after_fork(tsd *key, pthread_t t)
{
  unsigned hash_val = TS_HASH(t);
  tse *entry;
  tse *prev = NULL;

#  ifdef CAN_HANDLE_FORK
  /*
   * Both `GC_setspecific` and `GC_remove_specific` should be called with
   * the allocator lock held to ensure the consistency of the hash table
   * in the forked child process.
   */
  GC_ASSERT(I_HOLD_LOCK());
#  endif
  pthread_mutex_lock(&key->lock);
  /* Find `t`'s entry in the bucket chain, tracking its predecessor. */
  for (entry = key->hash[hash_val];
       entry != NULL && !THREAD_EQUAL(entry->thread, t); entry = entry->next) {
    prev = entry;
  }
  /*
   * Invalidate `qtid` field, since `qtid` values may be reused, and
   * a later cache lookup could otherwise find this `entry`.
   */
  if (entry != NULL) {
    entry->qtid = INVALID_QTID;
    if (NULL == prev) {
      /* Entry is the chain head; splice the bucket pointer past it. */
      key->hash[hash_val] = entry->next;
      GC_dirty(key->hash + hash_val);
    } else {
      prev->next = entry->next;
      GC_dirty(prev);
    }
    /*
     * Atomic! Concurrent accesses still work.  They must, since readers
     * do not lock.  We should not need a `volatile` access here, since
     * both this and the preceding write should become visible no later
     * than the `pthread_mutex_unlock()` call.
     */
  }
  /*
   * If we wanted to deallocate the entry, we would first have to clear
   * any cache entries pointing to it.  That probably requires additional
   * synchronization, since we cannot prevent a concurrent cache lookup,
   * which should still be examining deallocated memory.  This can only
   * happen if the concurrent access is from another thread, and hence
   * has missed the cache, but still...
   */
#  ifdef LINT2
  GC_noop1_ptr(entry);
#  endif

  /*
   * With GC, we are done, since the pointers from the cache will be
   * overwritten, all local pointers to the entries will be dropped,
   * and the entry will then be reclaimed.
   */
  if (pthread_mutex_unlock(&key->lock) != 0)
    ABORT("pthread_mutex_unlock failed (remove_specific after fork)");
}
 161  
#  ifdef CAN_HANDLE_FORK
/*
 * In the child process after fork, rebind the entry that belonged to the
 * parent thread (`GC_parent_pthread_self`) to the surviving thread:
 * remove it from the bucket derived from the parent's id, update its
 * `thread` field, and re-insert it at the bucket for the new id.  Called
 * with the allocator lock held; the mutex is taken only to placate LINT2,
 * since no other thread exists in the child at this point.
 */
GC_INNER void
GC_update_specific_after_fork(tsd *key)
{
  unsigned hash_val = TS_HASH(GC_parent_pthread_self);
  tse *entry;

  GC_ASSERT(I_HOLD_LOCK());
#    ifdef LINT2
  pthread_mutex_lock(&key->lock);
#    endif
  entry = key->hash[hash_val];
  if (LIKELY(entry != NULL)) {
    GC_ASSERT(THREAD_EQUAL(entry->thread, GC_parent_pthread_self));
    /* Only the fork-survivor's entry should remain in this bucket. */
    GC_ASSERT(NULL == entry->next);
    /* Remove the `entry` from the table. */
    key->hash[hash_val] = NULL;
    entry->thread = pthread_self();
    /* Then put the `entry` back to the table (based on new hash value). */
    key->hash[TS_HASH(entry->thread)] = entry;
  }
#    ifdef LINT2
  (void)pthread_mutex_unlock(&key->lock);
#    endif
}
#  endif
 188  
 189  GC_INNER void *
 190  GC_slow_getspecific(tsd *key, size_t qtid, tse *volatile *cache_ptr)
 191  {
 192    pthread_t self = pthread_self();
 193    tse *entry = key->hash[TS_HASH(self)];
 194  
 195    GC_ASSERT(qtid != INVALID_QTID);
 196    while (entry != NULL && !THREAD_EQUAL(entry->thread, self)) {
 197      entry = entry->next;
 198    }
 199    if (entry == NULL)
 200      return NULL;
 201    /*
 202     * Set the cache `entry`.  It is safe to do this asynchronously.
 203     * Either value is safe, though may produce spurious misses.
 204     * We are replacing one `qtid` with another one for the same thread.
 205     */
 206    AO_store(&entry->qtid, qtid);
 207  
 208    GC_cptr_store((volatile ptr_t *)cache_ptr, (ptr_t)entry);
 209    return TS_REVEAL_PTR(entry->value);
 210  }
 211  
 212  #  ifdef GC_ASSERTIONS
 213  /*
 214   * Check that that all elements of the data structure associated with
 215   * `key` are marked.
 216   */
 217  void
 218  GC_check_tsd_marks(tsd *key)
 219  {
 220    int i;
 221    tse *p;
 222  
 223    if (!GC_is_marked(GC_base(key))) {
 224      ABORT("Unmarked thread-specific-data table");
 225    }
 226    for (i = 0; i < TS_HASH_SIZE; ++i) {
 227      for (p = key->hash[i]; p != NULL; p = p->next) {
 228        if (!GC_is_marked(GC_base(p))) {
 229          ABORT_ARG1("Unmarked thread-specific-data entry", " at %p", (void *)p);
 230        }
 231      }
 232    }
 233    for (i = 0; i < TS_CACHE_SIZE; ++i) {
 234      p = key->cache[i];
 235      if (p != &invalid_tse && !GC_is_marked(GC_base(p))) {
 236        ABORT_ARG1("Unmarked cached thread-specific-data entry", " at %p",
 237                   (void *)p);
 238      }
 239    }
 240  }
 241  #  endif /* GC_ASSERTIONS */
 242  
 243  #endif /* USE_CUSTOM_SPECIFIC */
 244