/* weakmap.c */
1 /*
2 * Copyright (c) 2018 Petter A. Urkedal
3 *
4 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
5 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
6 *
7 * Permission is hereby granted to use or copy this program
8 * for any purpose, provided the above notices are retained on all copies.
9 * Permission to modify the code and to distribute modified code is granted,
10 * provided the above notices are retained, and a notice that the code was
11 * modified is included with the above copyright notice.
12 */
13
14 /*
15 * This one tests a case where disclaim notifiers sometimes return a nonzero
16 * value in order to protect objects from collection.
17 */
18
19 #ifdef HAVE_CONFIG_H
20 /* For `GC_THREADS` and `GC_PTHREADS` macros. */
21 # include "config.h"
22 #endif
23
24 #undef GC_NO_THREAD_REDIRECTS
25 /* This includes `gc.h` file transitively. */
26 #include "gc/gc_disclaim.h"
27
28 #define NOT_GCBUILD
29 #include "private/gc_priv.h"
30
31 #include <string.h>
32
33 #ifdef GC_PTHREADS
34 # include <errno.h> /*< for `EAGAIN`, `EBUSY` */
35 # include <pthread.h>
36 #endif
37
#undef rand
/* Note: concurrent update of `seed` does not hurt the test. */
static GC_RAND_STATE_T seed;
/* Redirect `rand` to the collector-provided lightweight PRNG. */
#define rand() GC_RAND_NEXT(&seed)

/* Note: this should not precede include `gc_priv.h` file. */
#include "gc/gc_mark.h"

#ifdef GC_PTHREADS
# ifndef NTHREADS
/* This excludes the main thread, which also runs a test. */
#   define NTHREADS 5
# endif
# include "private/gc_atomic_ops.h" /*< for `AO_t`, `AO_fetch_and_add1` */
#else
# undef NTHREADS
# define NTHREADS 0
# ifndef AO_HAVE_compiler_barrier
/* Minimal `AO_t` substitute for the single-threaded build. */
#   define AO_t size_t
# endif
#endif
59
/* Number of `pair` slots in each mutator thread's local population. */
#define POP_SIZE 200
/* Total mutation count, shared out among the threads (main included). */
#define MUTATE_CNT_BASE 700000

#define MUTATE_CNT (MUTATE_CNT_BASE / (NTHREADS + 1))
/* Iterations during which allocations are more frequent (see `test`). */
#define GROW_LIMIT (MUTATE_CNT / 10)

/* Number of hash buckets in the weak map. */
#define WEAKMAP_CAPACITY 256
/* Number of mutexes striped over the hash buckets. */
#define WEAKMAP_MUTEX_COUNT 32

/* `FINALIZER_CLOSURE_FLAG` definition matches the one in `fnlz_mlc.c` file. */
#if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
# define FINALIZER_CLOSURE_FLAG 0x2
# define INVALIDATE_FLAG 0x1
#else
# define FINALIZER_CLOSURE_FLAG 0x1
# define INVALIDATE_FLAG 0x2
#endif

/* Nonzero if any tag bit of `mask` is set in pointer-sized value `p`. */
#define IS_FLAG_SET(p, mask) \
  (((unsigned)(GC_uintptr_t)(p) & (unsigned)(mask)) != 0)
80
/*
 * Report a failed condition and terminate the process.  Wrapped in
 * `do { ... } while (0)` so the macro acts as a single statement and is
 * safe in unbraced `if`/`else` constructs (avoids the dangling-else and
 * stray-semicolon hazards of a bare `if` expansion).
 */
#define my_assert(e) \
  do { \
    if (!(e)) { \
      fflush(stdout); \
      fprintf(stderr, "Assertion failure, line %d: %s\n", __LINE__, #e); \
      exit(70); \
    } \
  } while (0)
87
/* Check an allocation result; report and exit on failure. */
#define CHECK_OUT_OF_MEMORY(p) \
  do { \
    if (NULL == (p)) { \
      fprintf(stderr, "Out of memory\n"); \
      exit(69); \
    } \
  } while (0)

#ifndef AO_HAVE_fetch_and_add1
/* This is used only to update counters; a plain (non-atomic) increment */
/* is tolerable for the test statistics.                                */
# define AO_fetch_and_add1(p) ((*(p))++)
#endif
100
101 static unsigned
102 memhash(void *src, size_t len)
103 {
104 unsigned acc = 0;
105 size_t i;
106
107 my_assert(len % sizeof(GC_word) == 0);
108 for (i = 0; i < len / sizeof(GC_word); ++i) {
109 acc = (unsigned)((2003 * (GC_word)acc + ((GC_word *)src)[i]) / 3);
110 }
111 return acc;
112 }
113
/* Test statistics, updated via `AO_fetch_and_add1` and printed at exit. */
static AO_t stat_added;       /*< new objects inserted by `weakmap_add` */
static AO_t stat_found;       /*< lookups that hit an existing object */
static AO_t stat_removed;     /*< objects unlinked by `weakmap_disclaim` */
static AO_t stat_skip_locked; /*< disclaim skips because a mutex was busy */
static AO_t stat_skip_marked; /*< disclaim skips because the object was marked */

/* Weak reference to a stored object: kept plain in find-leak mode, */
/* otherwise hidden so it does not keep the object reachable.       */
union weakmap_element_u {
  void *p;
  GC_hidden_pointer hidden;
};

/* Node of a hash-bucket chain, holding one weak reference. */
struct weakmap_link {
  union weakmap_element_u obj;
  struct weakmap_link *next;
};

struct weakmap {
#ifdef GC_PTHREADS
  /* Mutexes striped over the buckets (indexed by `h % WEAKMAP_MUTEX_COUNT`). */
  pthread_mutex_t mutex[WEAKMAP_MUTEX_COUNT];
#endif
  size_t key_size;       /*< leading bytes of an object that form the key */
  size_t obj_size;       /*< full size of the client-visible object */
  size_t capacity;       /*< number of buckets in `links` */
  unsigned weakobj_kind; /*< GC object kind used for stored objects */
  /* Note: `NULL` means this `weakmap` instance is destroyed. */
  struct weakmap_link **links;
};
141
/* Acquire the mutex of `wm` guarding hash value `h` (no-op when */
/* built without threads support).                               */
static void
weakmap_lock(struct weakmap *wm, unsigned h)
{
#ifdef GC_PTHREADS
  pthread_mutex_t *mu = &wm->mutex[h % WEAKMAP_MUTEX_COUNT];
  int res = pthread_mutex_lock(mu);

  my_assert(res == 0);
#else
  (void)wm;
  (void)h;
#endif
}
154
#ifdef GC_PTHREADS
/* Try to acquire the mutex guarding hash value `h`.  Returns 0 on  */
/* success or `EBUSY` if the lock is already held; any other result */
/* terminates the test.                                             */
static int
weakmap_trylock(struct weakmap *wm, unsigned h)
{
  int res = pthread_mutex_trylock(&wm->mutex[h % WEAKMAP_MUTEX_COUNT]);

  if (!(0 == res || EBUSY == res)) {
    fprintf(stderr, "pthread_mutex_trylock, errno= %d\n", res);
    exit(69);
  }
  return res;
}
#endif /* GC_PTHREADS */
168
/* Release the mutex of `wm` guarding hash value `h` (no-op when */
/* built without threads support).                               */
static void
weakmap_unlock(struct weakmap *wm, unsigned h)
{
#ifdef GC_PTHREADS
  pthread_mutex_t *mu = &wm->mutex[h % WEAKMAP_MUTEX_COUNT];
  int res = pthread_mutex_unlock(mu);

  my_assert(res == 0);
#else
  (void)wm;
  (void)h;
#endif
}
181
/* Callback for `GC_call_with_alloc_lock`: set the mark bit on the   */
/* object at `obj` while holding the allocator lock, thus protecting */
/* it from the current collection.  The return value is unused.      */
static void *GC_CALLBACK
set_mark_bit(void *obj)
{
  GC_set_mark_bit(obj);
#if defined(CPPCHECK)
  /* For cppcheck analysis only. */
  GC_noop1_ptr(obj);
#endif
  return NULL;
}
191
/*
 * Hash-consing insert: look `obj` up in `wm` by its leading `key_size`
 * bytes; if a matching entry exists, refresh its mapped value part and
 * return the stored object, otherwise insert a copy of `obj` and return
 * the new stored object.  Each stored object is preceded by one hidden
 * header word: a pointer to `wm` tagged with `FINALIZER_CLOSURE_FLAG`.
 */
static void *
weakmap_add(struct weakmap *wm, void *obj, size_t obj_size)
{
  struct weakmap_link *link, *new_link, **first;
  void **new_base;
  void *new_obj;
  unsigned h;
  size_t key_size = wm->key_size;

  my_assert(!IS_FLAG_SET(wm, FINALIZER_CLOSURE_FLAG));
  /* Lock and look for an existing entry. */
  my_assert(key_size <= obj_size);
  h = memhash(obj, key_size);
  first = &wm->links[h % wm->capacity];
  weakmap_lock(wm, h);

  for (link = *first; link != NULL; link = link->next) {
    void *old_obj = GC_get_find_leak() ? link->obj.p
                                       : GC_REVEAL_POINTER(link->obj.hidden);

    if (memcmp(old_obj, obj, key_size) == 0) {
      /* Mark the base (header word) so the disclaim callback keeps it. */
      GC_call_with_alloc_lock(set_mark_bit, (void **)old_obj - 1);
      /*
       * Pointers in the key part may have been freed and reused, changing
       * the keys without `memcmp` noticing. This is OK as long as we update
       * the mapped value.
       */
      if (memcmp((char *)old_obj + key_size, (char *)obj + key_size,
                 wm->obj_size - key_size)
          != 0) {
        memcpy((char *)old_obj + key_size, (char *)obj + key_size,
               wm->obj_size - key_size);
        GC_end_stubborn_change((char *)old_obj + key_size);
      }
      weakmap_unlock(wm, h);
      AO_fetch_and_add1(&stat_found);
#ifdef DEBUG_DISCLAIM_WEAKMAP
      printf("Found %p, hash= 0x%x\n", old_obj, h);
#endif
      return old_obj;
    }
  }

  /* Create new object. */
  new_base = (void **)GC_generic_malloc(sizeof(void *) + wm->obj_size,
                                        (int)wm->weakobj_kind);
  CHECK_OUT_OF_MEMORY(new_base);
  *new_base = CPTR_SET_FLAGS(wm, FINALIZER_CLOSURE_FLAG);
  new_obj = new_base + 1;
  memcpy(new_obj, obj, wm->obj_size);
  GC_end_stubborn_change(new_base);

  /* Add the object to the map. */
  new_link = (struct weakmap_link *)GC_malloc(sizeof(struct weakmap_link));
  CHECK_OUT_OF_MEMORY(new_link);
  if (GC_get_find_leak()) {
    new_link->obj.p = new_obj;
  } else {
    new_link->obj.hidden = GC_HIDE_POINTER(new_obj);
  }
  new_link->next = *first;
  GC_END_STUBBORN_CHANGE(new_link);
  /* Publish the new link at the head of the bucket chain. */
  GC_ptr_store_and_dirty(first, new_link);
  weakmap_unlock(wm, h);
  AO_fetch_and_add1(&stat_added);
#ifdef DEBUG_DISCLAIM_WEAKMAP
  printf("Added %p, hash= 0x%x\n", new_obj, h);
#endif
  return new_obj;
}
262
/*
 * Disclaim notifier for the weak-object kind: invoked by the collector
 * on objects of that kind before reclaiming them.  Returns nonzero to
 * protect the object from collection for this cycle, or 0 to let the
 * collector reclaim it (after which it is removed from the map).
 */
static int GC_CALLBACK
weakmap_disclaim(void *obj_base)
{
  struct weakmap *wm;
  struct weakmap_link **link;
  void *header;
  void *obj;
  unsigned h;

  /* Decode header word. */
  header = *(void **)obj_base;
  if (!IS_FLAG_SET(header, FINALIZER_CLOSURE_FLAG)) {
    /* On the collector free list, ignore it. */
    return 0;
  }

  my_assert(!IS_FLAG_SET(header, INVALIDATE_FLAG));
  wm = (struct weakmap *)CPTR_CLEAR_FLAGS(header, FINALIZER_CLOSURE_FLAG);
  if (NULL == wm->links) {
    /* The weakmap has been already destroyed. */
    return 0;
  }
  obj = (void **)obj_base + 1;

  /* Lock and check for mark. */
  h = memhash(obj, wm->key_size);
#ifdef GC_PTHREADS
  /* Do not block inside the callback: if the bucket is busy, keep */
  /* the object alive until the next collection instead.           */
  if (weakmap_trylock(wm, h) != 0) {
    AO_fetch_and_add1(&stat_skip_locked);
# ifdef DEBUG_DISCLAIM_WEAKMAP
    printf("Skipping locked %p, hash= 0x%x\n", obj, h);
# endif
    return 1;
  }
#endif
  if (GC_is_marked(obj_base)) {
    /* Marked, e.g. by `weakmap_add` via `set_mark_bit`; keep it alive. */
    weakmap_unlock(wm, h);
    AO_fetch_and_add1(&stat_skip_marked);
#ifdef DEBUG_DISCLAIM_WEAKMAP
    printf("Skipping marked %p, hash= 0x%x\n", obj, h);
#endif
    return 1;
  }

  /* Remove `obj` from `wm`. */
#ifdef DEBUG_DISCLAIM_WEAKMAP
  printf("Removing %p, hash= 0x%x\n", obj, h);
#endif
  my_assert(header == *(void **)obj_base);
  /* Tag the header so a stale use of the object can be detected. */
  *(void **)obj_base = CPTR_SET_FLAGS(header, INVALIDATE_FLAG);
  AO_fetch_and_add1(&stat_removed);
  /* Unlink the matching entry from its bucket chain; it must be there. */
  for (link = &wm->links[h % wm->capacity];; link = &(*link)->next) {
    const void *old_obj;

    if (NULL == *link) {
      fprintf(stderr, "Did not find %p\n", obj);
      exit(70);
    }
    old_obj = GC_get_find_leak() ? (*link)->obj.p
                                 : GC_REVEAL_POINTER((*link)->obj.hidden);
    if (old_obj == obj)
      break;
    my_assert(memcmp(old_obj, obj, wm->key_size) != 0);
  }
  GC_ptr_store_and_dirty(link, (*link)->next);
  weakmap_unlock(wm, h);
  return 0;
}
331
332 static struct weakmap *
333 weakmap_new(size_t capacity, size_t key_size, size_t obj_size,
334 unsigned weakobj_kind)
335 {
336 struct weakmap *wm = (struct weakmap *)GC_malloc(sizeof(struct weakmap));
337
338 CHECK_OUT_OF_MEMORY(wm);
339 #ifdef GC_PTHREADS
340 {
341 int i;
342 for (i = 0; i < WEAKMAP_MUTEX_COUNT; ++i) {
343 int err = pthread_mutex_init(&wm->mutex[i], NULL);
344
345 my_assert(err == 0);
346 }
347 }
348 #endif
349 wm->key_size = key_size;
350 wm->obj_size = obj_size;
351 wm->capacity = capacity;
352 wm->weakobj_kind = weakobj_kind;
353 GC_ptr_store_and_dirty(&wm->links,
354 GC_malloc(sizeof(struct weakmap_link *) * capacity));
355 CHECK_OUT_OF_MEMORY(wm->links);
356 return wm;
357 }
358
359 static void
360 weakmap_destroy(struct weakmap *wm)
361 {
362 #ifdef GC_PTHREADS
363 int i;
364
365 for (i = 0; i < WEAKMAP_MUTEX_COUNT; ++i) {
366 (void)pthread_mutex_destroy(&wm->mutex[i]);
367 }
368 #endif
369 /* The weakmap is destroyed. */
370 wm->links = NULL;
371 }
372
/* The global hash-consing set of pairs, backed by a weak map. */
struct weakmap *pair_hcset;

/* Note: this should not exceed `sizeof(pair_magic)`. */
#define PAIR_MAGIC_SIZE 16

/* The key part of `struct pair` as compared by `weakmap_add` */
/* (its leading `sizeof(struct pair_key)` bytes).             */
struct pair_key {
  struct pair *car, *cdr;
};

struct pair {
  struct pair *car;            /*< key part */
  struct pair *cdr;            /*< key part */
  char magic[PAIR_MAGIC_SIZE]; /*< integrity marker, see `pair_check_rec` */
  int checksum;                /*< `CSUM_SEED` plus children checksums */
};

static const char *const pair_magic = "PAIR_MAGIC_BYTES";

/* Base value of every pair checksum. */
#define CSUM_SEED 782
392
393 static struct pair *
394 pair_new(struct pair *car, struct pair *cdr)
395 {
396 struct pair tmpl;
397
398 /* This is to clear the paddings (to avoid a compiler warning). */
399 memset(&tmpl, 0, sizeof(tmpl));
400
401 tmpl.car = car;
402 tmpl.cdr = cdr;
403 memcpy(tmpl.magic, pair_magic, PAIR_MAGIC_SIZE);
404 tmpl.checksum = CSUM_SEED + (car != NULL ? car->checksum : 0)
405 + (cdr != NULL ? cdr->checksum : 0);
406 return (struct pair *)weakmap_add(pair_hcset, &tmpl, sizeof(tmpl));
407 }
408
409 static void
410 pair_check_rec(struct pair *p, int line)
411 {
412 while (p != NULL) {
413 int checksum = CSUM_SEED;
414
415 if (memcmp(p->magic, pair_magic, PAIR_MAGIC_SIZE) != 0) {
416 fprintf(stderr, "Magic bytes wrong for %p at %d\n", (void *)p, line);
417 exit(70);
418 }
419 if (p->car != NULL)
420 checksum += p->car->checksum;
421 if (p->cdr != NULL)
422 checksum += p->cdr->checksum;
423 if (p->checksum != checksum) {
424 fprintf(stderr, "Checksum failure for %p: (car= %p, cdr= %p) at %d\n",
425 (void *)p, (void *)p->car, (void *)p->cdr, line);
426 exit(70);
427 }
428 p = (rand() & 1) != 0 ? p->cdr : p->car;
429 }
430 }
431
/*
 * Worker body (run by each created thread and once by the main thread):
 * repeatedly mutate a local population of pairs - dropping references,
 * allocating hash-consed pairs, and checking structural invariants.
 */
static void *
test(void *data)
{
  int i;
  struct pair *p0, *p1;
  struct pair *pop[POP_SIZE];

  memset(pop, 0, sizeof(pop));
  for (i = 0; i < MUTATE_CNT; ++i) {
    int bits = rand();
    int t = (bits >> 3) % POP_SIZE;

    /* For the first `GROW_LIMIT` iterations a new pair is created on  */
    /* 1 of 3 operations; afterwards on 1 of 5, so the map grows early. */
    switch (bits % (i > GROW_LIMIT ? 5 : 3)) {
    case 0:
    case 3:
      /* Replace slot `t` by its `car`, possibly dropping a reference. */
      if (pop[t] != NULL)
        pop[t] = pop[t]->car;
      break;
    case 1:
    case 4:
      /* Replace slot `t` by its `cdr`. */
      if (pop[t] != NULL)
        pop[t] = pop[t]->cdr;
      break;
    case 2:
      /* Hash-consing invariant: an identical pair yields the same object. */
      p0 = pop[rand() % POP_SIZE];
      p1 = pop[rand() % POP_SIZE];
      pop[t] = pair_new(p0, p1);
      my_assert(pair_new(p0, p1) == pop[t]);
      my_assert(pop[t]->car == p0);
      my_assert(pop[t]->cdr == p1);
      break;
    }
    pair_check_rec(pop[rand() % POP_SIZE], __LINE__);
  }
  return data;
}
468
/* Set up the collector and the weak map, run the mutator test on the */
/* main thread plus `NTHREADS` workers, then print the statistics.    */
int
main(void)
{
  unsigned weakobj_kind;
#if NTHREADS > 0
  int i, n;
  pthread_t th[NTHREADS];
#endif

  /* Make the test stricter. */
  GC_set_all_interior_pointers(0);

#ifdef TEST_MANUAL_VDB
  GC_set_manual_vdb_allowed(1);
#endif
  GC_INIT();
  /* Register the displacements. */
  GC_init_finalized_malloc();

#ifndef NO_INCREMENTAL
  GC_enable_incremental();
#endif
  if (GC_get_find_leak())
    printf("This test program is not designed for leak detection mode\n");
  /* Create the object kind for weak objects and attach the disclaim */
  /* notifier to it.                                                 */
  weakobj_kind = GC_new_kind(GC_new_free_list(), /* 0 | */ GC_DS_LENGTH,
                             1 /* `adjust` */, 1 /* `clear` */);
  GC_register_disclaim_proc((int)weakobj_kind, weakmap_disclaim,
                            1 /* `mark_unconditionally` */);
  pair_hcset = weakmap_new(WEAKMAP_CAPACITY, sizeof(struct pair_key),
                           sizeof(struct pair), weakobj_kind);

#if NTHREADS > 0
  for (i = 0; i < NTHREADS; ++i) {
    int err = pthread_create(&th[i], NULL, test, NULL);

    if (err != 0) {
      fprintf(stderr, "Thread #%d creation failed, errno= %d\n", i, err);
      /* Tolerate resource exhaustion once a couple of threads exist. */
      if (i > 1 && EAGAIN == err)
        break;
      exit(1);
    }
  }
  /* Remember how many threads were actually started. */
  n = i;
#endif
  (void)test(NULL);
#if NTHREADS > 0
  for (i = 0; i < n; ++i) {
    int err = pthread_join(th[i], NULL);

    if (err != 0) {
      fprintf(stderr, "Thread #%d join failed, errno= %d\n", i, err);
      exit(69);
    }
  }
#endif
  weakmap_destroy(pair_hcset);
  printf("%u added, %u found; %u removed, %u locked, %u marked; %u remains\n",
         (unsigned)stat_added, (unsigned)stat_found, (unsigned)stat_removed,
         (unsigned)stat_skip_locked, (unsigned)stat_skip_marked,
         (unsigned)stat_added - (unsigned)stat_removed);
  return 0;
}
531