malloc.c raw
1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
5 * Copyright (c) 2008-2022 Ivan Maidanski
6 *
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 *
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
15 */
16
17 #include "private/gc_priv.h"
18
19 #include <string.h>
20
21 /* Allocate reclaim list for the kind. Returns `TRUE` on success. */
22 STATIC GC_bool
23 GC_alloc_reclaim_list(struct obj_kind *ok)
24 {
25 struct hblk **result;
26
27 GC_ASSERT(I_HOLD_LOCK());
28 result = (struct hblk **)GC_scratch_alloc((MAXOBJGRANULES + 1)
29 * sizeof(struct hblk *));
30 if (UNLIKELY(NULL == result))
31 return FALSE;
32
33 BZERO(result, (MAXOBJGRANULES + 1) * sizeof(struct hblk *));
34 ok->ok_reclaim_list = result;
35 return TRUE;
36 }
37
/*
 * Allocate a large block of size `lb_adjusted` bytes with the requested
 * alignment (`align_m1 + 1`). The block is not cleared. We assume that
 * the size is nonzero and a multiple of `GC_GRANULE_BYTES`, and that
 * it already includes `EXTRA_BYTES` value. The `flags` argument should
 * be `IGNORE_OFF_PAGE` or 0. Calls `GC_allochblk()` to do the actual
 * allocation, but also triggers collection and/or heap expansion
 * as appropriate. Updates value of `GC_bytes_allocd`; does also other
 * accounting. Returns `NULL` on failure (i.e. when
 * `GC_collect_or_expand()` gives up). The allocator lock is held.
 */
STATIC ptr_t
GC_alloc_large(size_t lb_adjusted, int kind, unsigned flags, size_t align_m1)
{
  /*
   * TODO: It is unclear which retries limit is sufficient (value of 3 leads
   * to fail in some 32-bit applications, 10 is a kind of arbitrary value).
   */
#define MAX_ALLOCLARGE_RETRIES 10

  int retry_cnt;
  size_t n_blocks; /*< includes alignment */
  struct hblk *h;
  ptr_t result;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(lb_adjusted != 0 && (lb_adjusted & (GC_GRANULE_BYTES - 1)) == 0);
  n_blocks = OBJ_SZ_TO_BLOCKS_CHECKED(SIZET_SAT_ADD(lb_adjusted, align_m1));
  if (UNLIKELY(!GC_is_initialized)) {
    UNLOCK(); /*< just to unset `GC_lock_holder` */
    GC_init();
    LOCK();
  }
  /* Do our share of marking work. */
  if (GC_incremental && !GC_dont_gc) {
    GC_collect_a_little_inner(n_blocks);
  }

  /* First attempt; on failure, try to coalesce unmapped blocks, if any. */
  h = GC_allochblk(lb_adjusted, kind, flags, align_m1);
#ifdef USE_MUNMAP
  if (NULL == h && GC_merge_unmapped()) {
    h = GC_allochblk(lb_adjusted, kind, flags, align_m1);
  }
#endif
  for (retry_cnt = 0; NULL == h; retry_cnt++) {
    /*
     * Only a few iterations are expected at most, otherwise something
     * is wrong in one of the functions called below.
     */
    if (retry_cnt > MAX_ALLOCLARGE_RETRIES)
      ABORT("Too many retries in GC_alloc_large");
    /* Collect garbage and/or grow the heap, then retry the allocation. */
    if (UNLIKELY(!GC_collect_or_expand(n_blocks, flags, retry_cnt > 0)))
      return NULL;
    h = GC_allochblk(lb_adjusted, kind, flags, align_m1);
  }

  /* Success: update the allocation statistics. */
  GC_bytes_allocd += lb_adjusted;
  if (lb_adjusted > HBLKSIZE) {
    GC_large_allocd_bytes += HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted);
    if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
      GC_max_large_allocd_bytes = GC_large_allocd_bytes;
  }
  /* FIXME: Do we need some way to reset `GC_max_large_allocd_bytes`? */
  result = h->hb_body;
  GC_ASSERT((ADDR(result) & align_m1) == 0);
  return result;
}
104
105 /*
106 * Allocate a large block of given size in bytes, clear it if appropriate.
107 * We assume that the size is nonzero and a multiple of `GC_GRANULE_BYTES`,
108 * and that it already includes `EXTRA_BYTES` value. Update value of
109 * `GC_bytes_allocd`.
110 */
111 STATIC ptr_t
112 GC_alloc_large_and_clear(size_t lb_adjusted, int kind, unsigned flags)
113 {
114 ptr_t result;
115
116 GC_ASSERT(I_HOLD_LOCK());
117 result = GC_alloc_large(lb_adjusted, kind, flags, 0 /* `align_m1` */);
118 if (LIKELY(result != NULL)
119 && (GC_debugging_started || GC_obj_kinds[kind].ok_init)) {
120 /* Clear the whole block, in case of `GC_realloc` call. */
121 BZERO(result, HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted));
122 }
123 return result;
124 }
125
/*
 * Fill in additional entries in `GC_size_map`, including the `i`-th one.
 * Note that a filled in section of the array ending at `n` always has
 * the length of at least `n / 4`.
 */
STATIC void
GC_extend_size_map(size_t i)
{
  size_t original_lg = ALLOC_REQUEST_GRANS(i);
  size_t lg;
  /*
   * The size we try to preserve. Close to `i`, unless this would
   * introduce too many distinct sizes.
   */
  size_t byte_sz = GRANULES_TO_BYTES(original_lg);
  size_t smaller_than_i = byte_sz - (byte_sz >> 3);
  /* The lowest indexed entry we initialize. */
  size_t low_limit;
  size_t number_of_objs;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(0 == GC_size_map[i]);
  if (0 == GC_size_map[smaller_than_i]) {
    /* There is a sizeable unfilled gap below `i`; cover it too. */
    low_limit = byte_sz - (byte_sz >> 2); /*< much smaller than `i` */
    lg = original_lg;
    /* Skip over entries that are filled in already. */
    while (GC_size_map[low_limit] != 0)
      low_limit++;
  } else {
    /* Entries close below `i` exist; start just above the filled part. */
    low_limit = smaller_than_i + 1;
    while (GC_size_map[low_limit] != 0)
      low_limit++;

    /* Grow the granule count slightly, but never below the request. */
    lg = ALLOC_REQUEST_GRANS(low_limit);
    lg += lg >> 3;
    if (lg < original_lg)
      lg = original_lg;
  }

  /*
   * For these larger sizes, we use an even number of granules.
   * This makes it easier to, e.g., construct a 16-byte-aligned
   * allocator even if `GC_GRANULE_BYTES` is 8.
   */
  lg = (lg + 1) & ~(size_t)1;
  if (lg > MAXOBJGRANULES)
    lg = MAXOBJGRANULES;

  /* If we can fit the same number of larger objects in a block, do so. */
  GC_ASSERT(lg != 0);
  number_of_objs = HBLK_GRANULES / lg;
  GC_ASSERT(number_of_objs != 0);
  lg = (HBLK_GRANULES / number_of_objs) & ~(size_t)1;

  /*
   * We may need one extra byte; do not always fill in
   * `GC_size_map[byte_sz]`.
   */
  byte_sz = GRANULES_TO_BYTES(lg) - EXTRA_BYTES;

  /* Map every request size in the covered range to `lg` granules. */
  for (; low_limit <= byte_sz; low_limit++)
    GC_size_map[low_limit] = lg;
}
188
/*
 * Allocate a small object of the given kind from the matching free list,
 * replenishing the list (via `GC_allocobj()`) when it is empty. `lb` is
 * the client-requested size in bytes; `GC_size_map` maps it to a granule
 * count. Returns `NULL` on failure. The allocator lock is held.
 */
STATIC void *
GC_generic_malloc_inner_small(size_t lb, int kind)
{
  struct obj_kind *ok = &GC_obj_kinds[kind];
  size_t lg = GC_size_map[lb];
  void **opp = &ok->ok_freelist[lg];
  void *op = *opp;

  GC_ASSERT(I_HOLD_LOCK());
  if (UNLIKELY(NULL == op)) {
    if (0 == lg) {
      /* The `GC_size_map` entry for `lb` has not been computed yet. */
      if (UNLIKELY(!GC_is_initialized)) {
        UNLOCK(); /*< just to unset `GC_lock_holder` */
        GC_init();
        LOCK();
        /* `GC_init()` might have filled in the map entry; re-read it. */
        lg = GC_size_map[lb];
      }
      if (0 == lg) {
        GC_extend_size_map(lb);
        lg = GC_size_map[lb];
        GC_ASSERT(lg != 0);
      }
      /* Retry. */
      opp = &ok->ok_freelist[lg];
      op = *opp;
    }
    if (NULL == op) {
      /* The free list is empty; refill it (may trigger a collection). */
      if (NULL == ok->ok_reclaim_list && !GC_alloc_reclaim_list(ok))
        return NULL;
      op = GC_allocobj(lg, kind);
      if (NULL == op)
        return NULL;
    }
  }
  /* Unlink the object from the free list and account for the bytes. */
  *opp = obj_link(op);
  obj_link(op) = NULL;
  GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
  return op;
}
228
/*
 * Allocate an object of the given kind with the allocator lock held.
 * Small objects go through the per-size free lists; larger ones are
 * handled by `GC_alloc_large_and_clear()` (which clears the block only
 * if the kind requires initialization or debugging is on).
 */
GC_INNER void *
GC_generic_malloc_inner(size_t lb, int kind, unsigned flags)
{
  size_t lb_adjusted;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(kind < MAXOBJKINDS);
  if (SMALL_OBJ(lb)) {
    return GC_generic_malloc_inner_small(lb, kind);
  }

#if MAX_EXTRA_BYTES > 0
  if ((flags & IGNORE_OFF_PAGE) != 0 && lb >= HBLKSIZE) {
    /* No need to add `EXTRA_BYTES`. */
    lb_adjusted = lb;
  } else
#endif
  /* else */ {
    lb_adjusted = ADD_EXTRA_BYTES(lb);
  }
  return GC_alloc_large_and_clear(ROUNDUP_GRANULE_SIZE(lb_adjusted), kind,
                                  flags);
}
252
#ifdef GC_COLLECT_AT_MALLOC
/*
 * Threshold size (in bytes) used by the collect-at-malloc debugging
 * facility; presumably consulted by `GC_DBG_COLLECT_AT_MALLOC()` (defined
 * in `gc_priv.h` — confirm there).
 */
# if defined(CPPCHECK)
size_t GC_dbg_collect_at_malloc_min_lb = 16 * 1024; /*< some value */
# else
size_t GC_dbg_collect_at_malloc_min_lb = (GC_COLLECT_AT_MALLOC);
# endif
#endif
260
/*
 * Allocate `lb` bytes of the given kind with alignment `align_m1 + 1`.
 * Acquires the allocator lock itself; may invoke finalizers and,
 * on failure, the out-of-memory handler (whose result might be
 * misaligned).
 */
GC_INNER void *
GC_generic_malloc_aligned(size_t lb, int kind, unsigned flags, size_t align_m1)
{
  void *result;

  GC_ASSERT(kind < MAXOBJKINDS);
  if (UNLIKELY(get_have_errors()))
    GC_print_all_errors();
  GC_notify_or_invoke_finalizers();
  GC_DBG_COLLECT_AT_MALLOC(lb);
  if (SMALL_OBJ(lb) && LIKELY(align_m1 < GC_GRANULE_BYTES)) {
    /* The small-object path; granule alignment suffices here. */
    LOCK();
    result = GC_generic_malloc_inner_small(lb, kind);
    UNLOCK();
  } else {
#ifdef THREADS
    size_t lg;
#endif
    size_t lb_adjusted;
    GC_bool init;

#if MAX_EXTRA_BYTES > 0
    if ((flags & IGNORE_OFF_PAGE) != 0 && lb >= HBLKSIZE) {
      /* No need to add `EXTRA_BYTES`. */
      lb_adjusted = ROUNDUP_GRANULE_SIZE(lb);
# ifdef THREADS
      lg = BYTES_TO_GRANULES(lb_adjusted);
# endif
    } else
#endif
    /* else */ {
#ifndef THREADS
      size_t lg; /*< CPPCHECK */
#endif

      if (UNLIKELY(0 == lb))
        lb = 1;
      lg = ALLOC_REQUEST_GRANS(lb);
      lb_adjusted = GRANULES_TO_BYTES(lg);
    }

    init = GC_obj_kinds[kind].ok_init;
    /* Round the alignment to either "none" or a heap-block multiple. */
    if (LIKELY(align_m1 < GC_GRANULE_BYTES)) {
      align_m1 = 0;
    } else if (align_m1 < HBLKSIZE) {
      align_m1 = HBLKSIZE - 1;
    }
    LOCK();
    result = GC_alloc_large(lb_adjusted, kind, flags, align_m1);
    if (LIKELY(result != NULL)) {
      if (GC_debugging_started
#ifndef THREADS
          || init
#endif
      ) {
        /* Clear the entire block while still holding the lock. */
        BZERO(result, HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted));
      } else {
#ifdef THREADS
        GC_ASSERT(GRANULES_TO_PTRS(lg) >= 2);
        /*
         * Clear any memory that might be used for the GC descriptors
         * before we release the allocator lock.
         */
        ((ptr_t *)result)[0] = NULL;
        ((ptr_t *)result)[1] = NULL;
        ((ptr_t *)result)[GRANULES_TO_PTRS(lg) - 1] = NULL;
        ((ptr_t *)result)[GRANULES_TO_PTRS(lg) - 2] = NULL;
#endif
      }
    }
    UNLOCK();
#ifdef THREADS
    if (init && !GC_debugging_started && result != NULL) {
      /* Clear the rest (i.e. excluding the initial 2 words). */
      BZERO((ptr_t *)result + 2,
            HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb_adjusted) - 2 * sizeof(ptr_t));
    }
#endif
  }
  if (UNLIKELY(NULL == result)) {
    result = (*GC_get_oom_fn())(lb);
    /* Note: result might be misaligned. */
  }
  return result;
}
346
347 GC_API GC_ATTR_MALLOC void *GC_CALL
348 GC_generic_malloc(size_t lb, int kind)
349 {
350 return GC_generic_malloc_aligned(lb, kind, 0 /* `flags` */,
351 0 /* `align_m1` */);
352 }
353
354 GC_API GC_ATTR_MALLOC void *GC_CALL
355 GC_malloc_kind_global(size_t lb, int kind)
356 {
357 return GC_malloc_kind_aligned_global(lb, kind, 0 /* `align_m1` */);
358 }
359
/*
 * Global (non-thread-local) allocation of an object of the given kind
 * with the requested alignment (`align_m1 + 1`). Tries the free-list
 * fast path first; otherwise falls through to
 * `GC_generic_malloc_aligned()`.
 */
GC_INNER void *
GC_malloc_kind_aligned_global(size_t lb, int kind, size_t align_m1)
{
  GC_ASSERT(kind < MAXOBJKINDS);
  if (SMALL_OBJ(lb) && LIKELY(align_m1 < HBLKSIZE / 2)) {
    void *op;
    void **opp;
    size_t lg;

    GC_DBG_COLLECT_AT_MALLOC(lb);
    LOCK();
    lg = GC_size_map[lb];
    opp = &GC_obj_kinds[kind].ok_freelist[lg];
    op = *opp;
    if (UNLIKELY(align_m1 >= GC_GRANULE_BYTES)) {
      /* TODO: Avoid linear search. */
      /* Walk the free list until a suitably aligned object is found. */
      for (; (ADDR(op) & align_m1) != 0; op = *opp) {
        opp = &obj_link(op);
      }
    }
    if (LIKELY(op != NULL)) {
      /* Sanity check: the next free-list link points into the heap. */
      GC_ASSERT(PTRFREE == kind || NULL == obj_link(op)
                || (ADDR(obj_link(op)) < GC_greatest_real_heap_addr
                    && GC_least_real_heap_addr < ADDR(obj_link(op))));
      *opp = obj_link(op);
      if (kind != PTRFREE)
        obj_link(op) = NULL;
      GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
      UNLOCK();
      GC_ASSERT((ADDR(op) & align_m1) == 0);
      return op;
    }
    UNLOCK();
  }

  /*
   * We make the `GC_clear_stack()` call a tail one, hoping to get more
   * of the stack.
   */
  return GC_clear_stack(
      GC_generic_malloc_aligned(lb, kind, 0 /* `flags` */, align_m1));
}
402
#if defined(THREADS) && !defined(THREAD_LOCAL_ALLOC)
/*
 * Without thread-local allocation, `GC_malloc_kind` simply forwards to
 * the global allocation path.
 */
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_kind(size_t lb, int kind)
{
  return GC_malloc_kind_global(lb, kind);
}
#endif
410
411 GC_API GC_ATTR_MALLOC void *GC_CALL
412 GC_malloc_atomic(size_t lb)
413 {
414 /* Allocate `lb` bytes of atomic (pointer-free) data. */
415 return GC_malloc_kind(lb, PTRFREE);
416 }
417
418 GC_API GC_ATTR_MALLOC void *GC_CALL
419 GC_malloc(size_t lb)
420 {
421 /* Allocate `lb` bytes of composite (pointerful) data. */
422 return GC_malloc_kind(lb, NORMAL);
423 }
424
/*
 * Allocate an object of the given kind that the collector will never
 * reclaim automatically (it may still be deallocated explicitly via
 * `GC_free`). The object's mark bit is set on allocation, and
 * `GC_non_gc_bytes` accounting is updated.
 */
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_generic_malloc_uncollectable(size_t lb, int kind)
{
  void *op;
  size_t lb_orig = lb;

  GC_ASSERT(kind < MAXOBJKINDS);
  if (EXTRA_BYTES != 0 && LIKELY(lb != 0)) {
    /*
     * We do not need the extra byte, since this will not be collected
     * anyway.
     */
    lb--;
  }

  if (SMALL_OBJ(lb)) {
    void **opp;
    size_t lg;

    if (UNLIKELY(get_have_errors()))
      GC_print_all_errors();
    GC_notify_or_invoke_finalizers();
    GC_DBG_COLLECT_AT_MALLOC(lb_orig);
    LOCK();
    lg = GC_size_map[lb];
    opp = &GC_obj_kinds[kind].ok_freelist[lg];
    op = *opp;
    if (LIKELY(op != NULL)) {
      /* Fast path: pop an object off the free list. */
      *opp = obj_link(op);
      obj_link(op) = 0;
      GC_bytes_allocd += GRANULES_TO_BYTES((word)lg);
      /*
       * Mark bit was already set on free list. It will be cleared only
       * temporarily during a collection, as a result of the normal
       * free-list mark bit clearing.
       */
      GC_non_gc_bytes += GRANULES_TO_BYTES((word)lg);
    } else {
      op = GC_generic_malloc_inner_small(lb, kind);
      if (NULL == op) {
        /* Read `GC_oom_fn` under the lock but invoke it without. */
        GC_oom_func oom_fn = GC_oom_fn;
        UNLOCK();
        return (*oom_fn)(lb_orig);
      }
      /* For small objects, the free lists are completely marked. */
    }
    GC_ASSERT(GC_is_marked(op));
    UNLOCK();
  } else {
    op = GC_generic_malloc_aligned(lb, kind, 0 /* `flags` */,
                                   0 /* `align_m1` */);
    if (op /* `!= NULL` */) { /*< CPPCHECK */
      hdr *hhdr = HDR(op);

      GC_ASSERT(HBLKDISPL(op) == 0); /*< large block */

      /*
       * We do not need to acquire the allocator lock before `HDR(op)`,
       * since we have an undisguised pointer, but we need it while we
       * adjust the mark bits.
       */
      LOCK();
      set_mark_bit_from_hdr(hhdr, 0); /*< the only object */
#ifndef THREADS
      /*
       * This is not guaranteed in the multi-threaded case because the
       * counter could be updated before locking.
       */
      GC_ASSERT(hhdr->hb_n_marks == 0);
#endif
      hhdr->hb_n_marks = 1;
      UNLOCK();
    }
  }
  return op;
}
501
502 GC_API GC_ATTR_MALLOC void *GC_CALL
503 GC_malloc_uncollectable(size_t lb)
504 {
505 /* Allocate `lb` bytes of pointerful, traced, but not collectible data. */
506 return GC_generic_malloc_uncollectable(lb, UNCOLLECTABLE);
507 }
508
#ifdef GC_ATOMIC_UNCOLLECTABLE
GC_API GC_ATTR_MALLOC void *GC_CALL
GC_malloc_atomic_uncollectable(size_t lb)
{
  /* The pointer-free variant of `GC_malloc_uncollectable`. */
  void *result = GC_generic_malloc_uncollectable(lb, AUNCOLLECTABLE);

  return result;
}
#endif /* GC_ATOMIC_UNCOLLECTABLE */
516
517 #if defined(REDIRECT_MALLOC) && !defined(REDIRECT_MALLOC_IN_HEADER)
518
519 # ifndef MSWINCE
520 # include <errno.h>
521 # endif
522
523 /*
524 * Avoid unnecessary nested procedure calls here, by `#define` some `malloc`
525 * replacements. Otherwise we end up saving a meaningless return address in
526 * the object. It also speeds things up, but it is admittedly quite ugly.
527 */
528 # define GC_debug_malloc_replacement(lb) GC_debug_malloc(lb, GC_DBG_EXTRAS)
529
530 # if defined(CPPCHECK)
531 # define REDIRECT_MALLOC_F GC_malloc /*< e.g. */
532 # else
533 # define REDIRECT_MALLOC_F REDIRECT_MALLOC
534 # endif
535
/* The `malloc` replacement installed when `REDIRECT_MALLOC` is defined. */
void *
malloc(size_t lb)
{
  /*
   * It might help to manually inline the `GC_malloc` call here.
   * But any decent compiler should reduce the extra procedure call
   * to at most a jump instruction in this case.
   */
# if defined(SOLARIS) && defined(THREADS) && defined(I386)
  /*
   * Thread initialization can call `malloc` before we are ready for.
   * It is not clear that this is enough to help matters. The thread
   * implementation may well call `malloc` at other inopportune times.
   */
  if (UNLIKELY(!GC_is_initialized))
    return sbrk(lb);
# endif
  return (void *)REDIRECT_MALLOC_F(lb);
}
555
# ifdef REDIR_MALLOC_AND_LINUXTHREADS
/*
 * Text mapping bounds of `libpthread` and of the dynamic linker.
 * Allocations requested by code in these ranges receive special handling
 * by the `calloc` and `free` replacements below.
 */
# ifdef HAVE_LIBPTHREAD_SO
STATIC ptr_t GC_libpthread_start = NULL;
STATIC ptr_t GC_libpthread_end = NULL;
# endif
STATIC ptr_t GC_libld_start = NULL;
STATIC ptr_t GC_libld_end = NULL;
/* Set once the bounds above have been computed (or the attempt failed). */
static GC_bool lib_bounds_set = FALSE;

/* Compute the library text mapping bounds above; done only once. */
GC_INNER void
GC_init_lib_bounds(void)
{
  IF_CANCEL(int cancel_state;)

  /*
   * This test does not need to ensure memory visibility, since the bounds
   * will be set when/if we create another thread.
   */
  if (LIKELY(lib_bounds_set))
    return;

  DISABLE_CANCEL(cancel_state);
  GC_init(); /*< if not called yet */

# if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
  LOCK(); /*< just to set `GC_lock_holder` */
# endif
# ifdef HAVE_LIBPTHREAD_SO
  if (!GC_text_mapping("libpthread-", &GC_libpthread_start,
                       &GC_libpthread_end)) {
    WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
    /*
     * This might still work with some versions of `libpthread`,
     * so we do not `abort`.
     */
  }
# endif
  if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
    WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
  }
# if defined(GC_ASSERTIONS) && defined(GC_ALWAYS_MULTITHREADED)
  UNLOCK();
# endif
  RESTORE_CANCEL(cancel_state);
  lib_bounds_set = TRUE;
}
# endif /* REDIR_MALLOC_AND_LINUXTHREADS */
603
/* The `calloc` replacement; rejects `n * lb` multiplication overflow. */
void *
calloc(size_t n, size_t lb)
{
  if (UNLIKELY((lb | n) > GC_SQRT_SIZE_MAX) /*< fast initial test */
      && lb && n > GC_SIZE_MAX / lb)
    return (*GC_get_oom_fn())(GC_SIZE_MAX); /*< `n * lb` overflow */
# ifdef REDIR_MALLOC_AND_LINUXTHREADS
  /*
   * The linker may allocate some memory that is only pointed to by
   * memory-mapped thread stacks. Make sure it is not collectible.
   */
  {
    ptr_t caller = (ptr_t)__builtin_return_address(0);

    GC_init_lib_bounds();
    if (ADDR_INSIDE(caller, GC_libld_start, GC_libld_end)
# ifdef HAVE_LIBPTHREAD_SO
        /*
         * Note: the two ranges are actually usually adjacent, so there
         * may be a way to speed this up.
         */
        || ADDR_INSIDE(caller, GC_libpthread_start, GC_libpthread_end)
# endif
    ) {
      return GC_generic_malloc_uncollectable(n * lb, UNCOLLECTABLE);
    }
  }
# endif
  return (void *)REDIRECT_MALLOC_F(n * lb);
}
634
# ifndef strdup
/* The `strdup` replacement; sets `errno` to `ENOMEM` on failure. */
char *
strdup(const char *s)
{
  size_t lb = strlen(s) + 1;
  char *result = (char *)REDIRECT_MALLOC_F(lb);

  if (UNLIKELY(NULL == result)) {
    errno = ENOMEM;
    return NULL;
  }
  /* Copy including the terminating NUL character. */
  BCOPY(s, result, lb);
  return result;
}
# else
/*
 * If `strdup` is macro defined, we assume that it actually calls `malloc`,
 * and thus the right thing will happen even without overriding it.
 * This seems to be true on most Linux systems.
 */
# endif /* strdup */
656
# ifndef strndup
/*
 * This is similar to `strdup()`, but duplicates at most `size` characters.
 * Per POSIX, `str` need not be null-terminated if it contains at least
 * `size` characters, so the length scan must not read past `size` bytes;
 * `memchr` (unlike `strlen`) bounds the scan accordingly.
 * Sets `errno` to `ENOMEM` on allocation failure.
 */
char *
strndup(const char *str, size_t size)
{
  char *copy;
  const char *nul_p = (const char *)memchr(str, '\0', size);
  /* Length of the result, excluding the terminating NUL character. */
  size_t len = nul_p != NULL ? (size_t)(nul_p - str) : size;

  copy = (char *)REDIRECT_MALLOC_F(len + 1);
  if (UNLIKELY(NULL == copy)) {
    errno = ENOMEM;
    return NULL;
  }
  if (LIKELY(len > 0))
    BCOPY(str, copy, len);
  copy[len] = '\0';
  return copy;
}
# endif /* !strndup */
677
678 # undef GC_debug_malloc_replacement
679
680 #endif /* REDIRECT_MALLOC */
681
/* Explicitly deallocate the object. `hhdr` should correspond to `p`. */
static void
free_internal(void *p, const hdr *hhdr)
{
  size_t lb = hhdr->hb_sz; /*< size in bytes */
  size_t lg = BYTES_TO_GRANULES(lb); /*< size in granules */
  int kind = hhdr->hb_obj_kind;

  GC_bytes_freed += lb;
  if (IS_UNCOLLECTABLE(kind))
    GC_non_gc_bytes -= lb;
  if (LIKELY(lg <= MAXOBJGRANULES)) {
    /* A small object: push it back onto the matching free list. */
    struct obj_kind *ok = &GC_obj_kinds[kind];
    void **flh;

    /*
     * It is unnecessary to clear the mark bit. If the object is
     * reallocated, it does not matter. Otherwise, the collector will
     * do it, since it is on a free list.
     */
    if (ok->ok_init && LIKELY(lb > sizeof(ptr_t))) {
      /* Clear the object body, excluding the free-list link word. */
      BZERO((ptr_t *)p + 1, lb - sizeof(ptr_t));
    }

    flh = &ok->ok_freelist[lg];
    obj_link(p) = *flh;
    *flh = (ptr_t)p;
  } else {
    /* A large object: return the whole block to the block allocator. */
    if (lb > HBLKSIZE) {
      GC_large_allocd_bytes -= HBLKSIZE * OBJ_SZ_TO_BLOCKS(lb);
    }
    GC_ASSERT(ADDR(HBLKPTR(p)) == ADDR(hhdr->hb_block));
    GC_freehblk(hhdr->hb_block);
  }
}
717
/*
 * Explicitly deallocate `p`, which must be either `NULL` (a no-op) or
 * the base address of an object allocated by the collector
 * (asserted via `GC_base`).
 */
GC_API void GC_CALL
GC_free(void *p)
{
  const hdr *hhdr;

  if (p /* `!= NULL` */) {
    /* CPPCHECK */
  } else {
    /* Required by ANSI. It is not my fault... */
    return;
  }

#ifdef LOG_ALLOCS
  GC_log_printf("GC_free(%p) after GC #%lu\n", p, (unsigned long)GC_gc_no);
#endif
  hhdr = HDR(p);
#if defined(REDIRECT_MALLOC) \
    && ((defined(NEED_CALLINFO) && defined(GC_HAVE_BUILTIN_BACKTRACE)) \
        || defined(REDIR_MALLOC_AND_LINUXTHREADS) \
        || (defined(SOLARIS) && defined(THREADS)) || defined(MSWIN32))
  /*
   * This might be called indirectly by `GC_print_callers` to free the
   * result of `backtrace_symbols()`. For Solaris, we have to redirect
   * `malloc` calls during initialization. For the others, this seems
   * to happen implicitly. Do not try to deallocate that memory.
   */
  if (UNLIKELY(NULL == hhdr))
    return;
#endif
  GC_ASSERT(GC_base(p) == p);
  LOCK();
  free_internal(p, hhdr);
  FREE_PROFILER_HOOK(p);
  UNLOCK();
}
753
#ifdef THREADS
/*
 * Deallocate `p` (assumed to be non-`NULL`) while the allocator lock is
 * already held by the caller.
 */
GC_INNER void
GC_free_inner(void *p)
{
  GC_ASSERT(I_HOLD_LOCK());
  free_internal(p, HDR(p));
}
#endif /* THREADS */
762
763 #if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
764 # define REDIRECT_FREE GC_free
765 #endif
766
767 #if defined(REDIRECT_FREE) && !defined(REDIRECT_MALLOC_IN_HEADER)
768
769 # if defined(CPPCHECK)
770 # define REDIRECT_FREE_F GC_free /*< e.g. */
771 # else
772 # define REDIRECT_FREE_F REDIRECT_FREE
773 # endif
774
/* The `free` replacement installed when `REDIRECT_FREE` is defined. */
void
free(void *p)
{
# ifdef IGNORE_FREE
  UNUSED_ARG(p);
# else
# if defined(REDIR_MALLOC_AND_LINUXTHREADS) \
     && !defined(USE_PROC_FOR_LIBRARIES)
  /*
   * Do not bother with initialization checks. If nothing has been
   * initialized, then the check fails, and that is safe, since we have
   * not allocated uncollectible objects neither.
   */
  ptr_t caller = (ptr_t)__builtin_return_address(0);

  /*
   * This test does not need to ensure memory visibility, since the bounds
   * will be set when/if we create another thread.
   */
  if (ADDR_INSIDE(caller, GC_libld_start, GC_libld_end)
# ifdef HAVE_LIBPTHREAD_SO
      || ADDR_INSIDE(caller, GC_libpthread_start, GC_libpthread_end)
# endif
  ) {
    /* Linker/libpthread allocations were made uncollectible in calloc. */
    GC_free(p);
    return;
  }
# endif
  REDIRECT_FREE_F(p);
# endif
}
806 #endif /* REDIRECT_FREE */
807