headers.c
1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 2008-2022 Ivan Maidanski
6 *
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 *
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
15 */
16
17 #include "private/gc_priv.h"
18
19 #if defined(KEEP_BACK_PTRS) && defined(GC_ASSERTIONS)
20 # include "private/dbg_mlc.h" /*< for `NOT_MARKED` */
21 #endif
22
23 /*
24 * This implements:
25 * 1. Allocation of heap block headers;
26 * 2. A map from addresses to heap block addresses to heap block headers.
27 *
28 * Access speed is crucial. We implement an index structure based on
29 * a two-level tree.
30 */
31
32 GC_INNER hdr *
33 GC_find_header(const void *h)
34 {
35 #ifdef HASH_TL
36 hdr *result;
37 GET_HDR(h, result);
38 return result;
39 #else
40 return HDR_INNER(h);
41 #endif
42 }
43
/*
 * Handle a header-cache miss for pointer `p`: consult the two-level
 * index, update the cache entry `hce` if a valid header is found, and
 * return that header; return `NULL` if `p` does not point into a valid
 * (non-free) object block.  With `PRINT_BLACK_LIST` defined, `source`
 * identifies where the (possibly false) pointer was found.
 */
GC_INNER hdr *
#ifdef PRINT_BLACK_LIST
GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce, ptr_t source)
#else
GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce)
#endif
{
  hdr *hhdr;

  HC_MISS();
  GET_HDR(p, hhdr);
  if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
    /* Either no header, or an interior page of a large block. */
    if (GC_all_interior_pointers) {
      if (hhdr != NULL) {
        /* Pointer to near the start of the large object. */
        ptr_t current = (ptr_t)GC_find_starting_hblk(HBLKPTR(p), &hhdr);

        if (hhdr->hb_flags & IGNORE_OFF_PAGE)
          return 0;
        if (HBLK_IS_FREE(hhdr) || p - current >= (GC_signed_word)hhdr->hb_sz) {
          GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
          /* The pointer is past the end of the block. */
          return 0;
        }
      } else {
        GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
        /* And return `NULL` (`hhdr` is already `NULL` here). */
      }
      GC_ASSERT(NULL == hhdr || !HBLK_IS_FREE(hhdr));
      /*
       * Pointers past the first page are probably too rare to add them to
       * the cache.  We do not.  And correctness relies on the fact that
       * we do not.
       */
      return hhdr;
    } else {
      if (NULL == hhdr) {
        GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
      }
      return 0;
    }
  } else {
    if (HBLK_IS_FREE(hhdr)) {
      GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
      return 0;
    } else {
      /* A valid header: remember it in the supplied cache slot. */
      hce->block_addr = ADDR(p) >> LOG_HBLKSIZE;
      hce->hce_hdr = hhdr;
      return hhdr;
    }
  }
}
96
97 /*
98 * Routines to dynamically allocate collector data structures that will
99 * never be freed.
100 */
101
/*
 * Allocate `bytes` of memory for internal collector data structures;
 * such memory is never freed.  Returns `NULL` on failure.  The caller
 * must hold the allocator lock.
 */
GC_INNER ptr_t
GC_scratch_alloc(size_t bytes)
{
  ptr_t result = GC_scratch_free_ptr;
  size_t bytes_to_get;

  GC_ASSERT(I_HOLD_LOCK());
  bytes = ROUNDUP_GRANULE_SIZE(bytes);
  for (;;) {
    GC_ASSERT(GC_scratch_end_addr >= ADDR(result));
    if (bytes <= GC_scratch_end_addr - ADDR(result)) {
      /* Unallocated space of scratch buffer has enough size. */
      GC_scratch_free_ptr = result + bytes;
      return result;
    }

    GC_ASSERT(GC_page_size != 0);
    if (bytes >= MINHINCR * HBLKSIZE) {
      /* A large request: satisfy it straight from the OS, leaving */
      /* the current scratch buffer untouched for smaller callers. */
      bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(bytes);
      result = GC_os_get_mem(bytes_to_get);
      if (result != NULL) {
#if defined(KEEP_BACK_PTRS) && (GC_GRANULE_BYTES < 0x10)
        GC_ASSERT(ADDR(result) > (word)NOT_MARKED);
#endif
        /* No update of scratch free area pointer; get memory directly. */
#ifdef USE_SCRATCH_LAST_END_PTR
        /*
         * Update end point of last obtained area (needed only by
         * `GC_register_dynamic_libraries` for some targets).
         */
        GC_scratch_last_end_addr = ADDR(result) + bytes;
#endif
      }
      return result;
    }

    /* This is rounded up for a safety reason. */
    bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(MINHINCR * HBLKSIZE);

    result = GC_os_get_mem(bytes_to_get);
    if (UNLIKELY(NULL == result)) {
      /* Could not get the default increment; fall back to asking for */
      /* just the (rounded-up) amount actually requested.             */
      WARN("Out of memory - trying to allocate requested amount"
           " (%" WARN_PRIuPTR " bytes)...\n",
           bytes);
      bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(bytes);
      result = GC_os_get_mem(bytes_to_get);
      if (result != NULL) {
#ifdef USE_SCRATCH_LAST_END_PTR
        GC_scratch_last_end_addr = ADDR(result) + bytes;
#endif
      }
      return result;
    }

    /* TODO: Some amount of unallocated space may remain unused forever. */
    /* Update scratch area pointers and retry. */
    GC_scratch_free_ptr = result;
    GC_scratch_end_addr = ADDR(GC_scratch_free_ptr) + bytes_to_get;
#ifdef USE_SCRATCH_LAST_END_PTR
    GC_scratch_last_end_addr = GC_scratch_end_addr;
#endif
  }
}
165
166 /* Return an uninitialized header. */
167 static hdr *
168 alloc_hdr(void)
169 {
170 hdr *result;
171
172 GC_ASSERT(I_HOLD_LOCK());
173 if (NULL == GC_hdr_free_list) {
174 result = (hdr *)GC_scratch_alloc(sizeof(hdr));
175 } else {
176 result = GC_hdr_free_list;
177 GC_hdr_free_list = (hdr *)result->hb_next;
178 }
179 return result;
180 }
181
182 GC_INLINE void
183 free_hdr(hdr *hhdr)
184 {
185 hhdr->hb_next = (struct hblk *)GC_hdr_free_list;
186 GC_hdr_free_list = hhdr;
187 }
188
#ifdef COUNT_HDR_CACHE_HITS
/*
 * Used for debugging/profiling (the symbols are externally visible).
 * NOTE(review): presumably updated by the `HC_HIT`/`HC_MISS` macros
 * (see `GC_header_cache_miss` above) - confirm in `private/gc_hdrs.h`.
 */
word GC_hdr_cache_hits = 0;
word GC_hdr_cache_misses = 0;
#endif
194
195 GC_INNER void
196 GC_init_headers(void)
197 {
198 unsigned i;
199
200 GC_ASSERT(I_HOLD_LOCK());
201 GC_ASSERT(NULL == GC_all_nils);
202 GC_all_nils = (bottom_index *)GC_scratch_alloc(sizeof(bottom_index));
203 if (GC_all_nils == NULL) {
204 GC_err_printf("Insufficient memory for GC_all_nils\n");
205 EXIT();
206 }
207 BZERO(GC_all_nils, sizeof(bottom_index));
208 for (i = 0; i < TOP_SZ; i++) {
209 GC_top_index[i] = GC_all_nils;
210 }
211 }
212
213 /*
214 * Make sure that there is a bottom-level index block for address `addr`.
215 * Returns `FALSE` on failure.
216 */
217 static GC_bool
218 get_index(word addr)
219 {
220 word hi = addr >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
221 bottom_index *r;
222 bottom_index *p;
223 bottom_index **prev;
224 bottom_index *pi; /*< `old_p` */
225 word i;
226
227 GC_ASSERT(I_HOLD_LOCK());
228 #ifdef HASH_TL
229 i = TL_HASH(hi);
230 pi = GC_top_index[i];
231 for (p = pi; p != GC_all_nils; p = p->hash_link) {
232 if (p->key == hi)
233 return TRUE;
234 }
235 #else
236 if (GC_top_index[hi] != GC_all_nils)
237 return TRUE;
238 i = hi;
239 #endif
240 r = (bottom_index *)GC_scratch_alloc(sizeof(bottom_index));
241 if (UNLIKELY(NULL == r))
242 return FALSE;
243 BZERO(r, sizeof(bottom_index));
244 r->key = hi;
245 #ifdef HASH_TL
246 r->hash_link = pi;
247 #endif
248
249 /* Add it to the list of bottom indices. */
250 prev = &GC_all_bottom_indices; /*< pointer to `p` */
251
252 pi = NULL; /*< `bottom_index` preceding `p` */
253 while ((p = *prev) != 0 && p->key < hi) {
254 pi = p;
255 prev = &p->asc_link;
256 }
257 r->desc_link = pi;
258 if (NULL == p) {
259 GC_all_bottom_indices_end = r;
260 } else {
261 p->desc_link = r;
262 }
263 r->asc_link = p;
264 *prev = r;
265
266 GC_top_index[i] = r;
267 return TRUE;
268 }
269
270 GC_INNER hdr *
271 GC_install_header(struct hblk *h)
272 {
273 hdr *result;
274
275 GC_ASSERT(I_HOLD_LOCK());
276 if (UNLIKELY(!get_index(ADDR(h))))
277 return NULL;
278
279 result = alloc_hdr();
280 if (LIKELY(result != NULL)) {
281 GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(result));
282 SET_HDR(h, result);
283 #ifdef USE_MUNMAP
284 result->hb_last_reclaimed = (unsigned short)GC_gc_no;
285 #endif
286 }
287 return result;
288 }
289
/*
 * Set up forwarding counts for the interior pages of the large block
 * `h` of `sz` bytes.  Returns `FALSE` on failure (no memory for the
 * needed bottom-level index blocks).
 */
GC_INNER GC_bool
GC_install_counts(struct hblk *h, size_t sz /* bytes */)
{
  struct hblk *hbp;

  /* Ensure a bottom-level index exists for every page of the block. */
  for (hbp = h; ADDR_LT((ptr_t)hbp, (ptr_t)h + sz); hbp += BOTTOM_SZ) {
    if (!get_index(ADDR(hbp)))
      return FALSE;
    /* Is overflow of `hbp` expected? */
    if (ADDR(hbp) > GC_WORD_MAX - (word)BOTTOM_SZ * HBLKSIZE)
      break;
  }
  if (!get_index(ADDR(h) + sz - 1))
    return FALSE;

  GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(HDR(h)));
  /*
   * For each interior page, store its distance (in blocks, capped at
   * `MAX_JUMP`) back toward the block start, as a forwarding "header".
   */
  for (hbp = h + 1; ADDR_LT((ptr_t)hbp, (ptr_t)h + sz); hbp++) {
    word i = (word)HBLK_PTR_DIFF(hbp, h);

    SET_HDR(hbp, (hdr *)NUMERIC_TO_VPTR(i > MAX_JUMP ? MAX_JUMP : i));
  }
  return TRUE;
}
313
314 GC_INNER void
315 GC_remove_header(struct hblk *h)
316 {
317 hdr **ha;
318 GET_HDR_ADDR(h, ha);
319 free_hdr(*ha);
320 *ha = 0;
321 }
322
/* Remove the forwarding counts for the (large) block `h` of `sz` bytes. */
GC_INNER void
GC_remove_counts(struct hblk *h, size_t sz /* bytes */)
{
  struct hblk *hbp;

  /* A single-page block has no interior pages, hence no counts. */
  if (sz <= HBLKSIZE)
    return;
  if (NULL == HDR(h + 1)) {
    /*
     * The first interior page is already clear; the assertion below
     * checks the expected invariant that the rest are clear as well,
     * so there is nothing more to do.
     */
#ifdef GC_ASSERTIONS
    for (hbp = h + 2; ADDR_LT((ptr_t)hbp, (ptr_t)h + sz); hbp++) {
      GC_ASSERT(NULL == HDR(hbp));
    }
#endif
    return;
  }

  for (hbp = h + 1; ADDR_LT((ptr_t)hbp, (ptr_t)h + sz); hbp++) {
    SET_HDR(hbp, NULL);
  }
}
343
/*
 * The address of the `j`-th heap block covered by bottom index `bi`,
 * reconstructed from the index key.
 */
#define HBLK_ADDR(bi, j) \
  ((((bi)->key << LOG_BOTTOM_SZ) + (word)(j)) << LOG_HBLKSIZE)
346
/*
 * Apply `fn` (with `client_data`) to every allocated (non-free) heap
 * block, by walking all bottom indices.
 */
GC_API void GC_CALL
GC_apply_to_all_blocks(GC_walk_hblk_fn fn, void *client_data)
{
  bottom_index *bi;

  for (bi = GC_all_bottom_indices; bi != NULL; bi = bi->asc_link) {
    GC_signed_word j;

    /* Scan slots from the top down, so a forwarding count lets us */
    /* jump over the interior pages of a large block in one step.  */
    for (j = BOTTOM_SZ - 1; j >= 0;) {
      hdr *hhdr = bi->index[j];

      if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
        /* `NULL` means skip one page; otherwise skip the stored count. */
        j -= (GC_signed_word)(hhdr != NULL ? ADDR(hhdr) : 1);
      } else {
        if (!HBLK_IS_FREE(hhdr)) {
          GC_ASSERT(HBLK_ADDR(bi, j) == ADDR(hhdr->hb_block));
          fn(hhdr->hb_block, client_data);
        }
        j--;
      }
    }
  }
}
370
/*
 * Return the first block with an installed header at or after `h`, or
 * `NULL` if there is none.  Free blocks are skipped unless `allow_free`
 * is set.  The caller must hold at least the reader lock.
 */
GC_INNER struct hblk *
GC_next_block(struct hblk *h, GC_bool allow_free)
{
  REGISTER bottom_index *bi;
  REGISTER size_t j = (size_t)(ADDR(h) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1);

  GC_ASSERT(I_HOLD_READER_LOCK());
  GET_BI(h, bi);
  if (bi == GC_all_nils) {
    /* `h` is not covered by any bottom index; start from the first */
    /* index whose key is not less than that of `h`.                */
    REGISTER word hi = ADDR(h) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);

    bi = GC_all_bottom_indices;
    while (bi != NULL && bi->key < hi)
      bi = bi->asc_link;
    j = 0;
  }

  for (; bi != NULL; bi = bi->asc_link) {
    while (j < BOTTOM_SZ) {
      hdr *hhdr = bi->index[j];

      if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
        j++;
      } else {
        if (allow_free || !HBLK_IS_FREE(hhdr)) {
          GC_ASSERT(HBLK_ADDR(bi, j) == ADDR(hhdr->hb_block));
          return hhdr->hb_block;
        }
        /* A free block we are not allowed to return; step past it. */
        j += divHBLKSZ(hhdr->hb_sz);
      }
    }
    j = 0;
  }
  return NULL;
}
406
/*
 * Return the last block with an installed header at or below `h`, or
 * `NULL` if there is none.  Unlike `GC_next_block`, free blocks are
 * not filtered out.  The caller must hold at least the reader lock.
 */
GC_INNER struct hblk *
GC_prev_block(struct hblk *h)
{
  bottom_index *bi;
  GC_signed_word j = (ADDR(h) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1);

  GC_ASSERT(I_HOLD_READER_LOCK());
  GET_BI(h, bi);
  if (bi == GC_all_nils) {
    /* `h` is not covered by any bottom index; start from the last */
    /* index whose key is not greater than that of `h`.            */
    word hi = ADDR(h) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);

    bi = GC_all_bottom_indices_end;
    while (bi != NULL && bi->key > hi)
      bi = bi->desc_link;
    j = BOTTOM_SZ - 1;
  }
  for (; bi != NULL; bi = bi->desc_link) {
    while (j >= 0) {
      hdr *hhdr = bi->index[j];

      if (NULL == hhdr) {
        --j;
      } else if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
        /* A forwarding count: jump back over a large block interior. */
        j -= (GC_signed_word)ADDR(hhdr);
      } else {
        GC_ASSERT(HBLK_ADDR(bi, j) == ADDR(hhdr->hb_block));
        return hhdr->hb_block;
      }
    }
    j = BOTTOM_SZ - 1;
  }
  return NULL;
}
440