/* blacklst.c */
1 /*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 *
5 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
7 *
8 * Permission is hereby granted to use or copy this program
9 * for any purpose, provided the above notices are retained on all copies.
10 * Permission to modify the code and to distribute modified code is granted,
11 * provided the above notices are retained, and a notice that the code was
12 * modified is included with the above copyright notice.
13 */
14
15 #include "private/gc_priv.h"
16
17 #ifndef NO_BLACK_LISTING
18
19 /*
20 * We maintain several hash tables of `hblk` entities that have had false
21 * hits. Each contains one bit per hash bucket. If any page in the bucket
22 * has had a false hit, we assume that all of them have.
23 * See the definition of `page_hash_table` in `gc_priv.h` file.
24 * False hits from the stack(s) are much more dangerous than false hits
25 * from elsewhere, since the former can pin a large object that spans the
26 * block, even though it does not start on the dangerous block.
27 */
28
29 /*
30 * Externally callable routines are:
31 * - `GC_add_to_black_list_normal`,
32 * - `GC_add_to_black_list_stack`,
33 * - `GC_promote_black_lists`.
34 */
35
36 /*
37 * Pointers to individual tables. We replace one table by another by
38 * switching these pointers.
39 */
40
/* Non-stack false references seen at last full collection. */
STATIC word *GC_old_normal_bl = NULL;

/* Non-stack false references seen since last full collection. */
STATIC word *GC_incomplete_normal_bl = NULL;

/* Stack-originated false references seen at the last full collection. */
STATIC word *GC_old_stack_bl = NULL;

/* Stack-originated false references seen since the last full collection. */
STATIC word *GC_incomplete_stack_bl = NULL;

/* Number of bytes on stack blacklist. */
STATIC word GC_total_stack_black_listed = 0;

GC_INNER word GC_black_list_spacing = MINHINCR * HBLKSIZE; /*< initial guess */
54
55 STATIC void GC_clear_bl(word *);
56
# ifdef PRINT_BLACK_LIST
/*
 * Report (to the GC error stream) that `p` is being black-listed because
 * of a false reference found at `source`; `kind_str` names the blacklist
 * ("normal" or "stack").
 */
STATIC void
GC_print_blacklisted_ptr(ptr_t p, ptr_t source, const char *kind_str)
{
  ptr_t source_base = (ptr_t)GC_base(source);

  if (NULL == source_base) {
    /* The false reference does not lie inside a heap object. */
    GC_err_printf("Black listing (%s) %p referenced from %p in %s\n", kind_str,
                  (void *)p, (void *)source,
                  NULL != source ? "root set" : "register");
  } else {
    /*
     * FIXME: We cannot call the debug variant of `GC_print_heap_obj`
     * (with `PRINT_CALL_CHAIN`) here because the allocator lock is held
     * and the world is stopped.
     */
    GC_err_printf("Black listing (%s) %p referenced from %p in"
                  " object at %p of appr. %lu bytes\n",
                  kind_str, (void *)p, (void *)source, (void *)source_base,
                  (unsigned long)GC_size(source_base));
  }
}
# endif /* PRINT_BLACK_LIST */
80
81 GC_INNER void
82 GC_bl_init_no_interiors(void)
83 {
84 GC_ASSERT(I_HOLD_LOCK());
85 if (NULL == GC_incomplete_normal_bl) {
86 GC_old_normal_bl = (word *)GC_scratch_alloc(sizeof(page_hash_table));
87 GC_incomplete_normal_bl
88 = (word *)GC_scratch_alloc(sizeof(page_hash_table));
89 if (NULL == GC_old_normal_bl || NULL == GC_incomplete_normal_bl) {
90 GC_err_printf("Insufficient memory for black list\n");
91 EXIT();
92 }
93 GC_clear_bl(GC_old_normal_bl);
94 GC_clear_bl(GC_incomplete_normal_bl);
95 }
96 }
97
98 GC_INNER void
99 GC_bl_init(void)
100 {
101 GC_ASSERT(I_HOLD_LOCK());
102 if (!GC_all_interior_pointers) {
103 GC_bl_init_no_interiors();
104 }
105 GC_ASSERT(NULL == GC_old_stack_bl && NULL == GC_incomplete_stack_bl);
106 GC_old_stack_bl = (word *)GC_scratch_alloc(sizeof(page_hash_table));
107 GC_incomplete_stack_bl = (word *)GC_scratch_alloc(sizeof(page_hash_table));
108 if (NULL == GC_old_stack_bl || NULL == GC_incomplete_stack_bl) {
109 GC_err_printf("Insufficient memory for black list\n");
110 EXIT();
111 }
112 GC_clear_bl(GC_old_stack_bl);
113 GC_clear_bl(GC_incomplete_stack_bl);
114 }
115
/* Reset every bit of the given blacklist table. */
STATIC void
GC_clear_bl(word *bl)
{
  BZERO(bl, sizeof(page_hash_table));
}
121
/* Copy the whole blacklist table `old` over `dest`. */
STATIC void
GC_copy_bl(const word *old, word *dest)
{
  BCOPY(old, dest, sizeof(page_hash_table));
}
127
128 static word total_stack_black_listed(void);
129
130 GC_INNER void
131 GC_promote_black_lists(void)
132 {
133 word *very_old_normal_bl = GC_old_normal_bl;
134 word *very_old_stack_bl = GC_old_stack_bl;
135
136 GC_ASSERT(I_HOLD_LOCK());
137 GC_old_normal_bl = GC_incomplete_normal_bl;
138 GC_old_stack_bl = GC_incomplete_stack_bl;
139 if (!GC_all_interior_pointers) {
140 GC_clear_bl(very_old_normal_bl);
141 }
142 GC_clear_bl(very_old_stack_bl);
143 GC_incomplete_normal_bl = very_old_normal_bl;
144 GC_incomplete_stack_bl = very_old_stack_bl;
145 GC_total_stack_black_listed = total_stack_black_listed();
146 GC_VERBOSE_LOG_PRINTF(
147 "%lu bytes in heap blacklisted for interior pointers\n",
148 (unsigned long)GC_total_stack_black_listed);
149 if (GC_total_stack_black_listed != 0) {
150 GC_black_list_spacing
151 = HBLKSIZE * (GC_heapsize / GC_total_stack_black_listed);
152 }
153 if (GC_black_list_spacing < 3 * HBLKSIZE) {
154 GC_black_list_spacing = 3 * HBLKSIZE;
155 }
156 if (GC_black_list_spacing > MAXHINCR * HBLKSIZE) {
157 /*
158 * Make it easier to allocate really huge blocks, which otherwise may
159 * have problems with nonuniform blacklist distributions.
160 * This way we should always succeed immediately after growing the heap.
161 */
162 GC_black_list_spacing = MAXHINCR * HBLKSIZE;
163 }
164 }
165
166 GC_INNER void
167 GC_unpromote_black_lists(void)
168 {
169 if (!GC_all_interior_pointers) {
170 GC_copy_bl(GC_old_normal_bl, GC_incomplete_normal_bl);
171 }
172 GC_copy_bl(GC_old_stack_bl, GC_incomplete_stack_bl);
173 }
174
175 # if defined(PARALLEL_MARK) && defined(THREAD_SANITIZER)
176 # define backlist_set_pht_entry_from_index(db, index) \
177 set_pht_entry_from_index_concurrent(db, index)
178 # else
179 /*
180 * It is safe to set a bit in a blacklist even without synchronization,
181 * the only drawback is that we might have to redo black-listing sometimes.
182 */
183 # define backlist_set_pht_entry_from_index(bl, index) \
184 set_pht_entry_from_index(bl, index)
185 # endif
186
187 # ifdef PRINT_BLACK_LIST
188 GC_INNER void
189 GC_add_to_black_list_normal(ptr_t p, ptr_t source)
190 # else
191 GC_INNER void
192 GC_add_to_black_list_normal(ptr_t p)
193 # endif
194 {
195 # ifndef PARALLEL_MARK
196 GC_ASSERT(I_HOLD_LOCK());
197 # endif
198 if (GC_modws_valid_offsets[ADDR(p) & (sizeof(ptr_t) - 1)]) {
199 size_t index = PHT_HASH(p);
200
201 if (NULL == HDR(p) || get_pht_entry_from_index(GC_old_normal_bl, index)) {
202 # ifdef PRINT_BLACK_LIST
203 if (!get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
204 GC_print_blacklisted_ptr(p, source, "normal");
205 }
206 # endif
207 backlist_set_pht_entry_from_index(GC_incomplete_normal_bl, index);
208 } else {
209 /*
210 * This is probably just an interior pointer to an allocated object,
211 * and is not worth black listing.
212 */
213 }
214 }
215 }
216
217 # ifdef PRINT_BLACK_LIST
218 GC_INNER void
219 GC_add_to_black_list_stack(ptr_t p, ptr_t source)
220 # else
221 GC_INNER void
222 GC_add_to_black_list_stack(ptr_t p)
223 # endif
224 {
225 size_t index = PHT_HASH(p);
226
227 # ifndef PARALLEL_MARK
228 GC_ASSERT(I_HOLD_LOCK());
229 # endif
230 if (NULL == HDR(p) || get_pht_entry_from_index(GC_old_stack_bl, index)) {
231 # ifdef PRINT_BLACK_LIST
232 if (!get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
233 GC_print_blacklisted_ptr(p, source, "stack");
234 }
235 # endif
236 backlist_set_pht_entry_from_index(GC_incomplete_stack_bl, index);
237 }
238 }
239
240 #endif /* !NO_BLACK_LISTING */
241
/*
 * Is any page of the candidate block [h, h + len) black-listed?
 * Returns NULL if not; otherwise a non-NULL pointer one past a
 * black-listed block within the range (callers treat non-NULL as "do not
 * allocate here").  With black-listing compiled out, always returns NULL.
 */
GC_API struct GC_hblk_s *GC_CALL
GC_is_black_listed(struct GC_hblk_s *h, size_t len)
{
#ifdef NO_BLACK_LISTING
  UNUSED_ARG(h);
  UNUSED_ARG(len);
#else
  size_t index = PHT_HASH(h);
  size_t i, nblocks;

  /*
   * Without interior-pointer support, a hit on the first page in either
   * normal blacklist disqualifies the whole candidate immediately.
   */
  if (!GC_all_interior_pointers
      && (get_pht_entry_from_index(GC_old_normal_bl, index)
          || get_pht_entry_from_index(GC_incomplete_normal_bl, index))) {
    return h + 1;
  }

  /* Check every block of the range against both stack blacklists. */
  nblocks = divHBLKSZ(len);
  for (i = 0;;) {
    if (GC_old_stack_bl[divWORDSZ(index)] == 0
        && GC_incomplete_stack_bl[divWORDSZ(index)] == 0) {
      /*
       * An easy case.  The whole table word holding this bucket's bit is
       * zero in both tables, so skip ahead to the block whose bucket
       * falls in the next word (NOTE(review): this relies on consecutive
       * blocks hashing to consecutive bucket indices — see `PHT_HASH`).
       */
      i += CPP_WORDSZ - modWORDSZ(index);
    } else {
      if (get_pht_entry_from_index(GC_old_stack_bl, index)
          || get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
        /* Block `h + i` is black-listed; report one past it. */
        return &h[i + 1];
      }
      i++;
    }
    if (i >= nblocks)
      break;
    index = PHT_HASH(h + i);
  }
#endif
  return NULL;
}
278
279 #ifndef NO_BLACK_LISTING
/*
 * Return the number of black-listed blocks in a given range. Used only
 * for statistical purposes. Looks only at `GC_old_stack_bl` — note that
 * within this file it is reached (via `total_stack_black_listed`) only
 * from `GC_promote_black_lists` after the tables have been swapped, at
 * which point `GC_old_stack_bl` holds the just-completed list.
 */
STATIC word
GC_number_stack_black_listed(struct hblk *start, struct hblk *endp1)
{
  struct hblk *h;
  word result = 0;

  /* Count one per block whose hash-bucket bit is set. */
  for (h = start; ADDR_LT((ptr_t)h, (ptr_t)endp1); h++) {
    size_t index = PHT_HASH(h);

    if (get_pht_entry_from_index(GC_old_stack_bl, index))
      result++;
  }
  return result;
}
298
299 /* Return the total number of (stack) black-listed bytes. */
300 static word
301 total_stack_black_listed(void)
302 {
303 size_t i;
304 word total = 0;
305
306 for (i = 0; i < GC_n_heap_sects; i++) {
307 struct hblk *start = (struct hblk *)GC_heap_sects[i].hs_start;
308 struct hblk *endp1 = start + divHBLKSZ(GC_heap_sects[i].hs_bytes);
309
310 total += GC_number_stack_black_listed(start, endp1);
311 }
312 return total * HBLKSIZE;
313 }
314 #endif /* !NO_BLACK_LISTING */
315