/* darwin_stop_world.c */
1 /*
2 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
3 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
4 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
5 * Copyright (c) 2000-2010 by Hewlett-Packard Development Company.
6 * All rights reserved.
7 * Copyright (c) 2008-2022 Ivan Maidanski
8 *
9 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
10 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
11 *
12 * Permission is hereby granted to use or copy this program
13 * for any purpose, provided the above notices are retained on all copies.
14 * Permission to modify the code and to distribute modified code is granted,
15 * provided the above notices are retained, and a notice that the code was
16 * modified is included with the above copyright notice.
17 */
18
19 #include "private/pthread_support.h"
20
21 /* This probably needs more porting work to ppc64. */
22
23 #if defined(DARWIN) && defined(THREADS)
24
25 # include <mach/machine.h>
26 # include <sys/sysctl.h>
27
28 # if defined(ARM32) && defined(ARM_THREAD_STATE32)
29 # include <CoreFoundation/CoreFoundation.h>
30 # endif
31
32 /*
33 * From "Inside Mac OS X - Mach-O Runtime Architecture" published by Apple:
34 * - Page 49: The space beneath the stack pointer, where a new stack frame
35 * would normally be allocated, is called the red zone. This area as
36 * shown in Figure 3-2 may be used for any purpose as long as a new stack
37 * frame does not need to be added to the stack.
38 * - Page 50: If a leaf procedure's red zone usage would exceed 224 bytes,
39 * then it must set up a stack frame just like routines that call other
40 * routines.
41 */
# ifdef POWERPC
    /* Size of the red zone below the stack pointer (see the quote above). */
#   if CPP_WORDSZ == 32
#     define PPC_RED_ZONE_SIZE 224
#   elif CPP_WORDSZ == 64
#     define PPC_RED_ZONE_SIZE 320
#   endif
# endif
49
50 # ifndef DARWIN_DONT_PARSE_STACK
51
/*
 * A saved stack frame as examined by `GC_FindTopOfStack`.  The field
 * names follow the PowerPC/Darwin linkage-area layout; only the slots
 * actually read by the frame walk are declared (the remaining linkage
 * slots are listed in the trailing comments).  On ARM targets the same
 * struct is overlaid on the frame-pointer chain record.
 */
typedef struct StackFrame {
  unsigned long savedSP; /*< back chain to the caller frame; 0 terminates */
  unsigned long savedCR;
  unsigned long savedLR; /*< saved return address of the caller */
  /* `unsigned long reserved[2];` */
  /* `unsigned long savedRTOC;` */
} StackFrame;
59
/*
 * Find the top (hottest end) of the stack containing `stack_start` by
 * walking the chain of saved frame pointers.  If `stack_start` is 0,
 * the walk begins at the current frame pointer of the calling thread,
 * fetched with inline assembly.  Returns a pointer to the outermost
 * frame reached before the back chain or the saved link register
 * looks bogus.
 */
GC_INNER ptr_t
GC_FindTopOfStack(unsigned long stack_start)
{
  StackFrame *frame = (StackFrame *)MAKE_CPTR(stack_start);

  if (NULL == frame) {
    /* No starting point given: read the current frame pointer. */
#   ifdef POWERPC
#     if CPP_WORDSZ == 32
        __asm__ __volatile__("lwz %0,0(r1)" : "=r"(frame));
#     else
        __asm__ __volatile__("ld %0,0(r1)" : "=r"(frame));
#     endif
#   elif defined(ARM32)
      /* iOS uses `r7` as the frame pointer. */
      volatile ptr_t sp_reg;

      __asm__ __volatile__("mov %0, r7\n" : "=r"(sp_reg));
      frame = (/* no volatile */ StackFrame *)sp_reg;
#   elif defined(AARCH64)
      /* `x29` is the AArch64 frame pointer. */
      volatile ptr_t sp_reg;

      __asm__ __volatile__("mov %0, x29\n" : "=r"(sp_reg));
      frame = (/* no volatile */ StackFrame *)sp_reg;
#   else
#     if defined(CPPCHECK)
        GC_noop1_ptr(&frame);
#     endif
      ABORT("GC_FindTopOfStack(0) is not implemented");
#   endif
  }

# ifdef DEBUG_THREADS_EXTRA
    GC_log_printf("FindTopOfStack start at sp= %p\n", (void *)frame);
# endif
  while (frame->savedSP != 0) { /*< stop if no more stack frames */
    unsigned long maskedLR;

#   ifdef CPPCHECK
      GC_noop1(frame->savedCR);
#   endif
    /* Follow the back chain to the caller frame. */
    frame = (StackFrame *)MAKE_CPTR(frame->savedSP);

    /*
     * We do these next two checks after going to the next frame because
     * the `savedLR` for the first stack frame in the loop is not set up
     * on purpose, so we should not check it.
     */
    maskedLR = frame->savedLR & ~0x3UL;
    if (0 == maskedLR || ~0x3UL == maskedLR) {
      /* The next `savedLR` is bogus (all zeros or all ones), stop. */
      break;
    }
  }
# ifdef DEBUG_THREADS_EXTRA
    GC_log_printf("FindTopOfStack finish at sp= %p\n", (void *)frame);
# endif
  return (ptr_t)frame;
}
117
118 # endif /* !DARWIN_DONT_PARSE_STACK */
119
/*
 * `GC_query_task_threads` controls whether to obtain the list of the
 * threads from the kernel (via `task_threads()`) or to use the
 * `GC_threads` table.  Depending on the build options it is either
 * a compile-time constant or a runtime flag set once by
 * `GC_use_threads_discovery`.
 */
# ifdef GC_NO_THREADS_DISCOVERY
#   define GC_query_task_threads FALSE
# elif defined(GC_DISCOVER_TASK_THREADS)
#   define GC_query_task_threads TRUE
# else
    STATIC GC_bool GC_query_task_threads = FALSE;
# endif /* !GC_NO_THREADS_DISCOVERY */
131
/*
 * Public API: request the Mach task-threads-based world stop and push
 * (threads discovery) and initialize the collector.  Must be called
 * before any multi-threaded allocation starts (asserted below via
 * `GC_need_to_lock`); aborts if discovery is compiled out.
 */
GC_API void GC_CALL
GC_use_threads_discovery(void)
{
# ifdef GC_NO_THREADS_DISCOVERY
    ABORT("Darwin task-threads-based stop and push unsupported");
# else
#   ifndef GC_ALWAYS_MULTITHREADED
      GC_ASSERT(!GC_need_to_lock);
#   endif
#   ifndef GC_DISCOVER_TASK_THREADS
      GC_query_task_threads = TRUE;
#   endif
    GC_init();
# endif
}
147
  /* Fallback definition for SDKs that predate iOS 8. */
# ifndef kCFCoreFoundationVersionNumber_iOS_8_0
#   define kCFCoreFoundationVersionNumber_iOS_8_0 1140.1
# endif

  /* Convert a thread act or a `mach` port to `void *` (for logging). */
# define THREAD_ACT_TO_VPTR(t) THREAD_ID_TO_VPTR(t)
# define MACH_PORT_TO_VPTR(t) THREAD_ID_TO_VPTR(t)
154
155 /*
156 * Evaluates the stack range for a given thread. Returns the lower bound
157 * and sets `*phi` to the upper one. Sets `*pfound_me` to `TRUE` if this
158 * is the current thread, otherwise the value is not changed.
159 */
160 STATIC ptr_t
161 GC_stack_range_for(ptr_t *phi, thread_act_t thread, GC_thread p,
162 mach_port_t my_thread, ptr_t *paltstack_lo,
163 ptr_t *paltstack_hi, GC_bool *pfound_me)
164 {
165 # ifdef DARWIN_DONT_PARSE_STACK
166 GC_stack_context_t crtn;
167 # endif
168 ptr_t lo;
169
170 GC_ASSERT(I_HOLD_LOCK());
171 if (thread == my_thread) {
172 GC_ASSERT(NULL == p || (p->flags & DO_BLOCKING) == 0);
173 lo = GC_approx_sp();
174 # ifndef DARWIN_DONT_PARSE_STACK
175 *phi = GC_FindTopOfStack(0);
176 # endif
177 *pfound_me = TRUE;
178 } else if (p != NULL && (p->flags & DO_BLOCKING) != 0) {
179 lo = p->crtn->stack_ptr;
180 # ifndef DARWIN_DONT_PARSE_STACK
181 *phi = p->crtn->topOfStack;
182 # endif
183
184 } else {
185 /*
186 * `MACHINE_THREAD_STATE_COUNT` does not seem to be defined everywhere.
187 * Hence we use our own variant. Alternatively, we could use
188 * `THREAD_STATE_MAX` (but seems to be not optimal).
189 */
190 kern_return_t kern_result;
191 GC_THREAD_STATE_T state;
192
193 # if defined(ARM32) && defined(ARM_THREAD_STATE32)
194 /*
195 * Use `ARM_UNIFIED_THREAD_STATE` on iOS8+ 32-bit targets and on
196 * 64-bit H/W (iOS7+ 32-bit mode).
197 */
198 size_t size;
199 static cpu_type_t cputype = 0;
200
201 if (cputype == 0) {
202 sysctlbyname("hw.cputype", &cputype, &size, NULL, 0);
203 }
204 if (cputype == CPU_TYPE_ARM64
205 || kCFCoreFoundationVersionNumber
206 >= kCFCoreFoundationVersionNumber_iOS_8_0) {
207 arm_unified_thread_state_t unified_state;
208 mach_msg_type_number_t unified_thread_state_count
209 = ARM_UNIFIED_THREAD_STATE_COUNT;
210 # if defined(CPPCHECK)
211 # define GC_ARM_UNIFIED_THREAD_STATE 1
212 # else
213 # define GC_ARM_UNIFIED_THREAD_STATE ARM_UNIFIED_THREAD_STATE
214 # endif
215 kern_result = thread_get_state(thread, GC_ARM_UNIFIED_THREAD_STATE,
216 (natural_t *)&unified_state,
217 &unified_thread_state_count);
218 # if !defined(CPPCHECK)
219 if (unified_state.ash.flavor != ARM_THREAD_STATE32) {
220 ABORT("unified_state flavor should be ARM_THREAD_STATE32");
221 }
222 # endif
223 state = unified_state;
224 } else
225 # endif
226 /* else */ {
227 mach_msg_type_number_t thread_state_count = GC_MACH_THREAD_STATE_COUNT;
228
229 /* Get the thread state (registers, etc.). */
230 do {
231 kern_result
232 = thread_get_state(thread, GC_MACH_THREAD_STATE,
233 (natural_t *)&state, &thread_state_count);
234 } while (kern_result == KERN_ABORTED);
235 }
236 # ifdef DEBUG_THREADS
237 GC_log_printf("thread_get_state returns %d\n", kern_result);
238 # endif
239 if (kern_result != KERN_SUCCESS)
240 ABORT("thread_get_state failed");
241
242 # if defined(I386)
243 lo = (ptr_t)state.THREAD_FLD(esp);
244 # ifndef DARWIN_DONT_PARSE_STACK
245 *phi = GC_FindTopOfStack(state.THREAD_FLD(esp));
246 # endif
247 GC_push_one(state.THREAD_FLD(eax));
248 GC_push_one(state.THREAD_FLD(ebx));
249 GC_push_one(state.THREAD_FLD(ecx));
250 GC_push_one(state.THREAD_FLD(edx));
251 GC_push_one(state.THREAD_FLD(edi));
252 GC_push_one(state.THREAD_FLD(esi));
253 GC_push_one(state.THREAD_FLD(ebp));
254
255 # elif defined(X86_64)
256 lo = (ptr_t)state.THREAD_FLD(rsp);
257 # ifndef DARWIN_DONT_PARSE_STACK
258 *phi = GC_FindTopOfStack(state.THREAD_FLD(rsp));
259 # endif
260 GC_push_one(state.THREAD_FLD(rax));
261 GC_push_one(state.THREAD_FLD(rbx));
262 GC_push_one(state.THREAD_FLD(rcx));
263 GC_push_one(state.THREAD_FLD(rdx));
264 GC_push_one(state.THREAD_FLD(rdi));
265 GC_push_one(state.THREAD_FLD(rsi));
266 GC_push_one(state.THREAD_FLD(rbp));
267 /* `rsp` is skipped. */
268 GC_push_one(state.THREAD_FLD(r8));
269 GC_push_one(state.THREAD_FLD(r9));
270 GC_push_one(state.THREAD_FLD(r10));
271 GC_push_one(state.THREAD_FLD(r11));
272 GC_push_one(state.THREAD_FLD(r12));
273 GC_push_one(state.THREAD_FLD(r13));
274 GC_push_one(state.THREAD_FLD(r14));
275 GC_push_one(state.THREAD_FLD(r15));
276
277 # elif defined(POWERPC)
278 lo = (ptr_t)(state.THREAD_FLD(r1) - PPC_RED_ZONE_SIZE);
279 # ifndef DARWIN_DONT_PARSE_STACK
280 *phi = GC_FindTopOfStack(state.THREAD_FLD(r1));
281 # endif
282 GC_push_one(state.THREAD_FLD(r0));
283 /* `r1` is skipped. */
284 GC_push_one(state.THREAD_FLD(r2));
285 GC_push_one(state.THREAD_FLD(r3));
286 GC_push_one(state.THREAD_FLD(r4));
287 GC_push_one(state.THREAD_FLD(r5));
288 GC_push_one(state.THREAD_FLD(r6));
289 GC_push_one(state.THREAD_FLD(r7));
290 GC_push_one(state.THREAD_FLD(r8));
291 GC_push_one(state.THREAD_FLD(r9));
292 GC_push_one(state.THREAD_FLD(r10));
293 GC_push_one(state.THREAD_FLD(r11));
294 GC_push_one(state.THREAD_FLD(r12));
295 GC_push_one(state.THREAD_FLD(r13));
296 GC_push_one(state.THREAD_FLD(r14));
297 GC_push_one(state.THREAD_FLD(r15));
298 GC_push_one(state.THREAD_FLD(r16));
299 GC_push_one(state.THREAD_FLD(r17));
300 GC_push_one(state.THREAD_FLD(r18));
301 GC_push_one(state.THREAD_FLD(r19));
302 GC_push_one(state.THREAD_FLD(r20));
303 GC_push_one(state.THREAD_FLD(r21));
304 GC_push_one(state.THREAD_FLD(r22));
305 GC_push_one(state.THREAD_FLD(r23));
306 GC_push_one(state.THREAD_FLD(r24));
307 GC_push_one(state.THREAD_FLD(r25));
308 GC_push_one(state.THREAD_FLD(r26));
309 GC_push_one(state.THREAD_FLD(r27));
310 GC_push_one(state.THREAD_FLD(r28));
311 GC_push_one(state.THREAD_FLD(r29));
312 GC_push_one(state.THREAD_FLD(r30));
313 GC_push_one(state.THREAD_FLD(r31));
314
315 # elif defined(ARM32)
316 lo = (ptr_t)state.THREAD_FLD(sp);
317 # ifndef DARWIN_DONT_PARSE_STACK
318 *phi = GC_FindTopOfStack(state.THREAD_FLD(r[7])); /*< `fp` */
319 # endif
320 {
321 int j;
322 for (j = 0; j < 7; j++)
323 GC_push_one(state.THREAD_FLD(r[j]));
324 /* Skip `r7` (because iOS uses it as a frame pointer). */
325 j++;
326 for (; j <= 12; j++)
327 GC_push_one(state.THREAD_FLD(r[j]));
328 }
329 /* `cpsr`, `pc` and `sp` are skipped. */
330 GC_push_one(state.THREAD_FLD(lr));
331
332 # elif defined(AARCH64)
333 lo = (ptr_t)state.THREAD_FLD(sp);
334 # ifndef DARWIN_DONT_PARSE_STACK
335 *phi = GC_FindTopOfStack(state.THREAD_FLD(fp));
336 # endif
337 {
338 int j;
339 for (j = 0; j <= 28; j++) {
340 GC_push_one(state.THREAD_FLD(x[j]));
341 }
342 }
343 /* `cpsr`, `fp`, `pc` and `sp` are skipped. */
344 GC_push_one(state.THREAD_FLD(lr));
345
346 # elif defined(CPPCHECK)
347 lo = NULL;
348 # else
349 # error FIXME for non-arm/ppc/x86 architectures
350 # endif
351 }
352
353 # ifndef DARWIN_DONT_PARSE_STACK
354 /* TODO: Determine `p` and handle `altstack` if `!DARWIN_DONT_PARSE_STACK` */
355 UNUSED_ARG(paltstack_hi);
356 # else
357 /*
358 * `p` is guaranteed to be non-`NULL` regardless of
359 * `GC_query_task_threads`.
360 */
361 # ifdef CPPCHECK
362 if (NULL == p)
363 ABORT("Bad GC_stack_range_for call");
364 # endif
365 crtn = p->crtn;
366 *phi = crtn->stack_end;
367 if (crtn->altstack != NULL && ADDR_GE(lo, crtn->altstack)
368 && ADDR_GE(crtn->altstack + crtn->altstack_size, lo)) {
369 *paltstack_lo = lo;
370 *paltstack_hi = crtn->altstack + crtn->altstack_size;
371 lo = crtn->normstack;
372 *phi = lo + crtn->normstack_size;
373 } else
374 # endif
375 /* else */ {
376 *paltstack_lo = NULL;
377 }
378 # if defined(STACKPTR_CORRECTOR_AVAILABLE) && defined(DARWIN_DONT_PARSE_STACK)
379 if (GC_sp_corrector != 0)
380 GC_sp_corrector((void **)&lo, THREAD_ID_TO_VPTR(p->id));
381 # endif
382 # ifdef DEBUG_THREADS
383 GC_log_printf("Darwin: Stack for thread %p is [%p,%p)\n",
384 THREAD_ACT_TO_VPTR(thread), (void *)lo, (void *)*phi);
385 # endif
386 return lo;
387 }
388
/*
 * Push the stacks (and pointer-bearing registers) of all threads to the
 * mark stack.  Depending on `GC_query_task_threads`, the thread set is
 * taken either from the kernel (`task_threads()`) or from the
 * `GC_threads` table.  The allocator lock must be held.  Aborts if the
 * current thread is not found (unless a thread is being created).
 */
GC_INNER void
GC_push_all_stacks(void)
{
  ptr_t hi, altstack_lo, altstack_hi;
  task_t my_task = current_task();
  mach_port_t my_thread = mach_thread_self();
  GC_bool found_me = FALSE;
  int nthreads = 0;
  word total_size = 0;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_thr_initialized);

# ifndef DARWIN_DONT_PARSE_STACK
  if (GC_query_task_threads) {
    int i;
    kern_return_t kern_result;
    thread_act_array_t act_list;
    mach_msg_type_number_t listcount;

    /* Obtain the list of the threads from the kernel. */
    kern_result = task_threads(my_task, &act_list, &listcount);
    if (kern_result != KERN_SUCCESS)
      ABORT("task_threads failed");

    for (i = 0; i < (int)listcount; i++) {
      thread_act_t thread = act_list[i];
      /* No `GC_thread` entry is known here, hence `p` is `NULL`. */
      ptr_t lo = GC_stack_range_for(&hi, thread, NULL, my_thread, &altstack_lo,
                                    &altstack_hi, &found_me);

      if (lo) {
        GC_ASSERT(ADDR_GE(hi, lo));
        total_size += hi - lo;
        GC_push_all_stack(lo, hi);
      }
      /* TODO: Handle `altstack`. */
      nthreads++;
      /* Release the port reference acquired by `task_threads()`. */
      mach_port_deallocate(my_task, thread);
    }

    /* Release the list buffer itself. */
    vm_deallocate(my_task, (vm_address_t)act_list,
                  sizeof(thread_t) * listcount);
  } else
# endif /* !DARWIN_DONT_PARSE_STACK */
  /* else */ {
    int i;

    /* Walk all buckets of the threads hash table. */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      GC_thread p;

      for (p = GC_threads[i]; p != NULL; p = p->tm.next) {
        GC_ASSERT(THREAD_TABLE_INDEX(p->id) == i);
        if (!KNOWN_FINISHED(p)) {
          thread_act_t thread = (thread_act_t)(p->mach_thread);
          ptr_t lo = GC_stack_range_for(&hi, thread, p, my_thread,
                                        &altstack_lo, &altstack_hi, &found_me);

          if (lo) {
            GC_ASSERT(ADDR_GE(hi, lo));
            total_size += hi - lo;
            /* Honor client-registered stack sections, if any. */
            GC_push_all_stack_sections(lo, hi, p->crtn->traced_stack_sect);
          }
          if (altstack_lo) {
            /* Also push the active part of the alternate signal stack. */
            total_size += altstack_hi - altstack_lo;
            GC_push_all_stack(altstack_lo, altstack_hi);
          }
          nthreads++;
        }
      }
    }
  }

  mach_port_deallocate(my_task, my_thread);
  GC_VERBOSE_LOG_PRINTF("Pushed %d thread stacks\n", nthreads);
  if (!found_me && !GC_in_thread_creation)
    ABORT("Collecting from unknown thread");
  GC_total_stacksize = total_size;
}
467
468 # ifndef GC_NO_THREADS_DISCOVERY
469
# ifdef MPROTECT_VDB
    /* The port of the `mach` exception handler thread, valid only */
    /* if `GC_use_mach_handler_thread` is set.                     */
    STATIC mach_port_t GC_mach_handler_thread = 0;
    STATIC GC_bool GC_use_mach_handler_thread = FALSE;

    /* Record the calling thread as the `mach` exception handler one */
    /* so that the collector never tries to suspend it.              */
    GC_INNER void
    GC_darwin_register_self_mach_handler(void)
    {
      GC_mach_handler_thread = mach_thread_self();
      GC_use_mach_handler_thread = TRUE;
    }
# endif /* MPROTECT_VDB */
481
# ifndef GC_MAX_MACH_THREADS
#   define GC_MAX_MACH_THREADS THREAD_TABLE_SZ
# endif

  /* An entry of the table of threads discovered during world stop. */
  struct GC_mach_thread {
    thread_act_t thread; /*< the `mach` port of the thread */
    GC_bool suspended;   /*< `TRUE` if suspended by `GC_stop_world` */
  };

  struct GC_mach_thread GC_mach_threads[GC_MAX_MACH_THREADS];
  /* The number of valid entries in `GC_mach_threads` table. */
  STATIC int GC_mach_threads_count = 0;
  /* FIXME: It is better to implement `GC_mach_threads` as a hash set. */
494
/*
 * Suspend every thread in `act_list` that is not present in `old_list`
 * and is not one of the special threads (the current one, the `mach`
 * exception handler, a parallel marker), appending the newly seen
 * threads to `GC_mach_threads`.  Return `TRUE` if there is a thread
 * in `act_list` that was not in `old_list`.
 */
STATIC GC_bool
GC_suspend_thread_list(thread_act_array_t act_list, int count,
                       thread_act_array_t old_list, int old_count,
                       task_t my_task, mach_port_t my_thread)
{
  int i;
  /* Rolling index into `old_list`; persists across iterations since */
  /* `task_threads()` tends to return threads in a stable order.     */
  int j = -1;
  GC_bool changed = FALSE;

  GC_ASSERT(I_HOLD_LOCK());
  for (i = 0; i < count; i++) {
    thread_act_t thread = act_list[i];
    GC_bool found;
    kern_return_t kern_result;

    if (thread == my_thread
#       ifdef MPROTECT_VDB
          || (GC_mach_handler_thread == thread && GC_use_mach_handler_thread)
#       endif
#       ifdef PARALLEL_MARK
          || GC_is_mach_marker(thread) /*< ignore the parallel markers */
#       endif
    ) {
      /*
       * Do not add our own, the parallel marker and the handler threads;
       * consider it as found (e.g., it was processed earlier).
       */
      mach_port_deallocate(my_task, thread);
      continue;
    }

    /* Find the current thread in the old list. */
    found = FALSE;
    {
      /* The previous found thread index. */
      int last_found = j;

      /* Search for the thread starting from the last found one first. */
      while (++j < old_count)
        if (old_list[j] == thread) {
          found = TRUE;
          break;
        }
      if (!found) {
        /* If not found, search in the rest (beginning) of the list. */
        for (j = 0; j < last_found; j++)
          if (old_list[j] == thread) {
            found = TRUE;
            break;
          }
      }
    }

    if (found) {
      /* It is already in the list, skip processing, release `mach` port. */
      mach_port_deallocate(my_task, thread);
      continue;
    }

    /* Add it to the `GC_mach_threads` list. */
    if (GC_mach_threads_count == GC_MAX_MACH_THREADS)
      ABORT("Too many threads");
    GC_mach_threads[GC_mach_threads_count].thread = thread;
    /* The default is "not suspended". */
    GC_mach_threads[GC_mach_threads_count].suspended = FALSE;
    changed = TRUE;

#   ifdef DEBUG_THREADS
      GC_log_printf("Suspending %p\n", THREAD_ACT_TO_VPTR(thread));
#   endif
    /*
     * Unconditionally suspend the thread.  It will do no harm if it is
     * already suspended by the client logic.
     */
    GC_acquire_dirty_lock();
    do {
      kern_result = thread_suspend(thread);
    } while (kern_result == KERN_ABORTED);
    GC_release_dirty_lock();
    if (kern_result != KERN_SUCCESS) {
      /*
       * The thread may have quit since the `task_threads()` call; leave
       * it marked as not suspended so it is not dealt with later.
       */
      GC_mach_threads[GC_mach_threads_count].suspended = FALSE;
    } else {
      /* Mark the thread as suspended (and requiring a resume). */
      GC_mach_threads[GC_mach_threads_count].suspended = TRUE;
      if (GC_on_thread_event)
        GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED,
                           THREAD_ACT_TO_VPTR(thread));
    }
    GC_mach_threads_count++;
  }
  return changed;
}
595
596 # endif /* !GC_NO_THREADS_DISCOVERY */
597
/*
 * Stop the world: suspend every thread except the current one (and the
 * special collector threads).  The allocator lock must be held.  Also
 * stops the `mach` exception handling if incremental mode is on.
 */
GC_INNER void
GC_stop_world(void)
{
  task_t my_task = current_task();
  mach_port_t my_thread = mach_thread_self();
  kern_return_t kern_result;

  GC_ASSERT(I_HOLD_LOCK());
  GC_ASSERT(GC_thr_initialized);
# ifdef DEBUG_THREADS
    GC_log_printf("Stopping the world from thread %p\n",
                  MACH_PORT_TO_VPTR(my_thread));
# endif
# ifdef PARALLEL_MARK
    if (GC_parallel) {
      /* Keep the parallel markers quiescent while the world is stopped. */
      GC_acquire_mark_lock();
      /* We should have previously waited for it to become zero. */
      GC_ASSERT(GC_fl_builder_count == 0);
    }
# endif /* PARALLEL_MARK */

  if (GC_query_task_threads) {
#   ifndef GC_NO_THREADS_DISCOVERY
      GC_bool changed;
      thread_act_array_t act_list, prev_list;
      mach_msg_type_number_t listcount, prevcount;

      /*
       * Clear out the `mach` threads list table.  We do not need to really
       * clear `GC_mach_threads[]` as it is used only in the range from 0 to
       * `GC_mach_threads_count - 1`, inclusive.
       */
      GC_mach_threads_count = 0;

      /*
       * Loop suspending threads until the whole list has been traversed
       * without a new thread appearing.  `thread_create()` does not
       * return (and thus the creating thread cannot be suspended
       * mid-creation) until the new thread exists, so there is no window
       * whereby a thread could be stopped, recognized as stopped, and
       * yet have a thread it created before stopping show up later.
       */
      changed = TRUE;
      prev_list = NULL;
      prevcount = 0;
      do {
        kern_result = task_threads(my_task, &act_list, &listcount);

        if (kern_result == KERN_SUCCESS) {
          changed = GC_suspend_thread_list(act_list, listcount, prev_list,
                                           prevcount, my_task, my_thread);

          if (prev_list != NULL) {
            /*
             * The thread ports are not deallocated by list: unused ports
             * are deallocated in `GC_suspend_thread_list`, the used ones
             * are kept in `GC_mach_threads` till `GC_start_world`, as
             * otherwise a thread object change could occur and
             * `GC_start_world` would not find the thread to resume, which
             * would cause the application to hang.
             */
            vm_deallocate(my_task, (vm_address_t)prev_list,
                          sizeof(thread_t) * prevcount);
          }

          /* Repeat while having changes. */
          prev_list = act_list;
          prevcount = listcount;
        }
      } while (changed);

      GC_ASSERT(prev_list != 0);
      /* The thread ports are not deallocated by list, see above. */
      vm_deallocate(my_task, (vm_address_t)act_list,
                    sizeof(thread_t) * listcount);
#   endif /* !GC_NO_THREADS_DISCOVERY */

  } else {
    unsigned i;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      GC_thread p;

      for (p = GC_threads[i]; p != NULL; p = p->tm.next) {
        /* Skip the finished, the blocking and the current threads. */
        if ((p->flags & (FINISHED | DO_BLOCKING)) == 0
            && p->mach_thread != my_thread) {
          GC_acquire_dirty_lock();
          do {
            /* Retry if interrupted. */
            kern_result = thread_suspend(p->mach_thread);
          } while (kern_result == KERN_ABORTED);
          GC_release_dirty_lock();
          if (kern_result != KERN_SUCCESS)
            ABORT("thread_suspend failed");
          if (GC_on_thread_event)
            GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED,
                               MACH_PORT_TO_VPTR(p->mach_thread));
        }
      }
    }
  }

# ifdef MPROTECT_VDB
    if (GC_auto_incremental) {
      GC_mprotect_stop();
    }
# endif
# ifdef PARALLEL_MARK
    if (GC_parallel)
      GC_release_mark_lock();
# endif

# ifdef DEBUG_THREADS
    GC_log_printf("World stopped from %p\n", MACH_PORT_TO_VPTR(my_thread));
# endif
  mach_port_deallocate(my_task, my_thread);
}
712
/*
 * Resume a single suspended thread.  On success invokes the client
 * `GC_on_thread_event` callback (if set); if the `mach` port is no
 * longer valid (the thread has probably terminated), only a warning
 * is issued.
 */
GC_INLINE void
GC_thread_resume(thread_act_t thread)
{
  kern_return_t kern_result;
# if defined(DEBUG_THREADS) || defined(GC_ASSERTIONS)
    /* Query the run state for the debug log only. */
    struct thread_basic_info info;
    mach_msg_type_number_t outCount = THREAD_BASIC_INFO_COUNT;

#   ifdef CPPCHECK
      info.run_state = 0;
#   endif
    kern_result = thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info,
                              &outCount);
    if (kern_result != KERN_SUCCESS)
      ABORT("thread_info failed");
# endif
  GC_ASSERT(I_HOLD_LOCK());
# ifdef DEBUG_THREADS
    GC_log_printf("Resuming thread %p with state %d\n",
                  THREAD_ACT_TO_VPTR(thread), info.run_state);
# endif
  /* Resume the thread. */
  kern_result = thread_resume(thread);
  if (kern_result != KERN_SUCCESS) {
    /* Do not abort the collection because of a dead thread. */
    WARN("thread_resume(%p) failed: mach port invalid\n", thread);
  } else if (GC_on_thread_event) {
    GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED,
                       THREAD_ACT_TO_VPTR(thread));
  }
}
743
/*
 * Restart the world: resume every thread that `GC_stop_world` has
 * suspended, and re-enable the `mach` exception handling if it was
 * stopped.  The allocator lock is still held (continuously since the
 * world was stopped).
 */
GC_INNER void
GC_start_world(void)
{
  task_t my_task = current_task();

  /* The allocator lock is held continuously since the world stopped. */
  GC_ASSERT(I_HOLD_LOCK());
# ifdef DEBUG_THREADS
    GC_log_printf("World starting\n");
# endif
# ifdef MPROTECT_VDB
    if (GC_auto_incremental) {
      GC_mprotect_resume();
    }
# endif

  if (GC_query_task_threads) {
#   ifndef GC_NO_THREADS_DISCOVERY
      int i, j;
      kern_return_t kern_result;
      thread_act_array_t act_list;
      mach_msg_type_number_t listcount;

      /* Get a fresh thread list to tell live threads from dead ones. */
      kern_result = task_threads(my_task, &act_list, &listcount);
      if (kern_result != KERN_SUCCESS)
        ABORT("task_threads failed");

      j = (int)listcount;
      for (i = 0; i < GC_mach_threads_count; i++) {
        thread_act_t thread = GC_mach_threads[i].thread;

        if (GC_mach_threads[i].suspended) {
          /*
           * The thread index found during the previous iteration
           * (reaching `listcount` value means no thread found yet).
           */
          int last_found = j;

          /* Search for the thread starting from the last found one first. */
          while (++j < (int)listcount) {
            if (act_list[j] == thread)
              break;
          }
          if (j >= (int)listcount) {
            /* If not found, search in the rest (beginning) of the list. */
            for (j = 0; j < last_found; j++) {
              if (act_list[j] == thread)
                break;
            }
          }
          if (j != last_found) {
            /* The thread is alive, resume it. */
            GC_thread_resume(thread);
          }
        } else {
          /*
           * This thread has failed to be suspended by `GC_stop_world`,
           * no action is needed.
           */
#         ifdef DEBUG_THREADS
            GC_log_printf("Not resuming thread %p as it is not suspended\n",
                          THREAD_ACT_TO_VPTR(thread));
#         endif
        }
        /* Release the port reference kept since `GC_stop_world`. */
        mach_port_deallocate(my_task, thread);
      }

      /* Release the references acquired by `task_threads` above. */
      for (i = 0; i < (int)listcount; i++)
        mach_port_deallocate(my_task, act_list[i]);
      vm_deallocate(my_task, (vm_address_t)act_list,
                    sizeof(thread_t) * listcount);
#   endif /* !GC_NO_THREADS_DISCOVERY */

  } else {
    int i;
    mach_port_t my_thread = mach_thread_self();

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      GC_thread p;

      for (p = GC_threads[i]; p != NULL; p = p->tm.next) {
        /* Skip the finished, the blocking and the current threads. */
        if ((p->flags & (FINISHED | DO_BLOCKING)) == 0
            && p->mach_thread != my_thread)
          GC_thread_resume(p->mach_thread);
      }
    }

    mach_port_deallocate(my_task, my_thread);
  }

# ifdef DEBUG_THREADS
    GC_log_printf("World started\n");
# endif
}
838
839 #endif /* DARWIN && THREADS */
840