1 //go:build gc.boehm
2 3 // This is the Boehm-Demers-Weiser conservative garbage collector, integrated
4 // into Moxie.
5 //
6 // Note that we use a special way of dealing with threads:
7 // * All calls to the bdwgc library are serialized using locks.
8 // * When the bdwgc library wants to push GC roots, all other threads that are
9 // running are stopped.
10 // * After returning from a bdwgc library call, the caller checks whether
11 // other threads were stopped (meaning a GC cycle happened) and resumes the
12 // world.
13 // This is not exactly the most efficient way to do this. We can likely speed
14 // things up by using bdwgc-native wrappers for starting/stopping threads (and
15 // also to resume the world while sweeping). Also, thread local allocation might
16 // help. But we don't do any of these right now, it is left as a possible future
17 // improvement.
18 19 package runtime
20 21 import (
22 "internal/gclayout"
23 "internal/task"
24 "unsafe"
25 )
// needsStaticHeap tells the rest of the runtime that no static heap area has
// to be reserved — presumably because bdwgc obtains memory from the OS by
// itself (see setHeapEnd, which panics if called).
const needsStaticHeap = false

// zeroSizedAlloc is just a sentinel that gets returned when allocating 0 bytes.
var zeroSizedAlloc uint8

// gcLock serializes all calls into the bdwgc library, as described in the
// package comment above.
var gcLock task.PMutex
// initHeap initializes the bdwgc collector and registers the runtime's root
// marking callback. It must run before the first heap allocation.
func initHeap() {
	libgc_init()

	// Call GC_set_push_other_roots(gcCallback) in C because of function
	// signature differences that do matter in WebAssembly.
	gcInit()
}
// gcInit is implemented externally (in C); it registers gcCallback with the
// collector via GC_set_push_other_roots — see the comment in initHeap.
//
//export moxie_runtime_bdwgc_init
func gcInit()
// gcCallback is invoked by bdwgc whenever it needs the runtime to push
// additional GC roots (registered via gcInit).
//
//export moxie_runtime_bdwgc_callback
func gcCallback() {
	// Mark globals and all stacks, and stop the world if we're using threading.
	gcMarkReachable()
}
// markRoots conservatively pushes the memory region between start and end
// onto the bdwgc mark stack as potential GC roots.
func markRoots(start, end uintptr) {
	libgc_push_all(start, end)
}
// markCurrentGoroutineStack marks the in-use portion of the current
// goroutine's stack, given its current stack pointer. Goroutine stacks are
// expected to live inside GC heap allocations (hence the libgc_base lookup).
func markCurrentGoroutineStack(sp uintptr) {
	// Only mark the area of the stack that is currently in use.
	// (This doesn't work for other goroutines, but at least it doesn't keep
	// more pointers alive than needed on the current stack).
	base := libgc_base(sp)
	if base == 0 { // sp did not point into a heap allocation
		runtimePanic("goroutine stack not in a heap allocation?")
	}
	stackBottom := base + libgc_size(base)
	libgc_push_all_stack(sp, stackBottom)
}
66 67 //go:noinline
68 func alloc(size uintptr, layout unsafe.Pointer) unsafe.Pointer {
69 if size == 0 {
70 return unsafe.Pointer(&zeroSizedAlloc)
71 }
72 73 gcLock.Lock()
74 var ptr unsafe.Pointer
75 if layout == gclayout.NoPtrs.AsPtr() {
76 // This object is entirely pointer free, for example make([]int, ...).
77 // Make sure the GC knows this so it doesn't scan the object
78 // unnecessarily to improve performance.
79 ptr = libgc_malloc_atomic(size)
80 // Memory returned from libgc_malloc_atomic has not been zeroed so we
81 // have to do that manually.
82 memzero(ptr, size)
83 } else {
84 // TODO: bdwgc supports typed allocations, which could be useful to
85 // implement a mostly-precise GC.
86 ptr = libgc_malloc(size)
87 // Memory returned from libgc_malloc has already been zeroed, so nothing
88 // to do here.
89 }
90 gcResumeWorld()
91 gcLock.Unlock()
92 if ptr == nil {
93 runtimePanic("gc: out of memory")
94 }
95 96 return ptr
97 }
98 99 func free(ptr unsafe.Pointer) {
100 libgc_free(ptr)
101 }
// GC runs a full, blocking garbage collection cycle.
func GC() {
	gcLock.Lock()
	libgc_gcollect()
	// The collection stopped the world (via gcCallback); resume it before
	// releasing the lock, per the protocol in the package comment.
	gcResumeWorld()
	gcLock.Unlock()
}
// gcMemStats is scratch space for ReadMemStats, protected by gcLock.
// This should be stack-allocated, but we don't currently have a good way of
// ensuring that happens.
var gcMemStats libgc_prof_stats
// ReadMemStats populates m with memory statistics obtained from bdwgc.
// Fields that bdwgc does not track are explicitly set to zero.
func ReadMemStats(m *MemStats) {
	gcLock.Lock()

	// The second argument is the struct size, which bdwgc uses to know which
	// fields the caller's struct has.
	libgc_get_prof_stats(&gcMemStats, unsafe.Sizeof(gcMemStats))

	// Fill in MemStats as well as we can, given the information that bdwgc
	// provides to us.
	m.HeapIdle = uint64(gcMemStats.free_bytes_full - gcMemStats.unmapped_bytes)
	m.HeapInuse = uint64(gcMemStats.heapsize_full - gcMemStats.unmapped_bytes)
	m.HeapReleased = uint64(gcMemStats.unmapped_bytes)
	m.HeapSys = uint64(m.HeapInuse + m.HeapIdle)
	m.GCSys = 0 // not provided by bdwgc
	m.TotalAlloc = uint64(gcMemStats.allocd_bytes_before_gc + gcMemStats.bytes_allocd_since_gc)
	m.Mallocs = 0 // not provided by bdwgc
	m.Frees = 0   // not provided by bdwgc
	m.Sys = uint64(gcMemStats.obtained_from_os_bytes)

	gcLock.Unlock()
}
// setHeapEnd is part of the runtime's GC interface, but bdwgc manages its
// own heap growth, so this must never be called (needsStaticHeap is false).
func setHeapEnd(newHeapEnd uintptr) {
	runtimePanic("gc: did not expect setHeapEnd call")
}
// SetFinalizer is accepted for API compatibility but currently does nothing.
func SetFinalizer(obj interface{}, finalizer interface{}) {
	// Unimplemented.
	// The GC *does* support finalization, so this could be added relatively
	// easily I think.
}
// Bindings to the bdwgc C API. See bdwgc's gc.h for the authoritative
// documentation of each function.

// Initializes the collector; must be called before any other GC_* call.
//
//export GC_init
func libgc_init()

// Allocates zeroed memory that the collector will scan for pointers.
//
//export GC_malloc
func libgc_malloc(uintptr) unsafe.Pointer

// Allocates pointer-free memory. The result is NOT zeroed (see alloc).
//
//export GC_malloc_atomic
func libgc_malloc_atomic(uintptr) unsafe.Pointer

// Explicitly frees a GC-allocated object.
//
//export GC_free
func libgc_free(unsafe.Pointer)

// Returns the base address of the heap object containing ptr, or 0 if ptr
// does not point into the GC heap.
//
//export GC_base
func libgc_base(ptr uintptr) uintptr

// Returns the size of the heap object whose base address is given.
//
//export GC_size
func libgc_size(ptr uintptr) uintptr

// Pushes the given memory region onto the mark stack as potential roots.
//
//export GC_push_all
func libgc_push_all(bottom, top uintptr)

// Like GC_push_all, but for stack regions.
//
//export GC_push_all_stack
func libgc_push_all_stack(bottom, top uintptr)

// Triggers a full, blocking collection cycle.
//
//export GC_gcollect
func libgc_gcollect()

// Fills in the given stats struct; the second parameter is the struct size
// (see ReadMemStats).
//
//export GC_get_prof_stats
func libgc_get_prof_stats(*libgc_prof_stats, uintptr) uintptr

// Registers a callback for pushing additional roots. The actual registration
// happens on the C side in gcInit (see initHeap), so this binding is
// currently unused from Go.
//
//export GC_set_push_other_roots
func libgc_set_push_other_roots(unsafe.Pointer)
// libgc_prof_stats mirrors bdwgc's GC_prof_stats_s struct (gc.h).
// Field order and widths must match the C definition exactly, because
// libgc_get_prof_stats writes into this struct directly.
type libgc_prof_stats struct {
	heapsize_full uintptr // total heap size, including unmapped bytes
	free_bytes_full uintptr // free bytes in the heap, including unmapped bytes
	unmapped_bytes uintptr // bytes returned to the OS (see HeapReleased)
	bytes_allocd_since_gc uintptr
	allocd_bytes_before_gc uintptr // together with the above: TotalAlloc
	non_gc_bytes uintptr
	gc_no uintptr // collection counter
	markers_m1 uintptr
	bytes_reclaimed_since_gc uintptr
	reclaimed_bytes_before_gc uintptr
	expl_freed_bytes_since_gc uintptr
	obtained_from_os_bytes uintptr // see MemStats.Sys
}
191