1 package compiler
2 3 // This file implements the 'defer' keyword in Go.
4 // Defer statements are implemented by transforming the function in the
5 // following way:
6 // * Creating an alloca in the entry block that contains a pointer (initially
7 // null) to the linked list of defer frames.
8 // * Every time a defer statement is executed, a new defer frame is created
9 // using alloca with a pointer to the previous defer frame, and the head
10 // pointer in the entry block is replaced with a pointer to this defer
11 // frame.
12 // * On return, runtime.rundefers is called which calls all deferred functions
13 // from the head of the linked list until it has gone through all defer
14 // frames.
15 16 import (
17 "go/types"
18 "strconv"
19 "strings"
20 21 "moxie/compiler/llvmutil"
22 "golang.org/x/tools/go/ssa"
23 "tinygo.org/x/go-llvm"
24 )
25 26 // supportsRecover returns whether the compiler supports the recover() builtin
27 // for the current architecture.
28 func (b *builder) supportsRecover() bool {
29 switch b.archFamily() {
30 case "wasm32":
31 // Probably needs to be implemented using the exception handling
32 // proposal of WebAssembly:
33 // https://github.com/WebAssembly/exception-handling
34 return false
35 case "riscv64", "xtensa":
36 // TODO: add support for these architectures
37 return false
38 default:
39 return true
40 }
41 }
42 43 // hasDeferFrame returns whether the current function needs to catch panics and
44 // run defers.
45 func (b *builder) hasDeferFrame() bool {
46 if b.fn.Recover == nil {
47 return false
48 }
49 return b.supportsRecover()
50 }
// deferInitFunc sets up this function for future deferred calls. It must be
// called from within the entry block when this function contains deferred
// calls.
func (b *builder) deferInitFunc() {
	// Some setup: maps from the various kinds of deferred callees to their
	// callback number (index into b.allDeferFuncs), used later by
	// createDefer and createRunDefers.
	b.deferFuncs = make(map[*ssa.Function]int)
	b.deferInvokeFuncs = make(map[string]int)
	b.deferClosureFuncs = make(map[*ssa.Function]int)
	b.deferExprFuncs = make(map[ssa.Value]int)
	b.deferBuiltinFuncs = make(map[ssa.Value]deferBuiltin)

	// Create defer list pointer: the head of the linked list of defer
	// frames, initially nil.
	b.deferPtr = b.CreateAlloca(b.dataPtrType, "deferPtr")
	b.CreateStore(llvm.ConstPointerNull(b.dataPtrType), b.deferPtr)

	if b.hasDeferFrame() {
		// Set up the defer frame with the current stack pointer.
		// This assumes that the stack pointer doesn't move outside of the
		// function prologue/epilogue (an invariant maintained by Moxie but
		// possibly broken by the C alloca function).
		// The frame pointer is _not_ saved, because it is marked as clobbered
		// in the setjmp-like inline assembly.
		deferFrameType := b.getLLVMRuntimeType("deferFrame")
		b.deferFrame = b.CreateAlloca(deferFrameType, "deferframe.buf")
		stackPointer := b.readStackPointer()
		b.createRuntimeCall("setupDeferFrame", []llvm.Value{b.deferFrame, stackPointer}, "")

		// Create the landing pad block, which is where control transfers after
		// a panic. It is filled in later by createLandingPad.
		b.landingpad = b.ctx.AddBasicBlock(b.llvmFn, "lpad")
	}
}
// createLandingPad fills in the landing pad block. This block runs the deferred
// functions and returns (by jumping to the recover block). If the function is
// still panicking after the defers are run, the panic will be re-raised in
// destroyDeferFrame.
func (b *builder) createLandingPad() {
	b.SetInsertPointAtEnd(b.landingpad)

	// Add debug info, if needed.
	// The location used is the closing bracket of the function.
	if b.Debug {
		pos := b.program.Fset.Position(b.fn.Syntax().End())
		b.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), b.difunc, llvm.Metadata{})
	}

	// Run all deferred calls that were pushed before the panic happened.
	b.createRunDefers()

	// Continue at the 'recover' block, which returns to the parent in an
	// appropriate way.
	b.CreateBr(b.blockInfo[b.fn.Recover.Index].entry)
}
// Create a checkpoint (similar to setjmp). This emits inline assembly that
// stores the current program counter inside the ptr address (actually
// ptr+sizeof(ptr)) and then returns a boolean indicating whether this is the
// normal flow (false) or we jumped here from somewhere else (true).
func (b *builder) createCheckpoint(ptr llvm.Value) llvm.Value {
	// Construct inline assembly equivalents of setjmp.
	// The assembly works as follows:
	//   * All registers (both callee-saved and caller saved) are clobbered
	//     after the inline assembly returns.
	//   * The assembly stores the address just past the end of the assembly
	//     into the jump buffer.
	//   * The return value (eax, rax, r0, etc) is set to zero in the inline
	//     assembly but set to an unspecified non-zero value when jumping using
	//     a longjmp.
	var asmString, constraints string
	resultType := b.uintptrType
	switch b.archFamily() {
	case "i386":
		asmString = `
xorl %eax, %eax
movl $$1f, 4(%ebx)
1:`
		constraints = "={eax},{ebx},~{ebx},~{ecx},~{edx},~{esi},~{edi},~{ebp},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{fpsr},~{fpcr},~{flags},~{dirflag},~{memory}"
		// This doesn't include the floating point stack because Moxie uses
		// newer floating point instructions.
	case "x86_64":
		asmString = `
leaq 1f(%rip), %rax
movq %rax, 8(%rbx)
xorq %rax, %rax
1:`
		constraints = "={rax},{rbx},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{fpsr},~{fpcr},~{flags},~{dirflag},~{memory}"
		// This list doesn't include AVX/AVX512 registers because Moxie
		// doesn't currently enable support for AVX instructions.
	case "arm":
		// Note: the following assembly takes into account that the PC is
		// always 4 bytes ahead on ARM. The PC that is stored always points
		// to the instruction just after the assembly fragment so that
		// moxie_longjmp lands at the correct instruction.
		if b.isThumb() {
			// Instructions are 2 bytes in size.
			asmString = `
movs r0, #0
mov r2, pc
str r2, [r1, #4]`
		} else {
			// Instructions are 4 bytes in size.
			asmString = `
str pc, [r1, #4]
movs r0, #0`
		}
		constraints = "={r0},{r1},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{lr},~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{cpsr},~{memory}"
	case "aarch64":
		asmString = `
adr x2, 1f
str x2, [x1, #8]
mov x0, #0
1:
`
		constraints = "={x0},{x1},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{lr},~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{q16},~{q17},~{q18},~{q19},~{q20},~{q21},~{q22},~{q23},~{q24},~{q25},~{q26},~{q27},~{q28},~{q29},~{q30},~{nzcv},~{ffr},~{memory}"
		if b.GOOS != "darwin" && b.GOOS != "windows" {
			// These registers cause the following warning when compiling for
			// MacOS and Windows:
			//   warning: inline asm clobber list contains reserved registers:
			//   X18, FP
			//   Reserved registers on the clobber list may not be preserved
			//   across the asm statement, and clobbering them may lead to
			//   undefined behaviour.
			constraints += ",~{x18},~{fp}"
		}
		// TODO: SVE registers, which we don't use in Moxie at the moment.
	case "avr":
		// Note: the Y register (R28:R29) is a fixed register and therefore
		// needs to be saved manually. TODO: do this only once per function with
		// a defer frame, not for every call.
		resultType = b.ctx.Int8Type()
		asmString = `
ldi r24, pm_lo8(1f)
ldi r25, pm_hi8(1f)
std z+2, r24
std z+3, r25
std z+4, r28
std z+5, r29
ldi r24, 0
1:`
		constraints = "={r24},z,~{r0},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15},~{r16},~{r17},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r25},~{r26},~{r27}"
	case "mips":
		// $4 flag (zero or non-zero)
		// $5 defer frame
		asmString = `
.set noat
move $$4, $$zero
jal 1f
1:
addiu $$ra, 8
sw $$ra, 4($$5)
.set at`
		constraints = "={$4},{$5},~{$1},~{$2},~{$3},~{$5},~{$6},~{$7},~{$8},~{$9},~{$10},~{$11},~{$12},~{$13},~{$14},~{$15},~{$16},~{$17},~{$18},~{$19},~{$20},~{$21},~{$22},~{$23},~{$24},~{$25},~{$26},~{$27},~{$28},~{$29},~{$30},~{$31},~{memory}"
		if !strings.Contains(b.Features, "+soft-float") {
			// Using floating point registers together with GOMIPS=softfloat
			// results in a crash: "This value type is not natively supported!"
			// So only add them when using hardfloat.
			constraints += ",~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f12},~{$f13},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"
		}
	case "riscv32":
		asmString = `
la a2, 1f
sw a2, 4(a1)
li a0, 0
1:`
		constraints = "={a0},{a1},~{a1},~{a2},~{a3},~{a4},~{a5},~{a6},~{a7},~{s0},~{s1},~{s2},~{s3},~{s4},~{s5},~{s6},~{s7},~{s8},~{s9},~{s10},~{s11},~{t0},~{t1},~{t2},~{t3},~{t4},~{t5},~{t6},~{ra},~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31},~{memory}"
	default:
		// This case should have been handled by b.supportsRecover().
		b.addError(b.fn.Pos(), "unknown architecture for defer: "+b.archFamily())
	}
	asmType := llvm.FunctionType(resultType, []llvm.Type{b.dataPtrType}, false)
	asm := llvm.InlineAsm(asmType, asmString, constraints, false, false, 0, false)
	result := b.CreateCall(asmType, asm, []llvm.Value{ptr}, "setjmp")
	// Index -1 attaches the attribute to the call site itself (the function
	// index in LLVM's attribute numbering). returns_twice tells LLVM this
	// call may return a second time, like setjmp.
	result.AddCallSiteAttribute(-1, b.ctx.CreateEnumAttribute(llvm.AttributeKindID("returns_twice"), 0))
	// The checkpoint yields zero on the normal path and non-zero after a
	// longjmp, so "is zero" means "continue normally".
	isZero := b.CreateICmp(llvm.IntEQ, result, llvm.ConstInt(resultType, 0, false), "setjmp.result")
	return isZero
}
228 229 // createInvokeCheckpoint saves the function state at the given point, to
230 // continue at the landing pad if a panic happened. This is implemented using a
231 // setjmp-like construct.
232 func (b *builder) createInvokeCheckpoint() {
233 isZero := b.createCheckpoint(b.deferFrame)
234 continueBB := b.insertBasicBlock("")
235 b.CreateCondBr(isZero, continueBB, b.landingpad)
236 b.SetInsertPointAtEnd(continueBB)
237 b.currentBlockInfo.exit = continueBB
238 }
239 240 // isInLoop checks if there is a path from the current block to itself.
241 // Use Tarjan's strongly connected components algorithm to search for cycles.
242 // A one-node SCC is a cycle iff there is an edge from the node to itself.
243 // A multi-node SCC is always a cycle.
244 // https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
245 func (b *builder) isInLoop() bool {
246 if b.currentBlockInfo.tarjan.lowLink == 0 {
247 b.strongConnect(b.currentBlock)
248 }
249 return b.currentBlockInfo.tarjan.cyclic
250 }
// strongConnect is the recursive part of Tarjan's SCC algorithm, starting at
// the given block. It assigns lowLink indices and sets the cyclic flag on
// every block that is part of a cycle; see isInLoop for how the result is
// consumed.
func (b *builder) strongConnect(block *ssa.BasicBlock) {
	// Assign a new index.
	// Indices start from 1 so that 0 can be used as a sentinel.
	assignedIndex := b.tarjanIndex + 1
	b.tarjanIndex = assignedIndex

	// Apply the new index.
	blockIndex := block.Index
	node := &b.blockInfo[blockIndex].tarjan
	node.lowLink = assignedIndex

	// Push the node onto the stack.
	node.onStack = true
	b.tarjanStack = append(b.tarjanStack, uint(blockIndex))

	// Process the successors.
	for _, successor := range block.Succs {
		// Look up the successor's state.
		successorIndex := successor.Index
		if successorIndex == blockIndex {
			// Handle a self-cycle specially.
			node.cyclic = true
			continue
		}
		successorNode := &b.blockInfo[successorIndex].tarjan

		switch {
		case successorNode.lowLink == 0:
			// This node has not yet been visited.
			b.strongConnect(successor)

		case !successorNode.onStack:
			// This node has been visited, but is in a different SCC.
			// Ignore it, and do not update lowLink.
			continue
		}

		// Update the lowLink index.
		// This always uses the min-of-lowlink instead of using index in the on-stack case.
		// This is done for two reasons:
		//  1. The lowLink update can be shared between the new-node and on-stack cases.
		//  2. The assigned index does not need to be saved - it is only needed for root node detection.
		if successorNode.lowLink < node.lowLink {
			node.lowLink = successorNode.lowLink
		}
	}

	if node.lowLink == assignedIndex {
		// This is a root node.
		// Pop the SCC off the stack.
		stack := b.tarjanStack
		top := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		blocks := b.blockInfo
		topNode := &blocks[top].tarjan
		topNode.onStack = false

		if top != uint(blockIndex) {
			// The root node is not the only node in the SCC.
			// Mark all nodes in this SCC as cyclic, popping until the root
			// (blockIndex) itself has been popped and marked.
			topNode.cyclic = true
			for top != uint(blockIndex) {
				top = stack[len(stack)-1]
				stack = stack[:len(stack)-1]
				topNode = &blocks[top].tarjan
				topNode.onStack = false
				topNode.cyclic = true
			}
		}

		b.tarjanStack = stack
	}
}
// tarjanNode holds per-block state for isInLoop and strongConnect.
type tarjanNode struct {
	// lowLink is the index of the first visited node that is reachable from this block.
	// The lowLink indices are assigned by the SCC search, and do not correspond to b.Index.
	// A lowLink of 0 is used as a sentinel to mark a node which has not yet been visited.
	lowLink uint

	// onStack tracks whether this node is currently on the SCC search stack.
	onStack bool

	// cyclic indicates whether this block is in a loop.
	// If lowLink is 0, strongConnect must be called before reading this field.
	cyclic bool
}
// createDefer emits a single defer instruction, to be run when this function
// returns. It packs the callback number, the previous list head, and the call
// arguments into a struct, allocates it (on the stack when not in a loop,
// otherwise on the heap), and pushes it onto the defer linked list.
func (b *builder) createDefer(instr *ssa.Defer) {
	// The pointer to the previous defer struct, which we will replace to
	// make a linked list.
	next := b.CreateLoad(b.dataPtrType, b.deferPtr, "defer.next")

	var values []llvm.Value
	// The first two fields are always the callback number and the next
	// pointer, mirroring the runtime._defer layout.
	valueTypes := []llvm.Type{b.uintptrType, next.Type()}
	if instr.Call.IsInvoke() {
		// Method call on an interface.

		// Get callback type number.
		methodName := instr.Call.Method.FullName()
		if _, ok := b.deferInvokeFuncs[methodName]; !ok {
			b.deferInvokeFuncs[methodName] = len(b.allDeferFuncs)
			b.allDeferFuncs = append(b.allDeferFuncs, &instr.Call)
		}
		callback := llvm.ConstInt(b.uintptrType, uint64(b.deferInvokeFuncs[methodName]), false)

		// Collect all values to be put in the struct (starting with
		// runtime._defer fields, followed by the call parameters).
		itf := b.getValue(instr.Call.Value, getPos(instr)) // interface
		typecode := b.CreateExtractValue(itf, 0, "invoke.func.typecode")
		receiverValue := b.CreateExtractValue(itf, 1, "invoke.func.receiver")
		values = []llvm.Value{callback, next, typecode, receiverValue}
		valueTypes = append(valueTypes, b.dataPtrType, b.dataPtrType)
		for _, arg := range instr.Call.Args {
			val := b.getValue(arg, getPos(instr))
			values = append(values, val)
			valueTypes = append(valueTypes, val.Type())
		}

	} else if callee, ok := instr.Call.Value.(*ssa.Function); ok {
		// Regular function call.
		if _, ok := b.deferFuncs[callee]; !ok {
			b.deferFuncs[callee] = len(b.allDeferFuncs)
			b.allDeferFuncs = append(b.allDeferFuncs, callee)
		}
		callback := llvm.ConstInt(b.uintptrType, uint64(b.deferFuncs[callee]), false)

		// Collect all values to be put in the struct (starting with
		// runtime._defer fields).
		values = []llvm.Value{callback, next}
		for _, param := range instr.Call.Args {
			llvmParam := b.getValue(param, getPos(instr))
			values = append(values, llvmParam)
			valueTypes = append(valueTypes, llvmParam.Type())
		}

	} else if makeClosure, ok := instr.Call.Value.(*ssa.MakeClosure); ok {
		// Immediately applied function literal with free variables.

		// Extract the context from the closure. We won't need the function
		// pointer.
		// TODO: ignore this closure entirely and put pointers to the free
		// variables directly in the defer struct, avoiding a memory allocation.
		closure := b.getValue(instr.Call.Value, getPos(instr))
		context := b.CreateExtractValue(closure, 0, "")

		// Get the callback number.
		fn := makeClosure.Fn.(*ssa.Function)
		if _, ok := b.deferClosureFuncs[fn]; !ok {
			b.deferClosureFuncs[fn] = len(b.allDeferFuncs)
			b.allDeferFuncs = append(b.allDeferFuncs, makeClosure)
		}
		callback := llvm.ConstInt(b.uintptrType, uint64(b.deferClosureFuncs[fn]), false)

		// Collect all values to be put in the struct (starting with
		// runtime._defer fields, followed by all parameters including the
		// context pointer).
		values = []llvm.Value{callback, next}
		for _, param := range instr.Call.Args {
			llvmParam := b.getValue(param, getPos(instr))
			values = append(values, llvmParam)
			valueTypes = append(valueTypes, llvmParam.Type())
		}
		values = append(values, context)
		valueTypes = append(valueTypes, context.Type())

	} else if builtin, ok := instr.Call.Value.(*ssa.Builtin); ok {
		// Deferred builtin (e.g. close, print); remember its name, position
		// and argument types so createRunDefers can re-create the call.
		var argTypes []types.Type
		var argValues []llvm.Value
		for _, arg := range instr.Call.Args {
			argTypes = append(argTypes, arg.Type())
			argValues = append(argValues, b.getValue(arg, getPos(instr)))
		}

		if _, ok := b.deferBuiltinFuncs[instr.Call.Value]; !ok {
			b.deferBuiltinFuncs[instr.Call.Value] = deferBuiltin{
				callName: builtin.Name(),
				pos:      builtin.Pos(),
				argTypes: argTypes,
				callback: len(b.allDeferFuncs),
			}
			b.allDeferFuncs = append(b.allDeferFuncs, instr.Call.Value)
		}
		callback := llvm.ConstInt(b.uintptrType, uint64(b.deferBuiltinFuncs[instr.Call.Value].callback), false)

		// Collect all values to be put in the struct (starting with
		// runtime._defer fields).
		values = []llvm.Value{callback, next}
		for _, param := range argValues {
			values = append(values, param)
			valueTypes = append(valueTypes, param.Type())
		}

	} else {
		// Fallback: deferred call of an arbitrary func value expression.
		funcValue := b.getValue(instr.Call.Value, getPos(instr))

		if _, ok := b.deferExprFuncs[instr.Call.Value]; !ok {
			b.deferExprFuncs[instr.Call.Value] = len(b.allDeferFuncs)
			b.allDeferFuncs = append(b.allDeferFuncs, &instr.Call)
		}

		callback := llvm.ConstInt(b.uintptrType, uint64(b.deferExprFuncs[instr.Call.Value]), false)

		// Collect all values to be put in the struct (starting with
		// runtime._defer fields, followed by all parameters including the
		// context pointer).
		values = []llvm.Value{callback, next, funcValue}
		valueTypes = append(valueTypes, funcValue.Type())
		for _, param := range instr.Call.Args {
			llvmParam := b.getValue(param, getPos(instr))
			values = append(values, llvmParam)
			valueTypes = append(valueTypes, llvmParam.Type())
		}
	}

	// Make a struct out of the collected values to put in the deferred call
	// struct.
	deferredCallType := b.ctx.StructType(valueTypes, false)
	deferredCall := llvm.ConstNull(deferredCallType)
	for i, value := range values {
		deferredCall = b.CreateInsertValue(deferredCall, value, i, "")
	}

	// Put this struct in an allocation.
	var alloca llvm.Value
	if instr.Block() != b.currentBlock {
		panic("block mismatch")
	}
	if !b.isInLoop() {
		// This can safely use a stack allocation.
		alloca = llvmutil.CreateEntryBlockAlloca(b.Builder, deferredCallType, "defer.alloca")
	} else {
		// This may be hit a variable number of times, so use a heap allocation.
		size := b.targetData.TypeAllocSize(deferredCallType)
		sizeValue := llvm.ConstInt(b.uintptrType, size, false)
		nilPtr := llvm.ConstNull(b.dataPtrType)
		alloca = b.createRuntimeCall("alloc", []llvm.Value{sizeValue, nilPtr}, "defer.alloc.call")
	}
	if b.NeedsStackObjects {
		// Make the GC aware of this (possibly stack-allocated) pointer.
		b.trackPointer(alloca)
	}
	b.CreateStore(deferredCall, alloca)

	// Push it on top of the linked list by replacing deferPtr.
	b.CreateStore(alloca, b.deferPtr)
}
// createRunDefers emits code to run all deferred functions: a loop that pops
// defer frames off the linked list and dispatches on the stored callback
// number to re-create each deferred call.
func (b *builder) createRunDefers() {
	deferType := b.getLLVMRuntimeType("_defer")

	// Add a loop like the following:
	//     for stack != nil {
	//         _stack := stack
	//         stack = stack.next
	//         switch _stack.callback {
	//         case 0:
	//             // run first deferred call
	//         case 1:
	//             // run second deferred call
	//             // etc.
	//         default:
	//             unreachable
	//         }
	//     }

	// Create loop, in the order: loophead, loop, callback0, callback1, ..., unreachable, end.
	end := b.insertBasicBlock("rundefers.end")
	unreachable := b.ctx.InsertBasicBlock(end, "rundefers.default")
	loop := b.ctx.InsertBasicBlock(unreachable, "rundefers.loop")
	loophead := b.ctx.InsertBasicBlock(loop, "rundefers.loophead")
	b.CreateBr(loophead)

	// Create loop head:
	//     for stack != nil {
	b.SetInsertPointAtEnd(loophead)
	deferData := b.CreateLoad(b.dataPtrType, b.deferPtr, "")
	stackIsNil := b.CreateICmp(llvm.IntEQ, deferData, llvm.ConstPointerNull(deferData.Type()), "stackIsNil")
	b.CreateCondBr(stackIsNil, end, loop)

	// Create loop body:
	//     _stack := stack
	//     stack = stack.next
	//     switch stack.callback {
	b.SetInsertPointAtEnd(loop)
	nextStackGEP := b.CreateInBoundsGEP(deferType, deferData, []llvm.Value{
		llvm.ConstInt(b.ctx.Int32Type(), 0, false),
		llvm.ConstInt(b.ctx.Int32Type(), 1, false), // .next field
	}, "stack.next.gep")
	nextStack := b.CreateLoad(b.dataPtrType, nextStackGEP, "stack.next")
	b.CreateStore(nextStack, b.deferPtr)
	gep := b.CreateInBoundsGEP(deferType, deferData, []llvm.Value{
		llvm.ConstInt(b.ctx.Int32Type(), 0, false),
		llvm.ConstInt(b.ctx.Int32Type(), 0, false), // .callback field
	}, "callback.gep")
	callback := b.CreateLoad(b.uintptrType, gep, "callback")
	sw := b.CreateSwitch(callback, unreachable, len(b.allDeferFuncs))

	for i, callback := range b.allDeferFuncs {
		// Create switch case, for example:
		//     case 0:
		//         // run first deferred call
		block := b.insertBasicBlock("rundefers.callback" + strconv.Itoa(i))
		sw.AddCase(llvm.ConstInt(b.uintptrType, uint64(i), false), block)
		b.SetInsertPointAtEnd(block)
		switch callback := callback.(type) {
		case *ssa.CallCommon:
			// Call on a func value, or an interface method call (invoke).

			// Get the real defer struct type and cast to it.
			valueTypes := []llvm.Type{b.uintptrType, b.dataPtrType}

			if !callback.IsInvoke() {
				// Expect funcValue to be passed through the deferred call.
				valueTypes = append(valueTypes, b.getFuncType(callback.Signature()))
			} else {
				// Expect typecode and receiver (see the invoke branch in
				// createDefer).
				valueTypes = append(valueTypes, b.dataPtrType, b.dataPtrType)
			}

			for _, arg := range callback.Args {
				valueTypes = append(valueTypes, b.getLLVMType(arg.Type()))
			}

			// Extract the params from the struct (including receiver).
			forwardParams := []llvm.Value{}
			zero := llvm.ConstInt(b.ctx.Int32Type(), 0, false)
			deferredCallType := b.ctx.StructType(valueTypes, false)
			// Skip the first two fields: callback number and next pointer.
			for i := 2; i < len(valueTypes); i++ {
				gep := b.CreateInBoundsGEP(deferredCallType, deferData, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i), false)}, "gep")
				forwardParam := b.CreateLoad(valueTypes[i], gep, "param")
				forwardParams = append(forwardParams, forwardParam)
			}

			var fnPtr llvm.Value
			var fnType llvm.Type

			if !callback.IsInvoke() {
				// Isolate the func value.
				funcValue := forwardParams[0]
				forwardParams = forwardParams[1:]

				// Get function pointer and context.
				var context llvm.Value
				fnPtr, context = b.decodeFuncValue(funcValue)
				fnType = b.getLLVMFunctionType(callback.Signature())

				// Pass context.
				forwardParams = append(forwardParams, context)
			} else {
				// Move typecode from the start to the end of the list of
				// parameters.
				forwardParams = append(forwardParams[1:], forwardParams[0])
				fnPtr = b.getInvokeFunction(callback)
				fnType = fnPtr.GlobalValueType()

				// Add the context parameter. An interface call cannot also be a
				// closure but we have to supply the parameter anyway for platforms
				// with a strict calling convention.
				forwardParams = append(forwardParams, llvm.Undef(b.dataPtrType))
			}

			b.createCall(fnType, fnPtr, forwardParams, "")

		case *ssa.Function:
			// Direct call.

			// Get the real defer struct type and cast to it.
			valueTypes := []llvm.Type{b.uintptrType, b.dataPtrType}
			for _, param := range getParams(callback.Signature) {
				valueTypes = append(valueTypes, b.getLLVMType(param.Type()))
			}
			deferredCallType := b.ctx.StructType(valueTypes, false)

			// Extract the params from the struct.
			forwardParams := []llvm.Value{}
			zero := llvm.ConstInt(b.ctx.Int32Type(), 0, false)
			for i := range getParams(callback.Signature) {
				gep := b.CreateInBoundsGEP(deferredCallType, deferData, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i+2), false)}, "gep")
				forwardParam := b.CreateLoad(valueTypes[i+2], gep, "param")
				forwardParams = append(forwardParams, forwardParam)
			}

			// Plain Moxie functions add some extra parameters to implement async functionality and function receivers.
			// These parameters should not be supplied when calling into an external C/ASM function.
			if !b.getFunctionInfo(callback).exported {
				// Add the context parameter. We know it is ignored by the receiving
				// function, but we have to pass one anyway.
				forwardParams = append(forwardParams, llvm.Undef(b.dataPtrType))
			}

			// Call real function.
			fnType, fn := b.getFunction(callback)
			b.createInvoke(fnType, fn, forwardParams, "")

		case *ssa.MakeClosure:
			// Get the real defer struct type and cast to it.
			fn := callback.Fn.(*ssa.Function)
			valueTypes := []llvm.Type{b.uintptrType, b.dataPtrType}
			params := fn.Signature.Params()
			for i := 0; i < params.Len(); i++ {
				valueTypes = append(valueTypes, b.getLLVMType(params.At(i).Type()))
			}
			valueTypes = append(valueTypes, b.dataPtrType) // closure
			deferredCallType := b.ctx.StructType(valueTypes, false)

			// Extract the params from the struct.
			forwardParams := []llvm.Value{}
			zero := llvm.ConstInt(b.ctx.Int32Type(), 0, false)
			for i := 2; i < len(valueTypes); i++ {
				gep := b.CreateInBoundsGEP(deferredCallType, deferData, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i), false)}, "")
				forwardParam := b.CreateLoad(valueTypes[i], gep, "param")
				forwardParams = append(forwardParams, forwardParam)
			}

			// Call deferred function.
			fnType, llvmFn := b.getFunction(fn)
			b.createCall(fnType, llvmFn, forwardParams, "")
		case *ssa.Builtin:
			// Deferred builtin; re-create the builtin call from the info
			// recorded by createDefer.
			db := b.deferBuiltinFuncs[callback]

			// Get parameter types.
			valueTypes := []llvm.Type{b.uintptrType, b.dataPtrType}

			// Get signature from call results.
			params := callback.Type().Underlying().(*types.Signature).Params()
			for i := 0; i < params.Len(); i++ {
				valueTypes = append(valueTypes, b.getLLVMType(params.At(i).Type()))
			}

			deferredCallType := b.ctx.StructType(valueTypes, false)

			// Extract the params from the struct.
			var argValues []llvm.Value
			zero := llvm.ConstInt(b.ctx.Int32Type(), 0, false)
			for i := 0; i < params.Len(); i++ {
				gep := b.CreateInBoundsGEP(deferredCallType, deferData, []llvm.Value{zero, llvm.ConstInt(b.ctx.Int32Type(), uint64(i+2), false)}, "gep")
				forwardParam := b.CreateLoad(valueTypes[i+2], gep, "param")
				argValues = append(argValues, forwardParam)
			}

			_, err := b.createBuiltin(db.argTypes, argValues, db.callName, db.pos)
			if err != nil {
				b.diagnostics = append(b.diagnostics, err)
			}
		default:
			panic("unknown deferred function type")
		}

		// Branch back to the start of the loop.
		b.CreateBr(loophead)
	}

	// Create default unreachable block:
	//     default:
	//         unreachable
	//     }
	b.SetInsertPointAtEnd(unreachable)
	b.CreateUnreachable()

	// End of loop.
	b.SetInsertPointAtEnd(end)
}
718