Diffstat (limited to 'src/runtime')
145 files changed, 8759 insertions, 1136 deletions
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s index 7761415ecd..5bc2063bed 100644 --- a/src/runtime/asm_386.s +++ b/src/runtime/asm_386.s @@ -107,7 +107,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 MOVL BX, g_stackguard1(BP) MOVL BX, (g_stack+stack_lo)(BP) MOVL SP, (g_stack+stack_hi)(BP) - + // find out information about the processor we're on #ifdef GOOS_nacl // NaCl doesn't like PUSHFL/POPFL JMP has_cpuid @@ -827,7 +827,7 @@ havem: MOVL (g_sched+gobuf_sp)(SI), SP MOVL 0(SP), AX MOVL AX, (g_sched+gobuf_sp)(SI) - + // If the m on entry was nil, we called needm above to borrow an m // for the duration of the call. Since the call is over, return it with dropm. CMPL DX, $0 @@ -942,7 +942,7 @@ TEXT runtime·aeshashbody(SB),NOSPLIT,$0-0 CMPL BX, $64 JBE aes33to64 JMP aes65plus - + aes0to15: TESTL BX, BX JE aes0 @@ -957,7 +957,7 @@ aes0to15: ADDL BX, BX PAND masks<>(SB)(BX*8), X1 -final1: +final1: AESENC X0, X1 // scramble input, xor in seed AESENC X1, X1 // scramble combo 2 times AESENC X1, X1 @@ -987,7 +987,7 @@ aes17to32: // make second starting seed PXOR runtime·aeskeysched+16(SB), X1 AESENC X1, X1 - + // load data to be hashed MOVOU (AX), X2 MOVOU -16(AX)(BX*1), X3 @@ -1015,22 +1015,22 @@ aes33to64: AESENC X1, X1 AESENC X2, X2 AESENC X3, X3 - + MOVOU (AX), X4 MOVOU 16(AX), X5 MOVOU -32(AX)(BX*1), X6 MOVOU -16(AX)(BX*1), X7 - + AESENC X0, X4 AESENC X1, X5 AESENC X2, X6 AESENC X3, X7 - + AESENC X4, X4 AESENC X5, X5 AESENC X6, X6 AESENC X7, X7 - + AESENC X4, X4 AESENC X5, X5 AESENC X6, X6 @@ -1052,7 +1052,7 @@ aes65plus: AESENC X1, X1 AESENC X2, X2 AESENC X3, X3 - + // start with last (possibly overlapping) block MOVOU -64(AX)(BX*1), X4 MOVOU -48(AX)(BX*1), X5 @@ -1068,7 +1068,7 @@ aes65plus: // compute number of remaining 64-byte blocks DECL BX SHRL $6, BX - + aesloop: // scramble state, xor in a block MOVOU (AX), X0 @@ -1095,7 +1095,7 @@ aesloop: AESENC X5, X5 AESENC X6, X6 AESENC X7, X7 - + AESENC X4, X4 AESENC X5, X5 AESENC X6, X6 @@ -1132,77 +1132,77 @@ DATA masks<>+0x00(SB)/4, $0x00000000 DATA masks<>+0x04(SB)/4, $0x00000000 DATA masks<>+0x08(SB)/4, $0x00000000 DATA masks<>+0x0c(SB)/4, $0x00000000 - + DATA masks<>+0x10(SB)/4, $0x000000ff DATA masks<>+0x14(SB)/4, $0x00000000 DATA masks<>+0x18(SB)/4, $0x00000000 DATA masks<>+0x1c(SB)/4, $0x00000000 - + DATA masks<>+0x20(SB)/4, $0x0000ffff DATA masks<>+0x24(SB)/4, $0x00000000 DATA masks<>+0x28(SB)/4, $0x00000000 DATA masks<>+0x2c(SB)/4, $0x00000000 - + DATA masks<>+0x30(SB)/4, $0x00ffffff DATA masks<>+0x34(SB)/4, $0x00000000 DATA masks<>+0x38(SB)/4, $0x00000000 DATA masks<>+0x3c(SB)/4, $0x00000000 - + DATA masks<>+0x40(SB)/4, $0xffffffff DATA masks<>+0x44(SB)/4, $0x00000000 DATA masks<>+0x48(SB)/4, $0x00000000 DATA masks<>+0x4c(SB)/4, $0x00000000 - + DATA masks<>+0x50(SB)/4, $0xffffffff DATA masks<>+0x54(SB)/4, $0x000000ff DATA masks<>+0x58(SB)/4, $0x00000000 DATA masks<>+0x5c(SB)/4, $0x00000000 - + DATA masks<>+0x60(SB)/4, $0xffffffff DATA masks<>+0x64(SB)/4, $0x0000ffff DATA masks<>+0x68(SB)/4, $0x00000000 DATA masks<>+0x6c(SB)/4, $0x00000000 - + DATA masks<>+0x70(SB)/4, $0xffffffff DATA masks<>+0x74(SB)/4, $0x00ffffff DATA masks<>+0x78(SB)/4, $0x00000000 DATA masks<>+0x7c(SB)/4, $0x00000000 - + DATA masks<>+0x80(SB)/4, $0xffffffff DATA masks<>+0x84(SB)/4, $0xffffffff DATA masks<>+0x88(SB)/4, $0x00000000 DATA masks<>+0x8c(SB)/4, $0x00000000 - + DATA masks<>+0x90(SB)/4, $0xffffffff DATA masks<>+0x94(SB)/4, $0xffffffff DATA masks<>+0x98(SB)/4, $0x000000ff DATA masks<>+0x9c(SB)/4, $0x00000000 - + DATA masks<>+0xa0(SB)/4, $0xffffffff 
DATA masks<>+0xa4(SB)/4, $0xffffffff DATA masks<>+0xa8(SB)/4, $0x0000ffff DATA masks<>+0xac(SB)/4, $0x00000000 - + DATA masks<>+0xb0(SB)/4, $0xffffffff DATA masks<>+0xb4(SB)/4, $0xffffffff DATA masks<>+0xb8(SB)/4, $0x00ffffff DATA masks<>+0xbc(SB)/4, $0x00000000 - + DATA masks<>+0xc0(SB)/4, $0xffffffff DATA masks<>+0xc4(SB)/4, $0xffffffff DATA masks<>+0xc8(SB)/4, $0xffffffff DATA masks<>+0xcc(SB)/4, $0x00000000 - + DATA masks<>+0xd0(SB)/4, $0xffffffff DATA masks<>+0xd4(SB)/4, $0xffffffff DATA masks<>+0xd8(SB)/4, $0xffffffff DATA masks<>+0xdc(SB)/4, $0x000000ff - + DATA masks<>+0xe0(SB)/4, $0xffffffff DATA masks<>+0xe4(SB)/4, $0xffffffff DATA masks<>+0xe8(SB)/4, $0xffffffff DATA masks<>+0xec(SB)/4, $0x0000ffff - + DATA masks<>+0xf0(SB)/4, $0xffffffff DATA masks<>+0xf4(SB)/4, $0xffffffff DATA masks<>+0xf8(SB)/4, $0xffffffff @@ -1217,77 +1217,77 @@ DATA shifts<>+0x00(SB)/4, $0x00000000 DATA shifts<>+0x04(SB)/4, $0x00000000 DATA shifts<>+0x08(SB)/4, $0x00000000 DATA shifts<>+0x0c(SB)/4, $0x00000000 - + DATA shifts<>+0x10(SB)/4, $0xffffff0f DATA shifts<>+0x14(SB)/4, $0xffffffff DATA shifts<>+0x18(SB)/4, $0xffffffff DATA shifts<>+0x1c(SB)/4, $0xffffffff - + DATA shifts<>+0x20(SB)/4, $0xffff0f0e DATA shifts<>+0x24(SB)/4, $0xffffffff DATA shifts<>+0x28(SB)/4, $0xffffffff DATA shifts<>+0x2c(SB)/4, $0xffffffff - + DATA shifts<>+0x30(SB)/4, $0xff0f0e0d DATA shifts<>+0x34(SB)/4, $0xffffffff DATA shifts<>+0x38(SB)/4, $0xffffffff DATA shifts<>+0x3c(SB)/4, $0xffffffff - + DATA shifts<>+0x40(SB)/4, $0x0f0e0d0c DATA shifts<>+0x44(SB)/4, $0xffffffff DATA shifts<>+0x48(SB)/4, $0xffffffff DATA shifts<>+0x4c(SB)/4, $0xffffffff - + DATA shifts<>+0x50(SB)/4, $0x0e0d0c0b DATA shifts<>+0x54(SB)/4, $0xffffff0f DATA shifts<>+0x58(SB)/4, $0xffffffff DATA shifts<>+0x5c(SB)/4, $0xffffffff - + DATA shifts<>+0x60(SB)/4, $0x0d0c0b0a DATA shifts<>+0x64(SB)/4, $0xffff0f0e DATA shifts<>+0x68(SB)/4, $0xffffffff DATA shifts<>+0x6c(SB)/4, $0xffffffff - + DATA shifts<>+0x70(SB)/4, $0x0c0b0a09 DATA shifts<>+0x74(SB)/4, $0xff0f0e0d DATA shifts<>+0x78(SB)/4, $0xffffffff DATA shifts<>+0x7c(SB)/4, $0xffffffff - + DATA shifts<>+0x80(SB)/4, $0x0b0a0908 DATA shifts<>+0x84(SB)/4, $0x0f0e0d0c DATA shifts<>+0x88(SB)/4, $0xffffffff DATA shifts<>+0x8c(SB)/4, $0xffffffff - + DATA shifts<>+0x90(SB)/4, $0x0a090807 DATA shifts<>+0x94(SB)/4, $0x0e0d0c0b DATA shifts<>+0x98(SB)/4, $0xffffff0f DATA shifts<>+0x9c(SB)/4, $0xffffffff - + DATA shifts<>+0xa0(SB)/4, $0x09080706 DATA shifts<>+0xa4(SB)/4, $0x0d0c0b0a DATA shifts<>+0xa8(SB)/4, $0xffff0f0e DATA shifts<>+0xac(SB)/4, $0xffffffff - + DATA shifts<>+0xb0(SB)/4, $0x08070605 DATA shifts<>+0xb4(SB)/4, $0x0c0b0a09 DATA shifts<>+0xb8(SB)/4, $0xff0f0e0d DATA shifts<>+0xbc(SB)/4, $0xffffffff - + DATA shifts<>+0xc0(SB)/4, $0x07060504 DATA shifts<>+0xc4(SB)/4, $0x0b0a0908 DATA shifts<>+0xc8(SB)/4, $0x0f0e0d0c DATA shifts<>+0xcc(SB)/4, $0xffffffff - + DATA shifts<>+0xd0(SB)/4, $0x06050403 DATA shifts<>+0xd4(SB)/4, $0x0a090807 DATA shifts<>+0xd8(SB)/4, $0x0e0d0c0b DATA shifts<>+0xdc(SB)/4, $0xffffff0f - + DATA shifts<>+0xe0(SB)/4, $0x05040302 DATA shifts<>+0xe4(SB)/4, $0x09080706 DATA shifts<>+0xe8(SB)/4, $0x0d0c0b0a DATA shifts<>+0xec(SB)/4, $0xffff0f0e - + DATA shifts<>+0xf0(SB)/4, $0x04030201 DATA shifts<>+0xf4(SB)/4, $0x08070605 DATA shifts<>+0xf8(SB)/4, $0x0c0b0a09 diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s index 2a15910aea..ab891154c8 100644 --- a/src/runtime/asm_amd64.s +++ b/src/runtime/asm_amd64.s @@ -92,7 +92,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 ANDQ $~15, SP MOVQ AX, 16(SP) 
MOVQ BX, 24(SP) - + // create istack out of the given (operating system) stack. // _cgo_init may update stackguard. MOVQ $runtime·g0(SB), DI @@ -273,7 +273,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $16-8 // to keep running g. TEXT runtime·mcall(SB), NOSPLIT, $0-8 MOVQ fn+0(FP), DI - + get_tls(CX) MOVQ g(CX), AX // save state in g->sched MOVQ 0(SP), BX // caller's PC @@ -617,7 +617,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20 MOVQ m_gsignal(R8), SI CMPQ SI, DI JEQ nosave - + // Switch to system stack. MOVQ m_g0(R8), SI CALL gosave<>(SB) @@ -717,7 +717,7 @@ needm: get_tls(CX) MOVQ g(CX), BX MOVQ g_m(BX), BX - + // Set m->sched.sp = SP, so that if a panic happens // during the function we are about to execute, it will // have a valid SP to run on the g0 stack. @@ -801,7 +801,7 @@ havem: MOVQ (g_sched+gobuf_sp)(SI), SP MOVQ 0(SP), AX MOVQ AX, (g_sched+gobuf_sp)(SI) - + // If the m on entry was nil, we called needm above to borrow an m // for the duration of the call. Since the call is over, return it with dropm. CMPQ R8, $0 @@ -953,7 +953,7 @@ aes17to32: // make second starting seed PXOR runtime·aeskeysched+16(SB), X1 AESENC X1, X1 - + // load data to be hashed MOVOU (AX), X2 MOVOU -16(AX)(CX*1), X3 @@ -985,7 +985,7 @@ aes33to64: AESENC X1, X1 AESENC X2, X2 AESENC X3, X3 - + MOVOU (AX), X4 MOVOU 16(AX), X5 MOVOU -32(AX)(CX*1), X6 @@ -995,17 +995,17 @@ aes33to64: PXOR X1, X5 PXOR X2, X6 PXOR X3, X7 - + AESENC X4, X4 AESENC X5, X5 AESENC X6, X6 AESENC X7, X7 - + AESENC X4, X4 AESENC X5, X5 AESENC X6, X6 AESENC X7, X7 - + AESENC X4, X4 AESENC X5, X5 AESENC X6, X6 @@ -1121,7 +1121,7 @@ aes129plus: AESENC X5, X5 AESENC X6, X6 AESENC X7, X7 - + // start with last (possibly overlapping) block MOVOU -128(AX)(CX*1), X8 MOVOU -112(AX)(CX*1), X9 @@ -1141,11 +1141,11 @@ aes129plus: PXOR X5, X13 PXOR X6, X14 PXOR X7, X15 - + // compute number of remaining 128-byte blocks DECQ CX SHRQ $7, CX - + aesloop: // scramble state AESENC X8, X8 diff --git a/src/runtime/asm_amd64p32.s b/src/runtime/asm_amd64p32.s index 49958d0c88..7f194d2403 100644 --- a/src/runtime/asm_amd64p32.s +++ b/src/runtime/asm_amd64p32.s @@ -18,7 +18,7 @@ TEXT runtime·rt0_go(SB),NOSPLIT,$0 MOVL AX, 16(SP) MOVL BX, 24(SP) - + // create istack out of the given (operating system) stack. MOVL $runtime·g0(SB), DI LEAL (-64*1024+104)(SP), BX @@ -150,7 +150,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $8-4 // to keep running g. TEXT runtime·mcall(SB), NOSPLIT, $0-4 MOVL fn+0(FP), DI - + get_tls(CX) MOVL g(CX), AX // save state in g->sched MOVL 0(SP), BX // caller's PC diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s index 6722ba760f..174dc46389 100644 --- a/src/runtime/asm_arm.s +++ b/src/runtime/asm_arm.s @@ -514,7 +514,7 @@ TEXT callRet<>(SB), NOSPLIT, $16-0 MOVW R1, 12(R13) MOVW R2, 16(R13) BL runtime·reflectcallmove(SB) - RET + RET CALLFN(·call16, 16) CALLFN(·call32, 32) @@ -673,7 +673,7 @@ TEXT runtime·cgocallback(SB),NOSPLIT,$16-16 // See cgocall.go for more details. TEXT ·cgocallback_gofunc(SB),NOSPLIT,$8-16 NO_LOCAL_POINTERS - + // Load m and g from thread-local storage. MOVB runtime·iscgo(SB), R0 CMP $0, R0 @@ -784,6 +784,9 @@ TEXT setg<>(SB),NOSPLIT|NOFRAME,$0-0 MOVW R0, g // Save g to thread-local storage. +#ifdef GOOS_windows + B runtime·save_g(SB) +#else MOVB runtime·iscgo(SB), R0 CMP $0, R0 B.EQ 2(PC) @@ -791,6 +794,7 @@ TEXT setg<>(SB),NOSPLIT|NOFRAME,$0-0 MOVW g, R0 RET +#endif TEXT runtime·emptyfunc(SB),0,$0-0 RET @@ -851,12 +855,12 @@ TEXT _cgo_topofstack(SB),NOSPLIT,$8 // callee-save in the gcc calling convention, so save them here. 
MOVW R11, saveR11-4(SP) MOVW g, saveG-8(SP) - + BL runtime·load_g(SB) MOVW g_m(g), R0 MOVW m_curg(R0), R0 MOVW (g_stack+stack_hi)(R0), R0 - + MOVW saveG-8(SP), g MOVW saveR11-4(SP), R11 RET diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s index 57877c0194..b180cb06ab 100644 --- a/src/runtime/asm_ppc64x.s +++ b/src/runtime/asm_ppc64x.s @@ -390,15 +390,36 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ /* copy arguments to stack */ \ MOVD arg+16(FP), R3; \ MOVWZ argsize+24(FP), R4; \ - MOVD R1, R5; \ - ADD $(FIXED_FRAME-1), R5; \ - SUB $1, R3; \ - ADD R5, R4; \ - CMP R5, R4; \ - BEQ 4(PC); \ - MOVBZU 1(R3), R6; \ - MOVBZU R6, 1(R5); \ - BR -4(PC); \ + MOVD R1, R5; \ + CMP R4, $8; \ + BLT tailsetup; \ + /* copy 8 at a time if possible */ \ + ADD $(FIXED_FRAME-8), R5; \ + SUB $8, R3; \ +top: \ + MOVDU 8(R3), R7; \ + MOVDU R7, 8(R5); \ + SUB $8, R4; \ + CMP R4, $8; \ + BGE top; \ + /* handle remaining bytes */ \ + CMP $0, R4; \ + BEQ callfn; \ + ADD $7, R3; \ + ADD $7, R5; \ + BR tail; \ +tailsetup: \ + CMP $0, R4; \ + BEQ callfn; \ + ADD $(FIXED_FRAME-1), R5; \ + SUB $1, R3; \ +tail: \ + MOVBU 1(R3), R6; \ + MOVBU R6, 1(R5); \ + SUB $1, R4; \ + CMP $0, R4; \ + BGT tail; \ +callfn: \ /* call function */ \ MOVD f+8(FP), R11; \ MOVD (R11), R12; \ diff --git a/src/runtime/cgo/asm_amd64.s b/src/runtime/cgo/asm_amd64.s index 0e33fc4796..06c538b9bc 100644 --- a/src/runtime/cgo/asm_amd64.s +++ b/src/runtime/cgo/asm_amd64.s @@ -36,9 +36,9 @@ TEXT crosscall2(SB),NOSPLIT,$0x110-0 /* also need to save xmm6 - xmm15 */ MOVQ DX, 0x0(SP) /* arg */ MOVQ R8, 0x8(SP) /* argsize (includes padding) */ MOVQ R9, 0x10(SP) /* ctxt */ - + CALL CX /* fn */ - + MOVQ 0x48(SP), DI MOVQ 0x50(SP), SI MOVUPS 0x60(SP), X6 @@ -64,5 +64,5 @@ TEXT crosscall2(SB),NOSPLIT,$0x110-0 /* also need to save xmm6 - xmm15 */ MOVQ 0x30(SP), R13 MOVQ 0x38(SP), R14 MOVQ 0x40(SP), R15 - + RET diff --git a/src/runtime/cgo/asm_arm.s b/src/runtime/cgo/asm_arm.s index 36dab286ae..60132c14a8 100644 --- a/src/runtime/cgo/asm_arm.s +++ b/src/runtime/cgo/asm_arm.s @@ -8,7 +8,7 @@ // func crosscall2(fn func(a unsafe.Pointer, n int32, ctxt uintptr), a unsafe.Pointer, n int32, ctxt uintptr) // Saves C callee-saved registers and calls fn with three arguments. TEXT crosscall2(SB),NOSPLIT|NOFRAME,$0 - /* + /* * We still need to save all callee save register as before, and then * push 3 args for fn (R1, R2, R3). * Also note that at procedure entry in gc world, 4(R13) will be the diff --git a/src/runtime/cgo/signal_darwin_arm64.s b/src/runtime/cgo/signal_darwin_arm64.s index 60443b64c8..1ae00d13f3 100644 --- a/src/runtime/cgo/signal_darwin_arm64.s +++ b/src/runtime/cgo/signal_darwin_arm64.s @@ -37,7 +37,7 @@ ongothread: // Build a 32-byte stack frame for us for this call. // Saved LR (none available) is at the bottom, - // then the PC argument for setsigsegv, + // then the PC argument for setsigsegv, // then a copy of the LR for us to restore. MOVD.W $0, -32(RSP) MOVD R1, 8(RSP) diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go index 73cb6ecae2..ac57e0344e 100644 --- a/src/runtime/cgocheck.go +++ b/src/runtime/cgocheck.go @@ -126,7 +126,7 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) { } s := spanOfUnchecked(uintptr(src)) - if s.state == _MSpanManual { + if s.state == mSpanManual { // There are no heap bits for value stored on the stack. 
// For a channel receive src might be on the stack of some // other goroutine, so we can't unwind the stack even if diff --git a/src/runtime/chan.go b/src/runtime/chan.go index 615643e6a6..5cf0b86f58 100644 --- a/src/runtime/chan.go +++ b/src/runtime/chan.go @@ -92,7 +92,7 @@ func makechan(t *chantype, size int) *hchan { // Queue or element size is zero. c = (*hchan)(mallocgc(hchanSize, nil, true)) // Race detector uses this location for synchronization. - c.buf = unsafe.Pointer(c) + c.buf = c.raceaddr() case elem.kind&kindNoPointers != 0: // Elements do not contain pointers. // Allocate hchan and buf in one call. @@ -151,7 +151,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool { } if raceenabled { - racereadpc(unsafe.Pointer(c), callerpc, funcPC(chansend)) + racereadpc(c.raceaddr(), callerpc, funcPC(chansend)) } // Fast path: check for failed non-blocking operation without acquiring the lock. @@ -232,6 +232,11 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool { gp.param = nil c.sendq.enqueue(mysg) goparkunlock(&c.lock, waitReasonChanSend, traceEvGoBlockSend, 3) + // Ensure the value being sent is kept alive until the + // receiver copies it out. The sudog has a pointer to the + // stack object, but sudogs aren't considered as roots of the + // stack tracer. + KeepAlive(ep) // someone woke us up. if mysg != gp.waiting { @@ -337,8 +342,8 @@ func closechan(c *hchan) { if raceenabled { callerpc := getcallerpc() - racewritepc(unsafe.Pointer(c), callerpc, funcPC(closechan)) - racerelease(unsafe.Pointer(c)) + racewritepc(c.raceaddr(), callerpc, funcPC(closechan)) + racerelease(c.raceaddr()) } c.closed = 1 @@ -361,7 +366,7 @@ func closechan(c *hchan) { gp := sg.g gp.param = nil if raceenabled { - raceacquireg(gp, unsafe.Pointer(c)) + raceacquireg(gp, c.raceaddr()) } glist.push(gp) } @@ -379,7 +384,7 @@ func closechan(c *hchan) { gp := sg.g gp.param = nil if raceenabled { - raceacquireg(gp, unsafe.Pointer(c)) + raceacquireg(gp, c.raceaddr()) } glist.push(gp) } @@ -454,7 +459,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) if c.closed != 0 && c.qcount == 0 { if raceenabled { - raceacquire(unsafe.Pointer(c)) + raceacquire(c.raceaddr()) } unlock(&c.lock) if ep != nil { @@ -732,6 +737,15 @@ func (q *waitq) dequeue() *sudog { } } +func (c *hchan) raceaddr() unsafe.Pointer { + // Treat read-like and write-like operations on the channel to + // happen at this address. Avoid using the address of qcount + // or dataqsiz, because the len() and cap() builtins read + // those addresses, and we don't want them racing with + // operations like close(). 
+ return unsafe.Pointer(&c.buf) +} + func racesync(c *hchan, sg *sudog) { racerelease(chanbuf(c, 0)) raceacquireg(sg.g, chanbuf(c, 0)) diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go index 2766b8850a..6835cacb3f 100644 --- a/src/runtime/crash_test.go +++ b/src/runtime/crash_test.go @@ -686,7 +686,7 @@ func init() { func TestRuntimePanic(t *testing.T) { testenv.MustHaveExec(t) - cmd := exec.Command(os.Args[0], "-test.run=TestRuntimePanic") + cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestRuntimePanic")) cmd.Env = append(cmd.Env, "GO_TEST_RUNTIME_PANIC=1") out, err := cmd.CombinedOutput() t.Logf("%s", out) diff --git a/src/runtime/crash_unix_test.go b/src/runtime/crash_unix_test.go index af9e6430da..1384e00210 100644 --- a/src/runtime/crash_unix_test.go +++ b/src/runtime/crash_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package runtime_test diff --git a/src/runtime/debug_test.go b/src/runtime/debug_test.go index a34f4c77f7..37dcafd145 100644 --- a/src/runtime/debug_test.go +++ b/src/runtime/debug_test.go @@ -17,6 +17,8 @@ package runtime_test import ( "fmt" + "io/ioutil" + "regexp" "runtime" "runtime/debug" "sync/atomic" @@ -25,6 +27,11 @@ import ( ) func startDebugCallWorker(t *testing.T) (g *runtime.G, after func()) { + // This can deadlock if run under a debugger because it + // depends on catching SIGTRAP, which is usually swallowed by + // a debugger. + skipUnderDebugger(t) + // This can deadlock if there aren't enough threads or if a GC // tries to interrupt an atomic loop (see issue #10958). ogomaxprocs := runtime.GOMAXPROCS(2) @@ -73,6 +80,28 @@ func debugCallTKill(tid int) error { return syscall.Tgkill(syscall.Getpid(), tid, syscall.SIGTRAP) } +// skipUnderDebugger skips the current test when running under a +// debugger (specifically if this process has a tracer). This is +// Linux-specific. +func skipUnderDebugger(t *testing.T) { + pid := syscall.Getpid() + status, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/status", pid)) + if err != nil { + t.Logf("couldn't get proc tracer: %s", err) + return + } + re := regexp.MustCompile(`TracerPid:\s+([0-9]+)`) + sub := re.FindSubmatch(status) + if sub == nil { + t.Logf("couldn't find proc tracer PID") + return + } + if string(sub[1]) == "0" { + return + } + t.Skip("test will deadlock under a debugger") +} + func TestDebugCall(t *testing.T) { g, after := startDebugCallWorker(t) defer after() @@ -160,6 +189,8 @@ func debugCallUnsafePointWorker(gpp **runtime.G, ready, stop *uint32) { } func TestDebugCallUnsafePoint(t *testing.T) { + skipUnderDebugger(t) + // This can deadlock if there aren't enough threads or if a GC // tries to interrupt an atomic loop (see issue #10958). defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) @@ -181,6 +212,8 @@ func TestDebugCallUnsafePoint(t *testing.T) { } func TestDebugCallPanic(t *testing.T) { + skipUnderDebugger(t) + // This can deadlock if there aren't enough threads. 
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) diff --git a/src/runtime/defs2_linux.go b/src/runtime/defs2_linux.go index c10dfb8624..b08c0dafe1 100644 --- a/src/runtime/defs2_linux.go +++ b/src/runtime/defs2_linux.go @@ -58,7 +58,10 @@ const ( MAP_PRIVATE = C.MAP_PRIVATE MAP_FIXED = C.MAP_FIXED - MADV_DONTNEED = C.MADV_DONTNEED + MADV_DONTNEED = C.MADV_DONTNEED + MADV_FREE = C.MADV_FREE + MADV_HUGEPAGE = C.MADV_HUGEPAGE + MADV_NOHUGEPAGE = C.MADV_NOHUGEPAGE SA_RESTART = C.SA_RESTART SA_ONSTACK = C.SA_ONSTACK diff --git a/src/runtime/defs_aix.go b/src/runtime/defs_aix.go new file mode 100644 index 0000000000..812c7fcfa2 --- /dev/null +++ b/src/runtime/defs_aix.go @@ -0,0 +1,170 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore +
+/* +Input to cgo -godefs +GOARCH=ppc64 go tool cgo -godefs defs_aix.go > defs_aix_ppc64_tmp.go + +This is only a helper to create defs_aix_ppc64.go +Go runtime functions require the "linux" name of fields (ss_sp, si_addr, etc) +However, AIX structures don't provide such names and must be modified. + +TODO(aix): create a script to automatise defs_aix creation. + +Modifications made: + - sigset replaced by a [4]uint64 array + - add sigset_all variable + - siginfo.si_addr uintptr instead of *byte + - add (*timeval) set_usec + - stackt.ss_sp uintptr instead of *byte + - stackt.ss_size uintptr instead of uint64 + - sigcontext.sc_jmpbuf context64 instead of jmpbuf + - ucontext.__extctx is a uintptr because we don't need extctx struct + - ucontext.uc_mcontext: replace jmpbuf structure by context64 structure + - sigaction.sa_handler represents union field as both are uintptr + - tstate.* replace *byte by uintptr + + +*/ + +package runtime + +/* + +#include <sys/types.h> +#include <sys/errno.h> +#include <sys/time.h> +#include <sys/signal.h> +#include <sys/mman.h> +#include <sys/thread.h> +#include <sys/resource.h> + +#include <unistd.h> +#include <fcntl.h> +#include <pthread.h> +#include <semaphore.h> +*/ +import "C" + +const ( + _EPERM = C.EPERM + _ENOENT = C.ENOENT + _EINTR = C.EINTR + _EAGAIN = C.EAGAIN + _ENOMEM = C.ENOMEM + _EACCES = C.EACCES + _EFAULT = C.EFAULT + _EINVAL = C.EINVAL + _ETIMEDOUT = C.ETIMEDOUT + + _PROT_NONE = C.PROT_NONE + _PROT_READ = C.PROT_READ + _PROT_WRITE = C.PROT_WRITE + _PROT_EXEC = C.PROT_EXEC + + _MAP_ANONYMOUS = C.MAP_ANONYMOUS + _MAP_PRIVATE = C.MAP_PRIVATE + _MAP_FIXED = C.MAP_FIXED + _MADV_DONTNEED = C.MADV_DONTNEED + + _SIGHUP = C.SIGHUP + _SIGINT = C.SIGINT + _SIGQUIT = C.SIGQUIT + _SIGILL = C.SIGILL + _SIGTRAP = C.SIGTRAP + _SIGABRT = C.SIGABRT + _SIGBUS = C.SIGBUS + _SIGFPE = C.SIGFPE + _SIGKILL = C.SIGKILL + _SIGUSR1 = C.SIGUSR1 + _SIGSEGV = C.SIGSEGV + _SIGUSR2 = C.SIGUSR2 + _SIGPIPE = C.SIGPIPE + _SIGALRM = C.SIGALRM + _SIGCHLD = C.SIGCHLD + _SIGCONT = C.SIGCONT + _SIGSTOP = C.SIGSTOP + _SIGTSTP = C.SIGTSTP + _SIGTTIN = C.SIGTTIN + _SIGTTOU = C.SIGTTOU + _SIGURG = C.SIGURG + _SIGXCPU = C.SIGXCPU + _SIGXFSZ = C.SIGXFSZ + _SIGVTALRM = C.SIGVTALRM + _SIGPROF = C.SIGPROF + _SIGWINCH = C.SIGWINCH + _SIGIO = C.SIGIO + _SIGPWR = C.SIGPWR + _SIGSYS = C.SIGSYS + _SIGTERM = C.SIGTERM + _SIGEMT = C.SIGEMT + _SIGWAITING = C.SIGWAITING + + _FPE_INTDIV = C.FPE_INTDIV + _FPE_INTOVF = C.FPE_INTOVF + _FPE_FLTDIV = C.FPE_FLTDIV + _FPE_FLTOVF = C.FPE_FLTOVF + _FPE_FLTUND = C.FPE_FLTUND + _FPE_FLTRES = C.FPE_FLTRES + _FPE_FLTINV = C.FPE_FLTINV + _FPE_FLTSUB = C.FPE_FLTSUB + + _BUS_ADRALN = C.BUS_ADRALN +
_BUS_ADRERR = C.BUS_ADRERR + _BUS_OBJERR = C.BUS_OBJERR + + _SEGV_MAPERR = C.SEGV_MAPERR + _SEGV_ACCERR = C.SEGV_ACCERR + + _ITIMER_REAL = C.ITIMER_REAL + _ITIMER_VIRTUAL = C.ITIMER_VIRTUAL + _ITIMER_PROF = C.ITIMER_PROF + + _O_RDONLY = C.O_RDONLY + + _SS_DISABLE = C.SS_DISABLE + _SI_USER = C.SI_USER + _SIG_BLOCK = C.SIG_BLOCK + _SIG_UNBLOCK = C.SIG_UNBLOCK + _SIG_SETMASK = C.SIG_SETMASK + + _SA_SIGINFO = C.SA_SIGINFO + _SA_RESTART = C.SA_RESTART + _SA_ONSTACK = C.SA_ONSTACK + + _PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED + + __SC_PAGE_SIZE = C._SC_PAGE_SIZE + __SC_NPROCESSORS_ONLN = C._SC_NPROCESSORS_ONLN + + _F_SETFD = C.F_SETFD + _F_SETFL = C.F_SETFL + _F_GETFD = C.F_GETFD + _F_GETFL = C.F_GETFL + _FD_CLOEXEC = C.FD_CLOEXEC +) + +type sigset C.sigset_t +type siginfo C.siginfo_t +type timespec C.struct_timespec +type timestruc C.struct_timestruc_t +type timeval C.struct_timeval +type itimerval C.struct_itimerval + +type stackt C.stack_t +type sigcontext C.struct_sigcontext +type ucontext C.ucontext_t +type _Ctype_struct___extctx uint64 // ucontext uses a pointer to this structure but it shouldn't be used +type jmpbuf C.struct___jmpbuf +type context64 C.struct___context64 +type sigactiont C.struct_sigaction +type tstate C.struct_tstate +type rusage C.struct_rusage + +type pthread C.pthread_t +type pthread_attr C.pthread_attr_t + +type semt C.sem_t diff --git a/src/runtime/defs_aix_ppc64.go b/src/runtime/defs_aix_ppc64.go new file mode 100644 index 0000000000..e7480d06ba --- /dev/null +++ b/src/runtime/defs_aix_ppc64.go @@ -0,0 +1,203 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix + +package runtime + +const ( + _EPERM = 0x1 + _ENOENT = 0x2 + _EINTR = 0x4 + _EAGAIN = 0xb + _ENOMEM = 0xc + _EACCES = 0xd + _EFAULT = 0xe + _EINVAL = 0x16 + _ETIMEDOUT = 0x4e + + _PROT_NONE = 0x0 + _PROT_READ = 0x1 + _PROT_WRITE = 0x2 + _PROT_EXEC = 0x4 + + _MAP_ANONYMOUS = 0x10 + _MAP_PRIVATE = 0x2 + _MAP_FIXED = 0x100 + _MADV_DONTNEED = 0x4 + + _SIGHUP = 0x1 + _SIGINT = 0x2 + _SIGQUIT = 0x3 + _SIGILL = 0x4 + _SIGTRAP = 0x5 + _SIGABRT = 0x6 + _SIGBUS = 0xa + _SIGFPE = 0x8 + _SIGKILL = 0x9 + _SIGUSR1 = 0x1e + _SIGSEGV = 0xb + _SIGUSR2 = 0x1f + _SIGPIPE = 0xd + _SIGALRM = 0xe + _SIGCHLD = 0x14 + _SIGCONT = 0x13 + _SIGSTOP = 0x11 + _SIGTSTP = 0x12 + _SIGTTIN = 0x15 + _SIGTTOU = 0x16 + _SIGURG = 0x10 + _SIGXCPU = 0x18 + _SIGXFSZ = 0x19 + _SIGVTALRM = 0x22 + _SIGPROF = 0x20 + _SIGWINCH = 0x1c + _SIGIO = 0x17 + _SIGPWR = 0x1d + _SIGSYS = 0xc + _SIGTERM = 0xf + _SIGEMT = 0x7 + _SIGWAITING = 0x27 + + _FPE_INTDIV = 0x14 + _FPE_INTOVF = 0x15 + _FPE_FLTDIV = 0x16 + _FPE_FLTOVF = 0x17 + _FPE_FLTUND = 0x18 + _FPE_FLTRES = 0x19 + _FPE_FLTINV = 0x1a + _FPE_FLTSUB = 0x1b + + _BUS_ADRALN = 0x1 + _BUS_ADRERR = 0x2 + _BUS_OBJERR = 0x3 + + _SEGV_MAPERR = 0x32 + _SEGV_ACCERR = 0x33 + + _ITIMER_REAL = 0x0 + _ITIMER_VIRTUAL = 0x1 + _ITIMER_PROF = 0x2 + + _O_RDONLY = 0x0 + + _SS_DISABLE = 0x2 + _SI_USER = 0x0 + _SIG_BLOCK = 0x0 + _SIG_UNBLOCK = 0x1 + _SIG_SETMASK = 0x2 + + _SA_SIGINFO = 0x100 + _SA_RESTART = 0x8 + _SA_ONSTACK = 0x1 + + _PTHREAD_CREATE_DETACHED = 0x1 + + __SC_PAGE_SIZE = 0x30 + __SC_NPROCESSORS_ONLN = 0x48 + + _F_SETFD = 0x2 + _F_SETFL = 0x4 + _F_GETFD = 0x1 + _F_GETFL = 0x3 + _FD_CLOEXEC = 0x1 +) + +type sigset [4]uint64 + +var sigset_all = sigset{^uint64(0), ^uint64(0), ^uint64(0), ^uint64(0)} + +type siginfo struct { + si_signo int32 + si_errno int32 + si_code int32 +
si_pid int32 + si_uid uint32 + si_status int32 + si_addr uintptr + si_band int64 + si_value [2]int32 // [8]byte + __si_flags int32 + __pad [3]int32 +} + +type timespec struct { + tv_sec int64 + tv_nsec int64 +} +type timeval struct { + tv_sec int64 + tv_usec int32 + pad_cgo_0 [4]byte +} + +func (tv *timeval) set_usec(x int32) { + tv.tv_usec = x +} + +type itimerval struct { + it_interval timeval + it_value timeval +} + +type stackt struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 + __pad [4]int32 + pas_cgo_0 [4]byte +} + +type sigcontext struct { + sc_onstack int32 + pad_cgo_0 [4]byte + sc_mask sigset + sc_uerror int32 + sc_jmpbuf context64 +} + +type ucontext struct { + __sc_onstack int32 + pad_cgo_0 [4]byte + uc_sigmask sigset + __sc_error int32 + pad_cgo_1 [4]byte + uc_mcontext context64 + uc_link *ucontext + uc_stack stackt + __extctx uintptr // pointer to struct __extctx but we don't use it + __extctx_magic int32 + __pad int32 +} + +type context64 struct { + gpr [32]uint64 + msr uint64 + iar uint64 + lr uint64 + ctr uint64 + cr uint32 + xer uint32 + fpscr uint32 + fpscrx uint32 + except [1]uint64 + fpr [32]float64 + fpeu uint8 + fpinfo uint8 + fpscr24_31 uint8 + pad [1]uint8 + excp_type int32 +} + +type sigactiont struct { + sa_handler uintptr // a union of two pointers + sa_mask sigset + sa_flags int32 + pad_cgo_0 [4]byte +} + +type pthread uint32 +type pthread_attr *byte + +type semt int32 diff --git a/src/runtime/defs_linux.go b/src/runtime/defs_linux.go index 553366a50b..2d810136d9 100644 --- a/src/runtime/defs_linux.go +++ b/src/runtime/defs_linux.go @@ -47,7 +47,10 @@ const ( MAP_PRIVATE = C.MAP_PRIVATE MAP_FIXED = C.MAP_FIXED - MADV_DONTNEED = C.MADV_DONTNEED + MADV_DONTNEED = C.MADV_DONTNEED + MADV_FREE = C.MADV_FREE + MADV_HUGEPAGE = C.MADV_HUGEPAGE + MADV_NOHUGEPAGE = C.MADV_NOHUGEPAGE SA_RESTART = C.SA_RESTART SA_ONSTACK = C.SA_ONSTACK diff --git a/src/runtime/defs_linux_386.go b/src/runtime/defs_linux_386.go index a7e435f854..0ebac17aef 100644 --- a/src/runtime/defs_linux_386.go +++ b/src/runtime/defs_linux_386.go @@ -18,6 +18,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_amd64.go b/src/runtime/defs_linux_amd64.go index e8c6a212db..c0a0ef0dd4 100644 --- a/src/runtime/defs_linux_amd64.go +++ b/src/runtime/defs_linux_amd64.go @@ -18,6 +18,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_arm.go b/src/runtime/defs_linux_arm.go index 62ec8fab5e..43946bb79c 100644 --- a/src/runtime/defs_linux_arm.go +++ b/src/runtime/defs_linux_arm.go @@ -16,6 +16,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_arm64.go b/src/runtime/defs_linux_arm64.go index c295bc0257..c2cc281ab4 100644 --- a/src/runtime/defs_linux_arm64.go +++ b/src/runtime/defs_linux_arm64.go @@ -18,6 +18,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_mips64x.go b/src/runtime/defs_linux_mips64x.go index df11cb0965..9dacd5d1e9 100644 --- a/src/runtime/defs_linux_mips64x.go +++ b/src/runtime/defs_linux_mips64x.go @@ -18,6 +18,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_mipsx.go
b/src/runtime/defs_linux_mipsx.go index 702fbb51c8..9532ac54ee 100644 --- a/src/runtime/defs_linux_mipsx.go +++ b/src/runtime/defs_linux_mipsx.go @@ -22,6 +22,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_ppc64.go b/src/runtime/defs_linux_ppc64.go index 45363d1285..5a4326da07 100644 --- a/src/runtime/defs_linux_ppc64.go +++ b/src/runtime/defs_linux_ppc64.go @@ -18,6 +18,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_ppc64le.go b/src/runtime/defs_linux_ppc64le.go index 45363d1285..5a4326da07 100644 --- a/src/runtime/defs_linux_ppc64le.go +++ b/src/runtime/defs_linux_ppc64le.go @@ -18,6 +18,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_linux_s390x.go b/src/runtime/defs_linux_s390x.go index ab90723f75..a6cc9c48e9 100644 --- a/src/runtime/defs_linux_s390x.go +++ b/src/runtime/defs_linux_s390x.go @@ -19,6 +19,7 @@ const ( _MAP_FIXED = 0x10 _MADV_DONTNEED = 0x4 + _MADV_FREE = 0x8 _MADV_HUGEPAGE = 0xe _MADV_NOHUGEPAGE = 0xf diff --git a/src/runtime/defs_windows_386.go b/src/runtime/defs_windows_386.go index 589a7884cd..38b30b70e3 100644 --- a/src/runtime/defs_windows_386.go +++ b/src/runtime/defs_windows_386.go @@ -104,6 +104,9 @@ type context struct { func (c *context) ip() uintptr { return uintptr(c.eip) } func (c *context) sp() uintptr { return uintptr(c.esp) } +// 386 does not have link register, so this returns 0. +func (c *context) lr() uintptr { return 0 } + func (c *context) setip(x uintptr) { c.eip = uint32(x) } func (c *context) setsp(x uintptr) { c.esp = uint32(x) } diff --git a/src/runtime/defs_windows_amd64.go b/src/runtime/defs_windows_amd64.go index 1e173e934d..37508c09be 100644 --- a/src/runtime/defs_windows_amd64.go +++ b/src/runtime/defs_windows_amd64.go @@ -119,6 +119,9 @@ type context struct { func (c *context) ip() uintptr { return uintptr(c.rip) } func (c *context) sp() uintptr { return uintptr(c.rsp) } +// Amd64 does not have link register, so this returns 0. +func (c *context) lr() uintptr { return 0 } + func (c *context) setip(x uintptr) { c.rip = uint64(x) } func (c *context) setsp(x uintptr) { c.rsp = uint64(x) } diff --git a/src/runtime/defs_windows_arm.go b/src/runtime/defs_windows_arm.go new file mode 100644 index 0000000000..1140f61651 --- /dev/null +++ b/src/runtime/defs_windows_arm.go @@ -0,0 +1,149 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime + +const ( + _PROT_NONE = 0 + _PROT_READ = 1 + _PROT_WRITE = 2 + _PROT_EXEC = 4 + + _MAP_ANON = 1 + _MAP_PRIVATE = 2 + + _DUPLICATE_SAME_ACCESS = 0x2 + _THREAD_PRIORITY_HIGHEST = 0x2 + + _SIGINT = 0x2 + _CTRL_C_EVENT = 0x0 + _CTRL_BREAK_EVENT = 0x1 + + _CONTEXT_CONTROL = 0x10001 + _CONTEXT_FULL = 0x10007 + + _EXCEPTION_ACCESS_VIOLATION = 0xc0000005 + _EXCEPTION_BREAKPOINT = 0x80000003 + _EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d + _EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e + _EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f + _EXCEPTION_FLT_OVERFLOW = 0xc0000091 + _EXCEPTION_FLT_UNDERFLOW = 0xc0000093 + _EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094 + _EXCEPTION_INT_OVERFLOW = 0xc0000095 + + _INFINITE = 0xffffffff + _WAIT_TIMEOUT = 0x102 + + _EXCEPTION_CONTINUE_EXECUTION = -0x1 + _EXCEPTION_CONTINUE_SEARCH = 0x0 +) + +type systeminfo struct { + anon0 [4]byte + dwpagesize uint32 + lpminimumapplicationaddress *byte + lpmaximumapplicationaddress *byte + dwactiveprocessormask uint32 + dwnumberofprocessors uint32 + dwprocessortype uint32 + dwallocationgranularity uint32 + wprocessorlevel uint16 + wprocessorrevision uint16 +} + +type exceptionrecord struct { + exceptioncode uint32 + exceptionflags uint32 + exceptionrecord *exceptionrecord + exceptionaddress *byte + numberparameters uint32 + exceptioninformation [15]uint32 +} + +type neon128 struct { + low uint64 + high int64 +} + +type context struct { + contextflags uint32 + r0 uint32 + r1 uint32 + r2 uint32 + r3 uint32 + r4 uint32 + r5 uint32 + r6 uint32 + r7 uint32 + r8 uint32 + r9 uint32 + r10 uint32 + r11 uint32 + r12 uint32 + + spr uint32 + lrr uint32 + pc uint32 + cpsr uint32 + + fpscr uint32 + padding uint32 + + floatNeon [16]neon128 + + bvr [8]uint32 + bcr [8]uint32 + wvr [1]uint32 + wcr [1]uint32 + padding2 [2]uint32 +} + +func (c *context) ip() uintptr { return uintptr(c.pc) } +func (c *context) sp() uintptr { return uintptr(c.spr) } +func (c *context) lr() uintptr { return uintptr(c.lrr) } + +func (c *context) setip(x uintptr) { c.pc = uint32(x) } +func (c *context) setsp(x uintptr) { c.spr = uint32(x) } + +func dumpregs(r *context) { + print("r0 ", hex(r.r0), "\n") + print("r1 ", hex(r.r1), "\n") + print("r2 ", hex(r.r2), "\n") + print("r3 ", hex(r.r3), "\n") + print("r4 ", hex(r.r4), "\n") + print("r5 ", hex(r.r5), "\n") + print("r6 ", hex(r.r6), "\n") + print("r7 ", hex(r.r7), "\n") + print("r8 ", hex(r.r8), "\n") + print("r9 ", hex(r.r9), "\n") + print("r10 ", hex(r.r10), "\n") + print("r11 ", hex(r.r11), "\n") + print("r12 ", hex(r.r12), "\n") + print("sp ", hex(r.spr), "\n") + print("lr ", hex(r.lrr), "\n") + print("pc ", hex(r.pc), "\n") + print("cpsr ", hex(r.cpsr), "\n") +} + +type overlapped struct { + internal uint32 + internalhigh uint32 + anon0 [8]byte + hevent *byte +} + +type memoryBasicInformation struct { + baseAddress uintptr + allocationBase uintptr + allocationProtect uint32 + regionSize uintptr + state uint32 + protect uint32 + type_ uint32 +} + +func stackcheck() { + // TODO: not implemented on ARM +} diff --git a/src/runtime/env_posix.go b/src/runtime/env_posix.go index 032e7122ce..a2daeb7f27 100644 --- a/src/runtime/env_posix.go +++ b/src/runtime/env_posix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package runtime diff --git a/src/runtime/export_unix_test.go b/src/runtime/export_unix_test.go index 54d577072e..eecdfb7eb2 100644 --- a/src/runtime/export_unix_test.go +++ b/src/runtime/export_unix_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package runtime diff --git a/src/runtime/extern.go b/src/runtime/extern.go index 1773c8fe7e..640688e004 100644 --- a/src/runtime/extern.go +++ b/src/runtime/extern.go @@ -50,19 +50,13 @@ It is a comma-separated list of name=val pairs setting these named variables: gcshrinkstackoff: setting gcshrinkstackoff=1 disables moving goroutines onto smaller stacks. In this mode, a goroutine's stack can only grow. - gcrescanstacks: setting gcrescanstacks=1 enables stack - re-scanning during the STW mark termination phase. This is - helpful for debugging if objects are being prematurely - garbage collected. - gcstoptheworld: setting gcstoptheworld=1 disables concurrent garbage collection, making every garbage collection a stop-the-world event. Setting gcstoptheworld=2 also disables concurrent sweeping after the garbage collection finishes. gctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard error at each collection, summarizing the amount of memory collected and the - length of the pause. Setting gctrace=2 emits the same summary but also - repeats each collection. The format of this line is subject to change. + length of the pause. The format of this line is subject to change. Currently, it is: gc # @#s #%: #+#+# ms clock, #+#/#/#+# ms cpu, #->#-># MB, # MB goal, # P where the fields are as follows: diff --git a/src/runtime/funcdata.h b/src/runtime/funcdata.h index e6e0306e65..1ee67c8683 100644 --- a/src/runtime/funcdata.h +++ b/src/runtime/funcdata.h @@ -16,6 +16,7 @@ #define FUNCDATA_LocalsPointerMaps 1 #define FUNCDATA_InlTree 2 #define FUNCDATA_RegPointerMaps 3 +#define FUNCDATA_StackObjects 4 // Pseudo-assembly statements. diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go index 0da19cdf34..51e8ea4d31 100644 --- a/src/runtime/gc_test.go +++ b/src/runtime/gc_test.go @@ -24,6 +24,9 @@ func TestGcSys(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("skipping test; GOOS=windows http://golang.org/issue/27156") } + if runtime.GOOS == "linux" && runtime.GOARCH == "arm64" { + t.Skip("skipping test; GOOS=linux GOARCH=arm64 https://github.com/golang/go/issues/27636") + } got := runTestProg(t, "testprog", "GCSys") want := "OK\n" if got != want { @@ -571,8 +574,8 @@ func BenchmarkWriteBarrier(b *testing.B) { n := &node{mkTree(level - 1), mkTree(level - 1)} if level == 10 { // Seed GC with enough early pointers so it - // doesn't accidentally switch to mark 2 when - // it only has the top of the tree. + // doesn't start termination barriers when it + // only has the top of the tree. 
wbRoots = append(wbRoots, n) } return n diff --git a/src/runtime/gcinfo_test.go b/src/runtime/gcinfo_test.go index 7dd1a5607c..0741f6361c 100644 --- a/src/runtime/gcinfo_test.go +++ b/src/runtime/gcinfo_test.go @@ -35,14 +35,46 @@ func TestGCInfo(t *testing.T) { verifyGCInfo(t, "data eface", &dataEface, infoEface) verifyGCInfo(t, "data iface", &dataIface, infoIface) - verifyGCInfo(t, "stack Ptr", new(Ptr), infoPtr) - verifyGCInfo(t, "stack ScalarPtr", new(ScalarPtr), infoScalarPtr) - verifyGCInfo(t, "stack PtrScalar", new(PtrScalar), infoPtrScalar) - verifyGCInfo(t, "stack BigStruct", new(BigStruct), infoBigStruct()) - verifyGCInfo(t, "stack string", new(string), infoString) - verifyGCInfo(t, "stack slice", new([]string), infoSlice) - verifyGCInfo(t, "stack eface", new(interface{}), infoEface) - verifyGCInfo(t, "stack iface", new(Iface), infoIface) + { + var x Ptr + verifyGCInfo(t, "stack Ptr", &x, infoPtr) + runtime.KeepAlive(x) + } + { + var x ScalarPtr + verifyGCInfo(t, "stack ScalarPtr", &x, infoScalarPtr) + runtime.KeepAlive(x) + } + { + var x PtrScalar + verifyGCInfo(t, "stack PtrScalar", &x, infoPtrScalar) + runtime.KeepAlive(x) + } + { + var x BigStruct + verifyGCInfo(t, "stack BigStruct", &x, infoBigStruct()) + runtime.KeepAlive(x) + } + { + var x string + verifyGCInfo(t, "stack string", &x, infoString) + runtime.KeepAlive(x) + } + { + var x []string + verifyGCInfo(t, "stack slice", &x, infoSlice) + runtime.KeepAlive(x) + } + { + var x interface{} + verifyGCInfo(t, "stack eface", &x, infoEface) + runtime.KeepAlive(x) + } + { + var x Iface + verifyGCInfo(t, "stack iface", &x, infoIface) + runtime.KeepAlive(x) + } for i := 0; i < 10; i++ { verifyGCInfo(t, "heap Ptr", escape(new(Ptr)), trimDead(padDead(infoPtr))) diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go index 0fc02a8e80..eadbcaeee1 100644 --- a/src/runtime/heapdump.go +++ b/src/runtime/heapdump.go @@ -346,7 +346,7 @@ func dumpgoroutine(gp *g) { dumpint(uint64(gp.goid)) dumpint(uint64(gp.gopc)) dumpint(uint64(readgstatus(gp))) - dumpbool(isSystemGoroutine(gp)) + dumpbool(isSystemGoroutine(gp, false)) dumpbool(false) // isbackground dumpint(uint64(gp.waitsince)) dumpstr(gp.waitreason.String()) @@ -430,7 +430,7 @@ func dumproots() { // MSpan.types for _, s := range mheap_.allspans { - if s.state == _MSpanInUse { + if s.state == mSpanInUse { // Finalizers for sp := s.specials; sp != nil; sp = sp.next { if sp.kind != _KindSpecialFinalizer { @@ -453,7 +453,7 @@ var freemark [_PageSize / 8]bool func dumpobjs() { for _, s := range mheap_.allspans { - if s.state != _MSpanInUse { + if s.state != mSpanInUse { continue } p := s.base() @@ -616,7 +616,7 @@ func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, func dumpmemprof() { iterate_memprof(dumpmemprof_callback) for _, s := range mheap_.allspans { - if s.state != _MSpanInUse { + if s.state != mSpanInUse { continue } for sp := s.specials; sp != nil; sp = sp.next { @@ -637,7 +637,7 @@ var dumphdr = []byte("go1.7 heap dump\n") func mdump() { // make sure we're done sweeping for _, s := range mheap_.allspans { - if s.state == _MSpanInUse { + if s.state == mSpanInUse { s.ensureSwept() } } diff --git a/src/runtime/internal/sys/zgoos_aix.go b/src/runtime/internal/sys/zgoos_aix.go new file mode 100644 index 0000000000..9ce5b3434f --- /dev/null +++ b/src/runtime/internal/sys/zgoos_aix.go @@ -0,0 +1,21 @@ +// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. 
+ +// +build aix + +package sys + +const GOOS = `aix` + +const GoosAndroid = 0 +const GoosAix = 1 +const GoosDarwin = 0 +const GoosDragonfly = 0 +const GoosFreebsd = 0 +const GoosLinux = 0 +const GoosNacl = 0 +const GoosNetbsd = 0 +const GoosOpenbsd = 0 +const GoosPlan9 = 0 +const GoosSolaris = 0 +const GoosWindows = 0 +const GoosZos = 0 diff --git a/src/runtime/internal/sys/zgoos_android.go b/src/runtime/internal/sys/zgoos_android.go index bfdc37792e..36a5768ab6 100644 --- a/src/runtime/internal/sys/zgoos_android.go +++ b/src/runtime/internal/sys/zgoos_android.go @@ -7,6 +7,7 @@ package sys const GOOS = `android` const GoosAndroid = 1 +const GoosAix = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 diff --git a/src/runtime/internal/sys/zgoos_darwin.go b/src/runtime/internal/sys/zgoos_darwin.go index 1c4667f6de..10c0e88e9a 100644 --- a/src/runtime/internal/sys/zgoos_darwin.go +++ b/src/runtime/internal/sys/zgoos_darwin.go @@ -7,6 +7,7 @@ package sys const GOOS = `darwin` const GoosAndroid = 0 +const GoosAix = 0 const GoosDarwin = 1 const GoosDragonfly = 0 const GoosFreebsd = 0 diff --git a/src/runtime/internal/sys/zgoos_dragonfly.go b/src/runtime/internal/sys/zgoos_dragonfly.go index 728bf6abe8..5cb47cb84e 100644 --- a/src/runtime/internal/sys/zgoos_dragonfly.go +++ b/src/runtime/internal/sys/zgoos_dragonfly.go @@ -7,6 +7,7 @@ package sys const GOOS = `dragonfly` const GoosAndroid = 0 +const GoosAix = 0 const GoosDarwin = 0 const GoosDragonfly = 1 const GoosFreebsd = 0 diff --git a/src/runtime/internal/sys/zgoos_freebsd.go b/src/runtime/internal/sys/zgoos_freebsd.go index a8d659169b..470406ce5f 100644 --- a/src/runtime/internal/sys/zgoos_freebsd.go +++ b/src/runtime/internal/sys/zgoos_freebsd.go @@ -7,6 +7,7 @@ package sys const GOOS = `freebsd` const GoosAndroid = 0 +const GoosAix = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 1 diff --git a/src/runtime/internal/sys/zgoos_linux.go b/src/runtime/internal/sys/zgoos_linux.go index 289400c612..76235b748c 100644 --- a/src/runtime/internal/sys/zgoos_linux.go +++ b/src/runtime/internal/sys/zgoos_linux.go @@ -8,6 +8,7 @@ package sys const GOOS = `linux` const GoosAndroid = 0 +const GoosAix = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 diff --git a/src/runtime/internal/sys/zgoos_nacl.go b/src/runtime/internal/sys/zgoos_nacl.go index 3fedb0a2c3..6d28b59667 100644 --- a/src/runtime/internal/sys/zgoos_nacl.go +++ b/src/runtime/internal/sys/zgoos_nacl.go @@ -7,6 +7,7 @@ package sys const GOOS = `nacl` const GoosAndroid = 0 +const GoosAix = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 diff --git a/src/runtime/internal/sys/zgoos_netbsd.go b/src/runtime/internal/sys/zgoos_netbsd.go index 3346e3711c..ef8d938ddb 100644 --- a/src/runtime/internal/sys/zgoos_netbsd.go +++ b/src/runtime/internal/sys/zgoos_netbsd.go @@ -7,6 +7,7 @@ package sys const GOOS = `netbsd` const GoosAndroid = 0 +const GoosAix = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 diff --git a/src/runtime/internal/sys/zgoos_openbsd.go b/src/runtime/internal/sys/zgoos_openbsd.go index 13c0323249..2e43847396 100644 --- a/src/runtime/internal/sys/zgoos_openbsd.go +++ b/src/runtime/internal/sys/zgoos_openbsd.go @@ -7,6 +7,7 @@ package sys const GOOS = `openbsd` const GoosAndroid = 0 +const GoosAix = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 diff --git a/src/runtime/internal/sys/zgoos_plan9.go b/src/runtime/internal/sys/zgoos_plan9.go index 
6b2e977b5e..ed598dcaac 100644 --- a/src/runtime/internal/sys/zgoos_plan9.go +++ b/src/runtime/internal/sys/zgoos_plan9.go @@ -7,6 +7,7 @@ package sys const GOOS = `plan9` const GoosAndroid = 0 +const GoosAix = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 diff --git a/src/runtime/internal/sys/zgoos_solaris.go b/src/runtime/internal/sys/zgoos_solaris.go index cbf70f079a..fe690df6c2 100644 --- a/src/runtime/internal/sys/zgoos_solaris.go +++ b/src/runtime/internal/sys/zgoos_solaris.go @@ -7,6 +7,7 @@ package sys const GOOS = `solaris` const GoosAndroid = 0 +const GoosAix = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 diff --git a/src/runtime/internal/sys/zgoos_windows.go b/src/runtime/internal/sys/zgoos_windows.go index 70839ca793..ea7c43bdf4 100644 --- a/src/runtime/internal/sys/zgoos_windows.go +++ b/src/runtime/internal/sys/zgoos_windows.go @@ -7,6 +7,7 @@ package sys const GOOS = `windows` const GoosAndroid = 0 +const GoosAix = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 diff --git a/src/runtime/internal/sys/zgoos_zos.go b/src/runtime/internal/sys/zgoos_zos.go index ecf449f703..d4027cf876 100644 --- a/src/runtime/internal/sys/zgoos_zos.go +++ b/src/runtime/internal/sys/zgoos_zos.go @@ -7,6 +7,7 @@ package sys const GOOS = `zos` const GoosAndroid = 0 +const GoosAix = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 diff --git a/src/runtime/lock_sema.go b/src/runtime/lock_sema.go index 6e01d70f75..d21a055685 100644 --- a/src/runtime/lock_sema.go +++ b/src/runtime/lock_sema.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin nacl netbsd openbsd plan9 solaris windows +// +build aix darwin nacl netbsd openbsd plan9 solaris windows package runtime diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index 07e0a67240..c3fe1169dc 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -124,8 +124,6 @@ const ( // have the most objects per span. maxObjsPerSpan = pageSize / 8 - mSpanInUse = _MSpanInUse - concurrentSweep = _ConcurrentSweep _PageSize = 1 << _PageShift @@ -328,27 +326,27 @@ var physPageSize uintptr // may use larger alignment, so the caller must be careful to realign the // memory obtained by sysAlloc. // -// SysUnused notifies the operating system that the contents +// sysUnused notifies the operating system that the contents // of the memory region are no longer needed and can be reused // for other purposes. -// SysUsed notifies the operating system that the contents +// sysUsed notifies the operating system that the contents // of the memory region are needed again. // -// SysFree returns it unconditionally; this is only used if +// sysFree returns it unconditionally; this is only used if // an out-of-memory error has been detected midway through -// an allocation. It is okay if SysFree is a no-op. +// an allocation. It is okay if sysFree is a no-op. // -// SysReserve reserves address space without allocating memory. +// sysReserve reserves address space without allocating memory. // If the pointer passed to it is non-nil, the caller wants the -// reservation there, but SysReserve can still choose another +// reservation there, but sysReserve can still choose another // location if that one is unavailable. 
-// NOTE: SysReserve returns OS-aligned memory, but the heap allocator +// NOTE: sysReserve returns OS-aligned memory, but the heap allocator // may use larger alignment, so the caller must be careful to realign the // memory obtained by sysAlloc. // -// SysMap maps previously reserved address space for use. +// sysMap maps previously reserved address space for use. // -// SysFault marks a (already sysAlloc'd) region to fault +// sysFault marks a (already sysAlloc'd) region to fault // if accessed. Used only for debugging the runtime. func mallocinit() { @@ -735,6 +733,9 @@ func nextFreeFast(s *mspan) gclinkptr { // weight allocation. If it is a heavy weight allocation the caller must // determine whether a new GC cycle needs to be started or if the GC is active // whether this goroutine needs to assist the GC. +// +// Must run in a non-preemptible context since otherwise the owner of +// c could change. func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) { s = c.alloc[spc] shouldhelpgc = false @@ -745,9 +746,7 @@ func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bo println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems) throw("s.allocCount != s.nelems && freeIndex == s.nelems") } - systemstack(func() { - c.refill(spc) - }) + c.refill(spc) shouldhelpgc = true s = c.alloc[spc] @@ -993,7 +992,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { if shouldhelpgc { if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { - gcStart(gcBackgroundMode, t) + gcStart(t) } } diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go index 5142f4327a..6da8cf2ccb 100644 --- a/src/runtime/mbarrier.go +++ b/src/runtime/mbarrier.go @@ -318,6 +318,19 @@ func typedmemclr(typ *_type, ptr unsafe.Pointer) { memclrNoHeapPointers(ptr, typ.size) } +//go:linkname reflect_typedmemclr reflect.typedmemclr +func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) { + typedmemclr(typ, ptr) +} + +//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial +func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) { + if typ.kind&kindNoPointers == 0 { + bulkBarrierPreWrite(uintptr(ptr), 0, size) + } + memclrNoHeapPointers(ptr, size) +} + // memclrHasPointers clears n bytes of typed memory starting at ptr. // The caller must ensure that the type of the object at ptr has // pointers, usually by checking typ.kind&kindNoPointers. However, ptr diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go index e217e7695f..87fa027b4e 100644 --- a/src/runtime/mbitmap.go +++ b/src/runtime/mbitmap.go @@ -365,7 +365,7 @@ func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex ui s = spanOf(p) // If p is a bad pointer, it may not be in s's bounds. if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse { - if s == nil || s.state == _MSpanManual { + if s == nil || s.state == mSpanManual { // If s is nil, the virtual address has never been part of the heap. // This pointer may be to some mmap'd region, so we allow it. // Pointers into stacks are also ok, the runtime manages these explicitly. @@ -611,7 +611,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) { } } return - } else if s.state != _MSpanInUse || dst < s.base() || s.limit <= dst { + } else if s.state != mSpanInUse || dst < s.base() || s.limit <= dst { // dst was heap memory at some point, but isn't now. // It can't be a global. 
It must be either our stack, // or in the case of direct channel sends, it could be @@ -1911,6 +1911,20 @@ Run: return totalBits } +// materializeGCProg allocates space for the (1-bit) pointer bitmask +// for an object of size ptrdata. Then it fills that space with the +// pointer bitmask specified by the program prog. +// The bitmask starts at s.startAddr. +// The result must be deallocated with dematerializeGCProg. +func materializeGCProg(ptrdata uintptr, prog *byte) *mspan { + s := mheap_.allocManual((ptrdata/(8*sys.PtrSize)+pageSize-1)/pageSize, &memstats.gc_sys) + runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1) + return s +} +func dematerializeGCProg(s *mspan) { + mheap_.freeManual(s, &memstats.gc_sys) +} + func dumpGCProg(p *byte) { nptr := 0 for { @@ -1980,7 +1994,9 @@ func reflect_gcbits(x interface{}) []byte { return ret } -// Returns GC type info for object p for testing. +// Returns GC type info for the pointer stored in ep for testing. +// If ep points to the stack, only static live information will be returned +// (i.e. not for objects which are only dynamically live stack objects). func getgcmask(ep interface{}) (mask []byte) { e := *efaceOf(&ep) p := e.data @@ -2037,7 +2053,7 @@ func getgcmask(ep interface{}) (mask []byte) { _g_ := getg() gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0) if frame.fn.valid() { - locals, _ := getStackMap(&frame, nil, false) + locals, _, _ := getStackMap(&frame, nil, false) if locals.n == 0 { return } diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go index d0b007f915..e20e92cdf4 100644 --- a/src/runtime/mcache.go +++ b/src/runtime/mcache.go @@ -4,7 +4,10 @@ package runtime -import "unsafe" +import ( + "runtime/internal/atomic" + "unsafe" +) // Per-thread (in Go, per-P) cache for small objects. // No locking needed because it is per-thread (per-P). @@ -42,6 +45,12 @@ type mcache struct { local_largefree uintptr // bytes freed for large objects (>maxsmallsize) local_nlargefree uintptr // number of frees for large objects (>maxsmallsize) local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize) + + // flushGen indicates the sweepgen during which this mcache + // was last flushed. If flushGen != mheap_.sweepgen, the spans + // in this mcache are stale and need to be flushed so they + // can be swept. This is done in acquirep. + flushGen uint32 } // A gclink is a node in a linked list of blocks, like mlink, @@ -76,6 +85,7 @@ var emptymspan mspan func allocmcache() *mcache { lock(&mheap_.lock) c := (*mcache)(mheap_.cachealloc.alloc()) + c.flushGen = mheap_.sweepgen unlock(&mheap_.lock) for i := range c.alloc { c.alloc[i] = &emptymspan @@ -101,21 +111,24 @@ func freemcache(c *mcache) { }) } -// Gets a span that has a free object in it and assigns it -// to be the cached span for the given sizeclass. Returns this span. +// refill acquires a new span of span class spc for c. This span will +// have at least one free object. The current span in c must be full. +// +// Must run in a non-preemptible context since otherwise the owner of +// c could change. func (c *mcache) refill(spc spanClass) { - _g_ := getg() - - _g_.m.locks++ // Return the current cached span to the central lists. s := c.alloc[spc] if uintptr(s.allocCount) != s.nelems { throw("refill of span with free space remaining") } if s != &emptymspan { - s.incache = false + // Mark this span as no longer cached.
+ if s.sweepgen != mheap_.sweepgen+3 { + throw("bad sweepgen in refill") + } + atomic.Store(&s.sweepgen, mheap_.sweepgen) } // Get a new cached span from the central lists. @@ -128,8 +141,11 @@ func (c *mcache) refill(spc spanClass) { throw("span has no free space") } + // Indicate that this span is cached and prevent asynchronous + // sweeping in the next sweep phase. + s.sweepgen = mheap_.sweepgen + 3 + c.alloc[spc] = s - _g_.m.locks-- } func (c *mcache) releaseAll() { @@ -144,3 +160,26 @@ func (c *mcache) releaseAll() { c.tiny = 0 c.tinyoffset = 0 } + +// prepareForSweep flushes c if the system has entered a new sweep phase +// since c was populated. This must happen between the sweep phase +// starting and the first allocation from c. +func (c *mcache) prepareForSweep() { + // Alternatively, instead of making sure we do this on every P + // between starting the world and allocating on that P, we + // could leave allocate-black on, allow allocation to continue + // as usual, use a ragged barrier at the beginning of sweep to + // ensure all cached spans are swept, and then disable + // allocate-black. However, with this approach it's difficult + // to avoid spilling mark bits into the *next* GC cycle. + sg := mheap_.sweepgen + if c.flushGen == sg { + return + } else if c.flushGen != sg-2 { + println("bad flushGen", c.flushGen, "in prepareForSweep; sweepgen", sg) + throw("bad flushGen") + } + c.releaseAll() + stackcache_clear(c) + atomic.Store(&c.flushGen, mheap_.sweepgen) // Synchronizes with gcStart +} diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go index c1e0b472bc..9ca8e5d222 100644 --- a/src/runtime/mcentral.go +++ b/src/runtime/mcentral.go @@ -135,7 +135,6 @@ havespan: // heap_live changed. gcController.revise() } - s.incache = true freeByteBase := s.freeindex &^ (64 - 1) whichByte := freeByteBase / 8 // Init alloc bits cache. @@ -150,28 +149,54 @@ havespan: // Return span from an MCache. func (c *mcentral) uncacheSpan(s *mspan) { - lock(&c.lock) - - s.incache = false - if s.allocCount == 0 { throw("uncaching span but s.allocCount == 0") } cap := int32((s.npages << _PageShift) / s.elemsize) n := cap - int32(s.allocCount) + + // cacheSpan updated alloc assuming all objects on s were + // going to be allocated. Adjust for any that weren't. We must + // do this before potentially sweeping the span. if n > 0 { + atomic.Xadd64(&c.nmalloc, -int64(n)) + } + + sg := mheap_.sweepgen + stale := s.sweepgen == sg+1 + if stale { + // Span was cached before sweep began. It's our + // responsibility to sweep it. + // + // Set sweepgen to indicate it's not cached but needs + // sweeping. sweep will set s.sweepgen to indicate s + // is swept. + s.sweepgen = sg - 1 + s.sweep(true) + // sweep may have freed objects, so recompute n. + n = cap - int32(s.allocCount) + } else { + // Indicate that s is no longer cached. + s.sweepgen = sg + } + + if n > 0 { + lock(&c.lock) c.empty.remove(s) c.nonempty.insert(s) - // mCentral_CacheSpan conservatively counted - // unallocated slots in heap_live. Undo this. - atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize)) - // cacheSpan updated alloc assuming all objects on s - // were going to be allocated. Adjust for any that - // weren't. - atomic.Xadd64(&c.nmalloc, -int64(n)) + if !stale { + // mCentral_CacheSpan conservatively counted + // unallocated slots in heap_live. Undo this. 
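Aside: the flushGen/sweepgen handshake above boils down to a generation guard. Each mcache remembers the sweep generation it was last flushed for, and prepareForSweep flushes it exactly when it is one full cycle (two generations) behind. Below is a minimal, self-contained sketch of that guard; cache, flushGen, and globalSweepGen are illustrative names rather than the runtime's types, and the flush itself is reduced to a print.

package main

import (
	"fmt"
	"sync/atomic"
)

// globalSweepGen stands in for mheap_.sweepgen: it advances by 2 each
// GC cycle, so a cache whose flushGen lags by exactly 2 is one cycle
// stale and must be flushed before it is used again.
var globalSweepGen uint32 = 4

type cache struct {
	flushGen uint32 // sweep generation this cache was last flushed for
}

// prepareForSweep flushes c if a new sweep phase has started since c
// was last flushed, mirroring the guard described above.
func (c *cache) prepareForSweep() {
	sg := atomic.LoadUint32(&globalSweepGen)
	switch {
	case c.flushGen == sg:
		return // already flushed this cycle
	case c.flushGen != sg-2:
		panic(fmt.Sprintf("bad flushGen %d with sweepgen %d", c.flushGen, sg))
	}
	fmt.Println("flushing stale cache")
	atomic.StoreUint32(&c.flushGen, sg) // publish the flush
}

func main() {
	c := &cache{flushGen: 2}
	c.prepareForSweep() // flushes: one generation behind
	c.prepareForSweep() // no-op: already current
}

Publishing the new generation with an atomic store is what lets the check in gcStart (that every P has finished its deferred flush) observe the update without a lock, matching the "Synchronizes with gcStart" note in the real code.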
+ // + // If this span was cached before sweep, then + // heap_live was totally recomputed since + // caching this span, so we don't do this for + // stale spans. + atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize)) + } + unlock(&c.lock) } - unlock(&c.lock) } // freeSpan updates c and s after sweeping s. @@ -183,13 +208,13 @@ func (c *mcentral) uncacheSpan(s *mspan) { // If preserve=true, it does not move s (the caller // must take care of it). func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool { - if s.incache { + if sg := mheap_.sweepgen; s.sweepgen == sg+1 || s.sweepgen == sg+3 { throw("freeSpan given cached span") } s.needzero = 1 if preserve { - // preserve is set only when called from MCentral_CacheSpan above, + // preserve is set only when called from (un)cacheSpan above, // the span must be in the empty list. if !s.inList() { throw("can't preserve unlinked span") diff --git a/src/runtime/mem_darwin.go b/src/runtime/mem_aix.go index 75c59f9cdd..f11f0aba52 100644 --- a/src/runtime/mem_darwin.go +++ b/src/runtime/mem_aix.go @@ -1,26 +1,36 @@ -// Copyright 2010 The Go Authors. All rights reserved. +// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime -import "unsafe" +import ( + "unsafe" +) -// Don't split the stack as this function may be invoked without a valid G, -// which prevents us from allocating more stack. +// Don't split the stack as this method may be invoked without a valid G, which +// prevents us from allocating more stack. //go:nosplit func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer { - v, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0) + p, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANONYMOUS|_MAP_PRIVATE, -1, 0) if err != 0 { + if err == _EACCES { + print("runtime: mmap: access denied\n") + exit(2) + } + if err == _EAGAIN { + print("runtime: mmap: too much locked memory (check 'ulimit -l').\n") + exit(2) + } + //println("sysAlloc failed: ", err) return nil } mSysStatInc(sysStat, n) - return v + return p } func sysUnused(v unsafe.Pointer, n uintptr) { - // Linux's MADV_DONTNEED is like BSD's MADV_FREE. - madvise(v, n, _MADV_FREE) + madvise(v, n, _MADV_DONTNEED) } func sysUsed(v unsafe.Pointer, n uintptr) { @@ -32,27 +42,29 @@ func sysUsed(v unsafe.Pointer, n uintptr) { func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) { mSysStatDec(sysStat, n) munmap(v, n) + } func sysFault(v unsafe.Pointer, n uintptr) { - mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0) + mmap(v, n, _PROT_NONE, _MAP_ANONYMOUS|_MAP_PRIVATE|_MAP_FIXED, -1, 0) } func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer { - p, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0) + p, err := mmap(v, n, _PROT_NONE, _MAP_ANONYMOUS|_MAP_PRIVATE, -1, 0) if err != 0 { return nil } return p } -const ( - _ENOMEM = 12 -) - func sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) { mSysStatInc(sysStat, n) - p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0) + + // AIX does not allow mapping a range that is already mapped. + // So always unmap first even if it is already unmapped. 
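Aside: refill, uncacheSpan, and freeSpan above all key off how a span's sweepgen compares to mheap_.sweepgen. The sketch below summarizes that encoding, including the two "cached" states (sg+1 and sg+3) introduced by this change; the state strings are descriptive labels only, and the unswept/being-swept values follow the usual convention that sweepgen advances by two per cycle.

package main

import "fmt"

// spanSweepState labels a span's sweepgen relative to the heap's
// sweepgen, following the comparisons used by refill, uncacheSpan,
// and freeSpan above.
func spanSweepState(spanGen, heapGen uint32) string {
	switch spanGen {
	case heapGen - 2:
		return "unswept: needs sweeping"
	case heapGen - 1:
		return "being swept"
	case heapGen:
		return "swept, ready for allocation"
	case heapGen + 1:
		return "cached before sweep began: the uncaching P must sweep it"
	case heapGen + 3:
		return "swept, then cached by an mcache"
	}
	return fmt.Sprintf("unexpected sweepgen %d (heap sweepgen %d)", spanGen, heapGen)
}

func main() {
	const heapGen = 10
	for _, g := range []uint32{8, 9, 10, 11, 13} {
		fmt.Printf("span sweepgen %d: %s\n", g, spanSweepState(g, heapGen))
	}
}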
+ munmap(v, n) + p, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANONYMOUS|_MAP_FIXED|_MAP_PRIVATE, -1, 0) + if err == _ENOMEM { throw("runtime: out of memory") } diff --git a/src/runtime/mem_bsd.go b/src/runtime/mem_bsd.go index cc70e806ea..13065b61d4 100644 --- a/src/runtime/mem_bsd.go +++ b/src/runtime/mem_bsd.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build dragonfly freebsd nacl netbsd openbsd solaris +// +build darwin dragonfly freebsd nacl netbsd openbsd solaris package runtime diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go index 7aa48170a1..845f72ded2 100644 --- a/src/runtime/mem_linux.go +++ b/src/runtime/mem_linux.go @@ -5,6 +5,7 @@ package runtime import ( + "runtime/internal/atomic" "runtime/internal/sys" "unsafe" ) @@ -34,10 +35,12 @@ func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer { return p } +var adviseUnused = uint32(_MADV_FREE) + func sysUnused(v unsafe.Pointer, n uintptr) { // By default, Linux's "transparent huge page" support will // merge pages into a huge page if there's even a single - // present regular page, undoing the effects of the DONTNEED + // present regular page, undoing the effects of madvise(adviseUnused) // below. On amd64, that means khugepaged can turn a single // 4KB page to 2MB, bloating the process's RSS by as much as // 512X. (See issue #8832 and Linux kernel bug @@ -102,7 +105,13 @@ func sysUnused(v unsafe.Pointer, n uintptr) { throw("unaligned sysUnused") } - madvise(v, n, _MADV_DONTNEED) + advise := atomic.Load(&adviseUnused) + if errno := madvise(v, n, int32(advise)); advise == _MADV_FREE && errno != 0 { + // MADV_FREE was added in Linux 4.5. Fall back to MADV_DONTNEED if it is + // not supported. + atomic.Store(&adviseUnused, _MADV_DONTNEED) + madvise(v, n, _MADV_DONTNEED) + } } func sysUsed(v unsafe.Pointer, n uintptr) { diff --git a/src/runtime/memmove_amd64p32.s b/src/runtime/memmove_amd64p32.s index 8e9fdd14c5..114077311c 100644 --- a/src/runtime/memmove_amd64p32.s +++ b/src/runtime/memmove_amd64p32.s @@ -34,7 +34,7 @@ back: ADDL BX, DI ADDL BX, SI STD - + MOVL BX, CX SHRL $2, CX ANDL $3, BX diff --git a/src/runtime/memmove_arm.s b/src/runtime/memmove_arm.s index 324b21bf7a..8352fb7860 100644 --- a/src/runtime/memmove_arm.s +++ b/src/runtime/memmove_arm.s @@ -138,7 +138,7 @@ _f32loop: CMP TMP, TS BHS _f4tail - MOVM.IA.W (FROM), [R1-R8] + MOVM.IA.W (FROM), [R1-R8] MOVM.IA.W [R1-R8], (TS) B _f32loop diff --git a/src/runtime/memmove_plan9_386.s b/src/runtime/memmove_plan9_386.s index 7ff01940a2..65dec93f6b 100644 --- a/src/runtime/memmove_plan9_386.s +++ b/src/runtime/memmove_plan9_386.s @@ -56,7 +56,7 @@ tail: /* * forward copy loop */ -forward: +forward: MOVL BX, CX SHRL $2, CX ANDL $3, BX diff --git a/src/runtime/memmove_plan9_amd64.s b/src/runtime/memmove_plan9_amd64.s index f18b59f3d2..b729c7c0e7 100644 --- a/src/runtime/memmove_plan9_amd64.s +++ b/src/runtime/memmove_plan9_amd64.s @@ -73,7 +73,7 @@ back: ADDQ BX, CX CMPQ CX, DI JLS forward - + /* * whole thing backwards has * adjusted addresses diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index c95b5ed37f..9dfee5a4dc 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -28,8 +28,7 @@ // b. Sweep any unswept spans. There will only be unswept spans if // this GC cycle was forced before the expected time. // -// 2. GC performs the "mark 1" sub-phase. In this sub-phase, Ps are -// allowed to locally cache parts of the work queue. +// 2. GC performs the mark phase. 
// // a. Prepare for the mark phase by setting gcphase to _GCmark // (from _GCoff), enabling the write barrier, enabling mutator @@ -54,28 +53,21 @@ // object to black and shading all pointers found in the object // (which in turn may add those pointers to the work queue). // -// 3. Once the global work queue is empty (but local work queue caches -// may still contain work), GC performs the "mark 2" sub-phase. +// e. Because GC work is spread across local caches, GC uses a +// distributed termination algorithm to detect when there are no +// more root marking jobs or grey objects (see gcMarkDone). At this +// point, GC transitions to mark termination. // -// a. GC stops all workers, disables local work queue caches, -// flushes each P's local work queue cache to the global work queue -// cache, and reenables workers. -// -// b. GC again drains the work queue, as in 2d above. -// -// 4. Once the work queue is empty, GC performs mark termination. +// 3. GC performs mark termination. // // a. Stop the world. // // b. Set gcphase to _GCmarktermination, and disable workers and // assists. // -// c. Drain any remaining work from the work queue (typically there -// will be none). +// c. Perform housekeeping like flushing mcaches. // -// d. Perform other housekeeping like flushing mcaches. -// -// 5. GC performs the sweep phase. +// 4. GC performs the sweep phase. // // a. Prepare for the sweep phase by setting gcphase to _GCoff, // setting up sweep state and disabling the write barrier. @@ -86,7 +78,7 @@ // c. GC does concurrent sweeping in the background and in response // to allocation. See description below. // -// 6. When sufficient allocation has taken place, replay the sequence +// 5. When sufficient allocation has taken place, replay the sequence // starting with 1 above. See discussion of GC rate below. // Concurrent sweep. @@ -261,21 +253,6 @@ var writeBarrier struct { // gcphase == _GCmark. var gcBlackenEnabled uint32 -// gcBlackenPromptly indicates that optimizations that may -// hide work from the global work queue should be disabled. -// -// If gcBlackenPromptly is true, per-P gcWork caches should -// be flushed immediately and new objects should be allocated black. -// -// There is a tension between allocating objects white and -// allocating them black. If white and the objects die before being -// marked they can be collected during this GC cycle. On the other -// hand allocating them black will reduce _GCmarktermination latency -// since more work is done in the mark phase. This tension is resolved -// by allocating white until the mark phase is approaching its end and -// then allocating black for the remainder of the mark phase. -var gcBlackenPromptly bool - const ( _GCoff = iota // GC not running; sweeping in background, write barrier disabled _GCmark // GC marking roots and workbufs: allocate black, write barrier ENABLED @@ -478,6 +455,12 @@ func (c *gcControllerState) startCycle() { c.fractionalUtilizationGoal = 0 } + // In STW mode, we just want dedicated workers. 
+ if debug.gcstoptheworld > 0 { + c.dedicatedMarkWorkersNeeded = int64(gomaxprocs) + c.fractionalUtilizationGoal = 0 + } + // Clear per-P state for _, p := range allp { p.gcAssistTime = 0 @@ -955,32 +938,15 @@ var work struct { markrootNext uint32 // next markroot job markrootJobs uint32 // number of markroot jobs - nproc uint32 - tstart int64 - nwait uint32 - ndone uint32 - alldone note - - // helperDrainBlock indicates that GC mark termination helpers - // should pass gcDrainBlock to gcDrain to block in the - // getfull() barrier. Otherwise, they should pass gcDrainNoBlock. - // - // TODO: This is a temporary fallback to work around races - // that cause early mark termination. - helperDrainBlock bool + nproc uint32 + tstart int64 + nwait uint32 + ndone uint32 // Number of roots of various root types. Set by gcMarkRootPrepare. nFlushCacheRoots int nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int - // markrootDone indicates that roots have been marked at least - // once during the current GC cycle. This is checked by root - // marking operations that have to happen only during the - // first root marking pass, whether that's during the - // concurrent mark phase in current GC or mark termination in - // STW GC. - markrootDone bool - // Each type of GC state transition is protected by a lock. // Since multiple threads can simultaneously detect the state // transition condition, any thread that detects a transition @@ -996,8 +962,7 @@ var work struct { // startSema protects the transition from "off" to mark or // mark termination. startSema uint32 - // markDoneSema protects transitions from mark 1 to mark 2 and - // from mark 2 to mark termination. + // markDoneSema protects transitions from mark to mark termination. markDoneSema uint32 bgMarkReady note // signal background mark worker has started @@ -1087,7 +1052,7 @@ func GC() { // We're now in sweep N or later. Trigger GC cycle N+1, which // will first finish sweep N if necessary and then enter sweep // termination N+1. - gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerCycle, n: n + 1}) + gcStart(gcTrigger{kind: gcTriggerCycle, n: n + 1}) // Wait for mark termination N+1 to complete. gcWaitOnMark(n + 1) @@ -1225,13 +1190,13 @@ func (t gcTrigger) test() bool { return true } -// gcStart transitions the GC from _GCoff to _GCmark (if -// !mode.stwMark) or _GCmarktermination (if mode.stwMark) by -// performing sweep termination and GC initialization. +// gcStart starts the GC. It transitions from _GCoff to _GCmark (if +// debug.gcstoptheworld == 0) or performs all of GC (if +// debug.gcstoptheworld != 0). // // This may return without performing this transition in some cases, // such as when called on a system stack or with locks held. -func gcStart(mode gcMode, trigger gcTrigger) { +func gcStart(trigger gcTrigger) { // Since this is called from malloc and malloc is called in // the guts of a number of libraries that might be holding // locks, don't attempt to start GC in non-preemptible or @@ -1274,12 +1239,11 @@ func gcStart(mode gcMode, trigger gcTrigger) { // We do this after re-checking the transition condition so // that multiple goroutines that detect the heap trigger don't // start multiple STW GCs. - if mode == gcBackgroundMode { - if debug.gcstoptheworld == 1 { - mode = gcForceMode - } else if debug.gcstoptheworld == 2 { - mode = gcForceBlockMode - } + mode := gcBackgroundMode + if debug.gcstoptheworld == 1 { + mode = gcForceMode + } else if debug.gcstoptheworld == 2 { + mode = gcForceBlockMode } // Ok, we're doing it! 
Stop everybody else @@ -1289,10 +1253,16 @@ func gcStart(mode gcMode, trigger gcTrigger) { traceGCStart() } - if mode == gcBackgroundMode { - gcBgMarkStartWorkers() + // Check that all Ps have finished deferred mcache flushes. + for _, p := range allp { + if fg := atomic.Load(&p.mcache.flushGen); fg != mheap_.sweepgen { + println("runtime: p", p.id, "flushGen", fg, "!= sweepgen", mheap_.sweepgen) + throw("p mcache not flushed") + } } + gcBgMarkStartWorkers() + gcResetMarkState() work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs @@ -1321,199 +1291,192 @@ func gcStart(mode gcMode, trigger gcTrigger) { clearpools() work.cycles++ - if mode == gcBackgroundMode { // Do as much work concurrently as possible - gcController.startCycle() - work.heapGoal = memstats.next_gc - // Enter concurrent mark phase and enable - // write barriers. - // - // Because the world is stopped, all Ps will - // observe that write barriers are enabled by - // the time we start the world and begin - // scanning. - // - // Write barriers must be enabled before assists are - // enabled because they must be enabled before - // any non-leaf heap objects are marked. Since - // allocations are blocked until assists can - // happen, we want enable assists as early as - // possible. - setGCPhase(_GCmark) + gcController.startCycle() + work.heapGoal = memstats.next_gc - gcBgMarkPrepare() // Must happen before assist enable. - gcMarkRootPrepare() + // In STW mode, disable scheduling of user Gs. This may also + // disable scheduling of this goroutine, so it may block as + // soon as we start the world again. + if mode != gcBackgroundMode { + schedEnableUser(false) + } - // Mark all active tinyalloc blocks. Since we're - // allocating from these, they need to be black like - // other allocations. The alternative is to blacken - // the tiny block on every allocation from it, which - // would slow down the tiny allocator. - gcMarkTinyAllocs() + // Enter concurrent mark phase and enable + // write barriers. + // + // Because the world is stopped, all Ps will + // observe that write barriers are enabled by + // the time we start the world and begin + // scanning. + // + // Write barriers must be enabled before assists are + // enabled because they must be enabled before + // any non-leaf heap objects are marked. Since + // allocations are blocked until assists can + // happen, we want enable assists as early as + // possible. + setGCPhase(_GCmark) - // At this point all Ps have enabled the write - // barrier, thus maintaining the no white to - // black invariant. Enable mutator assists to - // put back-pressure on fast allocating - // mutators. - atomic.Store(&gcBlackenEnabled, 1) + gcBgMarkPrepare() // Must happen before assist enable. + gcMarkRootPrepare() - // Assists and workers can start the moment we start - // the world. - gcController.markStartTime = now + // Mark all active tinyalloc blocks. Since we're + // allocating from these, they need to be black like + // other allocations. The alternative is to blacken + // the tiny block on every allocation from it, which + // would slow down the tiny allocator. + gcMarkTinyAllocs() - // Concurrent mark. - systemstack(func() { - now = startTheWorldWithSema(trace.enabled) - }) + // At this point all Ps have enabled the write + // barrier, thus maintaining the no white to + // black invariant. Enable mutator assists to + // put back-pressure on fast allocating + // mutators. + atomic.Store(&gcBlackenEnabled, 1) + + // Assists and workers can start the moment we start + // the world. 
+ gcController.markStartTime = now + + // Concurrent mark. + systemstack(func() { + now = startTheWorldWithSema(trace.enabled) work.pauseNS += now - work.pauseStart work.tMark = now - } else { - if trace.enabled { - // Switch to mark termination STW. - traceGCSTWDone() - traceGCSTWStart(0) - } - t := nanotime() - work.tMark, work.tMarkTerm = t, t - work.heapGoal = work.heap0 - - // Perform mark termination. This will restart the world. - gcMarkTermination(memstats.triggerRatio) + }) + // In STW mode, we could block the instant systemstack + // returns, so don't do anything important here. Make sure we + // block rather than returning to user code. + if mode != gcBackgroundMode { + Gosched() } semrelease(&work.startSema) } -// gcMarkDone transitions the GC from mark 1 to mark 2 and from mark 2 -// to mark termination. +// gcMarkDoneFlushed counts the number of P's with flushed work. // -// This should be called when all mark work has been drained. In mark -// 1, this includes all root marking jobs, global work buffers, and -// active work buffers in assists and background workers; however, -// work may still be cached in per-P work buffers. In mark 2, per-P -// caches are disabled. +// Ideally this would be a captured local in gcMarkDone, but forEachP +// escapes its callback closure, so it can't capture anything. +// +// This is protected by markDoneSema. +var gcMarkDoneFlushed uint32 + +// gcMarkDone transitions the GC from mark to mark termination if all +// reachable objects have been marked (that is, there are no grey +// objects and can be no more in the future). Otherwise, it flushes +// all local work to the global queues where it can be discovered by +// other workers. +// +// This should be called when all local mark work has been drained and +// there are no remaining workers. Specifically, when +// +// work.nwait == work.nproc && !gcMarkWorkAvailable(p) // // The calling context must be preemptible. // -// Note that it is explicitly okay to have write barriers in this -// function because completion of concurrent mark is best-effort -// anyway. Any work created by write barriers here will be cleaned up -// by mark termination. +// Flushing local work is important because idle Ps may have local +// work queued. This is the only way to make that work visible and +// drive GC to completion. +// +// It is explicitly okay to have write barriers in this function. If +// it does transition to mark termination, then all reachable objects +// have been marked, so the write barrier cannot shade any more +// objects. func gcMarkDone() { -top: + // Ensure only one thread is running the ragged barrier at a + // time. semacquire(&work.markDoneSema) +top: // Re-check transition condition under transition lock. + // + // It's critical that this checks the global work queues are + // empty before performing the ragged barrier. Otherwise, + // there could be global work that a P could take after the P + // has passed the ragged barrier. if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) { semrelease(&work.markDoneSema) return } - // Disallow starting new workers so that any remaining workers - // in the current mark phase will drain out. - // - // TODO(austin): Should dedicated workers keep an eye on this - // and exit gcDrain promptly? 
- atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, -0xffffffff) - prevFractionalGoal := gcController.fractionalUtilizationGoal - gcController.fractionalUtilizationGoal = 0 - - if !gcBlackenPromptly { - // Transition from mark 1 to mark 2. - // - // The global work list is empty, but there can still be work - // sitting in the per-P work caches. - // Flush and disable work caches. - - // Disallow caching workbufs and indicate that we're in mark 2. - gcBlackenPromptly = true - - // Prevent completion of mark 2 until we've flushed - // cached workbufs. - atomic.Xadd(&work.nwait, -1) - - // GC is set up for mark 2. Let Gs blocked on the - // transition lock go while we flush caches. - semrelease(&work.markDoneSema) - - systemstack(func() { - // Flush all currently cached workbufs and - // ensure all Ps see gcBlackenPromptly. This - // also blocks until any remaining mark 1 - // workers have exited their loop so we can - // start new mark 2 workers. - forEachP(func(_p_ *p) { - wbBufFlush1(_p_) - _p_.gcw.dispose() - }) + // Flush all local buffers and collect flushedWork flags. + gcMarkDoneFlushed = 0 + systemstack(func() { + forEachP(func(_p_ *p) { + // Flush the write barrier buffer, since this may add + // work to the gcWork. + wbBufFlush1(_p_) + // Flush the gcWork, since this may create global work + // and set the flushedWork flag. + // + // TODO(austin): Break up these workbufs to + // better distribute work. + _p_.gcw.dispose() + // Collect the flushedWork flag. + if _p_.gcw.flushedWork { + atomic.Xadd(&gcMarkDoneFlushed, 1) + _p_.gcw.flushedWork = false + } }) + }) - // Check that roots are marked. We should be able to - // do this before the forEachP, but based on issue - // #16083 there may be a (harmless) race where we can - // enter mark 2 while some workers are still scanning - // stacks. The forEachP ensures these scans are done. - // - // TODO(austin): Figure out the race and fix this - // properly. - gcMarkRootCheck() - - // Now we can start up mark 2 workers. - atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 0xffffffff) - gcController.fractionalUtilizationGoal = prevFractionalGoal + if gcMarkDoneFlushed != 0 { + // More grey objects were discovered since the + // previous termination check, so there may be more + // work to do. Keep going. It's possible the + // transition condition became true again during the + // ragged barrier, so re-check it. + goto top + } - incnwait := atomic.Xadd(&work.nwait, +1) - if incnwait == work.nproc && !gcMarkWorkAvailable(nil) { - // This loop will make progress because - // gcBlackenPromptly is now true, so it won't - // take this same "if" branch. - goto top - } - } else { - // Transition to mark termination. - now := nanotime() - work.tMarkTerm = now - work.pauseStart = now - getg().m.preemptoff = "gcing" - if trace.enabled { - traceGCSTWStart(0) - } - systemstack(stopTheWorldWithSema) - // The gcphase is _GCmark, it will transition to _GCmarktermination - // below. The important thing is that the wb remains active until - // all marking is complete. This includes writes made by the GC. + // There was no global work, no local work, and no Ps + // communicated work since we took markDoneSema. Therefore + // there are no grey objects and no more objects can be + // shaded. Transition to mark termination. 
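Aside: the termination check above replaces the old mark 2 sub-phase with a distributed algorithm: under a ragged barrier, every P flushes its local buffers to the global queue, a counter records whether any P actually had work to flush, and the whole check is retried if one did. The following is a deliberately simplified, single-threaded model of that retry structure; worker, flushAll, and markDone are illustrative stand-ins, not runtime APIs, and draining the global queue is elided.

package main

import "fmt"

// worker models a P with a local buffer of grey objects.
type worker struct {
	local []int
}

var global []int // global work queue

// flushAll models the ragged barrier pass: every worker moves its
// local work to the global queue, and the return value says how many
// workers had anything to flush (the gcMarkDoneFlushed counter).
func flushAll(ws []*worker) (flushed int) {
	for _, w := range ws {
		if len(w.local) > 0 {
			global = append(global, w.local...)
			w.local = w.local[:0]
			flushed++
		}
	}
	return flushed
}

// markDone retries the barrier until no worker communicated new work
// and the global queue is empty; only then is it safe to declare the
// mark phase finished.
func markDone(ws []*worker) {
	for {
		if flushAll(ws) == 0 && len(global) == 0 {
			fmt.Println("no grey objects remain: enter mark termination")
			return
		}
		fmt.Println("work was flushed or remains; drain and re-check")
		// Stand-in for workers draining the global queue (scanning
		// would normally grey more objects and refill local buffers).
		global = global[:0]
	}
}

func main() {
	ws := []*worker{{local: []int{1, 2}}, {}, {local: []int{3}}}
	markDone(ws)
}

Counting flushes, rather than only checking queue emptiness, is what catches grey objects discovered after the condition was first observed: any P that had to flush something may have created work another P can pick up, so the check must be repeated.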
+ now := nanotime() + work.tMarkTerm = now + work.pauseStart = now + getg().m.preemptoff = "gcing" + if trace.enabled { + traceGCSTWStart(0) + } + systemstack(stopTheWorldWithSema) + // The gcphase is _GCmark, it will transition to _GCmarktermination + // below. The important thing is that the wb remains active until + // all marking is complete. This includes writes made by the GC. - // Record that one root marking pass has completed. - work.markrootDone = true + // Disable assists and background workers. We must do + // this before waking blocked assists. + atomic.Store(&gcBlackenEnabled, 0) - // Disable assists and background workers. We must do - // this before waking blocked assists. - atomic.Store(&gcBlackenEnabled, 0) + // Wake all blocked assists. These will run when we + // start the world again. + gcWakeAllAssists() - // Wake all blocked assists. These will run when we - // start the world again. - gcWakeAllAssists() + // Likewise, release the transition lock. Blocked + // workers and assists will run when we start the + // world again. + semrelease(&work.markDoneSema) - // Likewise, release the transition lock. Blocked - // workers and assists will run when we start the - // world again. - semrelease(&work.markDoneSema) + // In STW mode, re-enable user goroutines. These will be + // queued to run after we start the world. + schedEnableUser(true) - // endCycle depends on all gcWork cache stats being - // flushed. This is ensured by mark 2. - nextTriggerRatio := gcController.endCycle() + // endCycle depends on all gcWork cache stats being flushed. + // The termination algorithm above ensured that up to + // allocations since the ragged barrier. + nextTriggerRatio := gcController.endCycle() - // Perform mark termination. This will restart the world. - gcMarkTermination(nextTriggerRatio) - } + // Perform mark termination. This will restart the world. + gcMarkTermination(nextTriggerRatio) } func gcMarkTermination(nextTriggerRatio float64) { // World is stopped. // Start marktermination which includes enabling the write barrier. atomic.Store(&gcBlackenEnabled, 0) - gcBlackenPromptly = false setGCPhase(_GCmarktermination) work.heap1 = memstats.heap_live @@ -1546,35 +1509,22 @@ func gcMarkTermination(nextTriggerRatio float64) { systemstack(func() { work.heap2 = work.bytesMarked if debug.gccheckmark > 0 { - // Run a full stop-the-world mark using checkmark bits, - // to check that we didn't forget to mark anything during - // the concurrent mark process. + // Run a full non-parallel, stop-the-world + // mark using checkmark bits, to check that we + // didn't forget to mark anything during the + // concurrent mark process. gcResetMarkState() initCheckmarks() - gcMark(startTime) + gcw := &getg().m.p.ptr().gcw + gcDrain(gcw, 0) + wbBufFlush1(getg().m.p.ptr()) + gcw.dispose() clearCheckmarks() } // marking is complete so we can turn the write barrier off setGCPhase(_GCoff) gcSweep(work.mode) - - if debug.gctrace > 1 { - startTime = nanotime() - // The g stacks have been scanned so - // they have gcscanvalid==true and gcworkdone==true. - // Reset these so that all stacks will be rescanned. - gcResetMarkState() - finishsweep_m() - - // Still in STW but gcphase is _GCoff, reset to _GCmarktermination - // At this point all objects will be found during the gcMark which - // does a complete STW mark and object scan. - setGCPhase(_GCmarktermination) - gcMark(startTime) - setGCPhase(_GCoff) // marking is done, turn off wb. 
- gcSweep(work.mode) - } }) _g_.m.traceback = 0 @@ -1652,6 +1602,16 @@ func gcMarkTermination(nextTriggerRatio float64) { // Free stack spans. This must be done between GC cycles. systemstack(freeStackSpans) + // Ensure all mcaches are flushed. Each P will flush its own + // mcache before allocating, but idle Ps may not. Since this + // is necessary to sweep all spans, we need to ensure all + // mcaches are flushed before we start the next GC cycle. + systemstack(func() { + forEachP(func(_p_ *p) { + _p_.mcache.prepareForSweep() + }) + }) + // Print gctrace before dropping worldsema. As soon as we drop // worldsema another cycle could start and smash the stats // we're trying to print. @@ -1852,7 +1812,7 @@ func gcBgMarkWorker(_p_ *p) { } // Go back to draining, this time // without preemption. - gcDrain(&_p_.gcw, gcDrainNoBlock|gcDrainFlushBgCredit) + gcDrain(&_p_.gcw, gcDrainFlushBgCredit) case gcMarkWorkerFractionalMode: gcDrain(&_p_.gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit) case gcMarkWorkerIdleMode: @@ -1861,16 +1821,6 @@ func gcBgMarkWorker(_p_ *p) { casgstatus(gp, _Gwaiting, _Grunning) }) - // If we are nearing the end of mark, dispose - // of the cache promptly. We must do this - // before signaling that we're no longer - // working so that other workers can't observe - // no workers and no work while we have this - // cached, and before we compute done. - if gcBlackenPromptly { - _p_.gcw.dispose() - } - // Account for time. duration := nanotime() - startTime switch _p_.gcMarkWorkerMode { @@ -1947,50 +1897,11 @@ func gcMark(start_time int64) { } work.tstart = start_time - // Queue root marking jobs. - gcMarkRootPrepare() - - work.nwait = 0 - work.ndone = 0 - work.nproc = uint32(gcprocs()) - - if work.full == 0 && work.nDataRoots+work.nBSSRoots+work.nSpanRoots+work.nStackRoots == 0 { - // There's no work on the work queue and no root jobs - // that can produce work, so don't bother entering the - // getfull() barrier. - // - // This will be the situation the vast majority of the - // time after concurrent mark. However, we still need - // a fallback for STW GC and because there are some - // known races that occasionally leave work around for - // mark termination. - // - // We're still hedging our bets here: if we do - // accidentally produce some work, we'll still process - // it, just not necessarily in parallel. - // - // TODO(austin): Fix the races and and remove - // work draining from mark termination so we don't - // need the fallback path. - work.helperDrainBlock = false - } else { - work.helperDrainBlock = true - } - - if work.nproc > 1 { - noteclear(&work.alldone) - helpgc(int32(work.nproc)) - } - - gchelperstart() - - gcw := &getg().m.p.ptr().gcw - if work.helperDrainBlock { - gcDrain(gcw, gcDrainBlock) - } else { - gcDrain(gcw, gcDrainNoBlock) + // Check that there's no marking work remaining. + if work.full != 0 || work.markrootNext < work.markrootJobs { + print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n") + panic("non-empty mark queue after concurrent mark") } - gcw.dispose() if debug.gccheckmark > 0 { // This is expensive when there's a large number of @@ -2001,23 +1912,35 @@ func gcMark(start_time int64) { throw("work.full != 0") } - if work.nproc > 1 { - notesleep(&work.alldone) - } - - // Record that at least one root marking pass has completed. 
- work.markrootDone = true - - // Double-check that all gcWork caches are empty. This should - // be ensured by mark 2 before we enter mark termination. + // Clear out buffers and double-check that all gcWork caches + // are empty. This should be ensured by gcMarkDone before we + // enter mark termination. + // + // TODO: We could clear out buffers just before mark if this + // has a non-negligible impact on STW time. for _, p := range allp { + // The write barrier may have buffered pointers since + // the gcMarkDone barrier. However, since the barrier + // ensured all reachable objects were marked, all of + // these must be pointers to black objects. Hence we + // can just discard the write barrier buffer. + if debug.gccheckmark > 0 { + // For debugging, flush the buffer and make + // sure it really was all marked. + wbBufFlush1(p) + } else { + p.wbBuf.reset() + } + gcw := &p.gcw if !gcw.empty() { throw("P has cached GC work at end of mark termination") } - if gcw.scanWork != 0 || gcw.bytesMarked != 0 { - throw("P has unflushed stats at end of mark termination") - } + // There may still be cached empty buffers, which we + // need to flush since we're going to free them. Also, + // there may be non-zero stats because we allocated + // black after the gcMarkDone barrier. + gcw.dispose() } cachestats() @@ -2102,7 +2025,6 @@ func gcResetMarkState() { work.bytesMarked = 0 work.initialHeapLive = atomic.Load64(&memstats.heap_live) - work.markrootDone = false } // Hooks for other packages @@ -2149,48 +2071,6 @@ func clearpools() { unlock(&sched.deferlock) } -// gchelper runs mark termination tasks on Ps other than the P -// coordinating mark termination. -// -// The caller is responsible for ensuring that this has a P to run on, -// even though it's running during STW. Because of this, it's allowed -// to have write barriers. -// -//go:yeswritebarrierrec -func gchelper() { - _g_ := getg() - _g_.m.traceback = 2 - gchelperstart() - - // Parallel mark over GC roots and heap - if gcphase == _GCmarktermination { - gcw := &_g_.m.p.ptr().gcw - if work.helperDrainBlock { - gcDrain(gcw, gcDrainBlock) // blocks in getfull - } else { - gcDrain(gcw, gcDrainNoBlock) - } - gcw.dispose() - } - - nproc := atomic.Load(&work.nproc) // work.nproc can change right after we increment work.ndone - if atomic.Xadd(&work.ndone, +1) == nproc-1 { - notewakeup(&work.alldone) - } - _g_.m.traceback = 0 -} - -func gchelperstart() { - _g_ := getg() - - if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc { - throw("gchelperstart: bad m->helpgc") - } - if _g_ != _g_.m.g0 { - throw("gchelper not running on g0 stack") - } -} - // Timing // itoaDiv formats val/(10**dec) into buf. diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index 69ff895512..d4dcfb6cb9 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -52,11 +52,7 @@ const ( // //go:nowritebarrier func gcMarkRootPrepare() { - if gcphase == _GCmarktermination { - work.nFlushCacheRoots = int(gomaxprocs) - } else { - work.nFlushCacheRoots = 0 - } + work.nFlushCacheRoots = 0 // Compute how many data and BSS root blocks there are. nBlocks := func(bytes uintptr) int { @@ -66,62 +62,41 @@ func gcMarkRootPrepare() { work.nDataRoots = 0 work.nBSSRoots = 0 - // Only scan globals once per cycle; preferably concurrently. - if !work.markrootDone { - for _, datap := range activeModules() { - nDataRoots := nBlocks(datap.edata - datap.data) - if nDataRoots > work.nDataRoots { - work.nDataRoots = nDataRoots - } + // Scan globals. 
+ for _, datap := range activeModules() { + nDataRoots := nBlocks(datap.edata - datap.data) + if nDataRoots > work.nDataRoots { + work.nDataRoots = nDataRoots } + } - for _, datap := range activeModules() { - nBSSRoots := nBlocks(datap.ebss - datap.bss) - if nBSSRoots > work.nBSSRoots { - work.nBSSRoots = nBSSRoots - } + for _, datap := range activeModules() { + nBSSRoots := nBlocks(datap.ebss - datap.bss) + if nBSSRoots > work.nBSSRoots { + work.nBSSRoots = nBSSRoots } } - if !work.markrootDone { - // On the first markroot, we need to scan span roots. - // In concurrent GC, this happens during concurrent - // mark and we depend on addfinalizer to ensure the - // above invariants for objects that get finalizers - // after concurrent mark. In STW GC, this will happen - // during mark termination. - // - // We're only interested in scanning the in-use spans, - // which will all be swept at this point. More spans - // may be added to this list during concurrent GC, but - // we only care about spans that were allocated before - // this mark phase. - work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks() - - // On the first markroot, we need to scan all Gs. Gs - // may be created after this point, but it's okay that - // we ignore them because they begin life without any - // roots, so there's nothing to scan, and any roots - // they create during the concurrent phase will be - // scanned during mark termination. During mark - // termination, allglen isn't changing, so we'll scan - // all Gs. - work.nStackRoots = int(atomic.Loaduintptr(&allglen)) - } else { - // We've already scanned span roots and kept the scan - // up-to-date during concurrent mark. - work.nSpanRoots = 0 - - // The hybrid barrier ensures that stacks can't - // contain pointers to unmarked objects, so on the - // second markroot, there's no need to scan stacks. - work.nStackRoots = 0 + // Scan span roots for finalizer specials. + // + // We depend on addfinalizer to mark objects that get + // finalizers after root marking. + // + // We're only interested in scanning the in-use spans, + // which will all be swept at this point. More spans + // may be added to this list during concurrent GC, but + // we only care about spans that were allocated before + // this mark phase. + work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks() - if debug.gcrescanstacks > 0 { - // Scan stacks anyway for debugging. - work.nStackRoots = int(atomic.Loaduintptr(&allglen)) - } - } + // Scan stacks. + // + // Gs may be created after this point, but it's okay that we + // ignore them because they begin life without any roots, so + // there's nothing to scan, and any roots they create during + // the concurrent phase will be scanned during mark + // termination. + work.nStackRoots = int(atomic.Loaduintptr(&allglen)) work.markrootNext = 0 work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots) @@ -138,19 +113,10 @@ func gcMarkRootCheck() { lock(&allglock) // Check that stacks have been scanned. 
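Aside: with markrootDone gone, gcMarkRootPrepare above always counts every root class and publishes the total as work.markrootJobs. A small sketch of that bookkeeping follows, under stated assumptions: the 256 KiB root block size and the two fixed root jobs match the runtime at the time of this change but should be treated as illustrative here, and the region sizes are made up.

package main

import "fmt"

// rootBlockBytes is the chunk size used to split large root regions
// into independently schedulable jobs (assumed 256 KiB here).
const rootBlockBytes = 256 << 10

// nBlocks mirrors the helper in gcMarkRootPrepare: round the region
// size up to whole root blocks.
func nBlocks(bytes uintptr) int {
	return int((bytes + rootBlockBytes - 1) / rootBlockBytes)
}

func main() {
	const fixedRootCount = 2       // finalizer roots + free-G-stack job (assumed)
	dataBytes := uintptr(1 << 20)  // global data sections (illustrative)
	bssBytes := uintptr(300 << 10) // BSS sections (illustrative)
	spanRoots := 128               // span-root jobs: blocks of the in-use span list
	stackRoots := 50               // one job per goroutine stack

	jobs := fixedRootCount + nBlocks(dataBytes) + nBlocks(bssBytes) + spanRoots + stackRoots
	fmt.Println("work.markrootJobs this cycle:", jobs)
}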
var gp *g - if gcphase == _GCmarktermination && debug.gcrescanstacks > 0 { - for i := 0; i < len(allgs); i++ { - gp = allgs[i] - if !(gp.gcscandone && gp.gcscanvalid) && readgstatus(gp) != _Gdead { - goto fail - } - } - } else { - for i := 0; i < work.nStackRoots; i++ { - gp = allgs[i] - if !gp.gcscandone { - goto fail - } + for i := 0; i < work.nStackRoots; i++ { + gp = allgs[i] + if !gp.gcscandone { + goto fail } } unlock(&allglock) @@ -201,24 +167,15 @@ func markroot(gcw *gcWork, i uint32) { } case i == fixedRootFinalizers: - // Only do this once per GC cycle since we don't call - // queuefinalizer during marking. - if work.markrootDone { - break - } for fb := allfin; fb != nil; fb = fb.alllink { cnt := uintptr(atomic.Load(&fb.cnt)) - scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw) + scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil) } case i == fixedRootFreeGStacks: - // Only do this once per GC cycle; preferably - // concurrently. - if !work.markrootDone { - // Switch to the system stack so we can call - // stackfree. - systemstack(markrootFreeGStacks) - } + // Switch to the system stack so we can call + // stackfree. + systemstack(markrootFreeGStacks) case baseSpans <= i && i < baseStacks: // mark MSpan.specials @@ -291,7 +248,7 @@ func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) { } // Scan this shard. - scanblock(b, n, ptrmask, gcw) + scanblock(b, n, ptrmask, gcw, nil) } // markrootFreeGStacks frees stacks of dead Gs. @@ -342,10 +299,6 @@ func markrootSpans(gcw *gcWork, shard int) { // TODO(austin): There are several ideas for making this more // efficient in issue #11485. - if work.markrootDone { - throw("markrootSpans during second markroot") - } - sg := mheap_.sweepgen spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard) // Note that work.spans may not include spans that were @@ -358,7 +311,8 @@ func markrootSpans(gcw *gcWork, shard int) { if s.state != mSpanInUse { continue } - if !useCheckmark && s.sweepgen != sg { + // Check that this span was swept (it may be cached or uncached). + if !useCheckmark && !(s.sweepgen == sg || s.sweepgen == sg+3) { // sweepgen was updated (+2) during non-checkmark GC pass print("sweep ", s.sweepgen, " ", sg, "\n") throw("gc: unswept span") @@ -395,7 +349,7 @@ func markrootSpans(gcw *gcWork, shard int) { scanobject(p, gcw) // The special itself is a root. - scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw) + scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil) } unlock(&s.speciallock) @@ -556,11 +510,6 @@ func gcAssistAlloc1(gp *g, scanWork int64) { // will be more cache friendly. gcw := &getg().m.p.ptr().gcw workDone := gcDrainN(gcw, scanWork) - // If we are near the end of the mark phase - // dispose of the gcw. - if gcBlackenPromptly { - gcw.dispose() - } casgstatus(gp, _Gwaiting, _Grunning) @@ -577,8 +526,7 @@ func gcAssistAlloc1(gp *g, scanWork int64) { incnwait := atomic.Xadd(&work.nwait, +1) if incnwait > work.nproc { println("runtime: work.nwait=", incnwait, - "work.nproc=", work.nproc, - "gcBlackenPromptly=", gcBlackenPromptly) + "work.nproc=", work.nproc) throw("work.nwait > work.nproc") } @@ -737,53 +685,140 @@ func scanstack(gp *g, gcw *gcWork) { if gp == getg() { throw("can't scan our own stack") } - mp := gp.m - if mp != nil && mp.helpgc != 0 { - throw("can't scan gchelper stack") - } - // Shrink the stack if not much of it is being used. 
During - // concurrent GC, we can do this during concurrent mark. - if !work.markrootDone { - shrinkstack(gp) + // Shrink the stack if not much of it is being used. + shrinkstack(gp) + + var state stackScanState + state.stack = gp.stack + + if stackTraceDebug { + println("stack trace goroutine", gp.goid) } // Scan the saved context register. This is effectively a live // register that gets moved back and forth between the // register and sched.ctxt without a write barrier. if gp.sched.ctxt != nil { - scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw) + scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw, &state) } - // Scan the stack. - var cache pcvalueCache + // Scan the stack. Accumulate a list of stack objects. scanframe := func(frame *stkframe, unused unsafe.Pointer) bool { - scanframeworker(frame, &cache, gcw) + scanframeworker(frame, &state, gcw) return true } gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0) tracebackdefers(gp, scanframe, nil) + + // Find and scan all reachable stack objects. + state.buildIndex() + for { + p := state.getPtr() + if p == 0 { + break + } + obj := state.findObject(p) + if obj == nil { + continue + } + t := obj.typ + if t == nil { + // We've already scanned this object. + continue + } + obj.setType(nil) // Don't scan it again. + if stackTraceDebug { + println(" live stkobj at", hex(state.stack.lo+uintptr(obj.off)), "of type", t.string()) + } + gcdata := t.gcdata + var s *mspan + if t.kind&kindGCProg != 0 { + // This path is pretty unlikely, an object large enough + // to have a GC program allocated on the stack. + // We need some space to unpack the program into a straight + // bitmask, which we allocate/free here. + // TODO: it would be nice if there were a way to run a GC + // program without having to store all its bits. We'd have + // to change from a Lempel-Ziv style program to something else. + // Or we can forbid putting objects on stacks if they require + // a gc program (see issue 27447). + s = materializeGCProg(t.ptrdata, gcdata) + gcdata = (*byte)(unsafe.Pointer(s.startAddr)) + } + + scanblock(state.stack.lo+uintptr(obj.off), t.ptrdata, gcdata, gcw, &state) + + if s != nil { + dematerializeGCProg(s) + } + } + + // Deallocate object buffers. + // (Pointer buffers were all deallocated in the loop above.) + for state.head != nil { + x := state.head + state.head = x.next + if stackTraceDebug { + for _, obj := range x.obj[:x.nobj] { + if obj.typ == nil { // reachable + continue + } + println(" dead stkobj at", hex(gp.stack.lo+uintptr(obj.off)), "of type", obj.typ.string()) + // Note: not necessarily really dead - only reachable-from-ptr dead. + } + } + x.nobj = 0 + putempty((*workbuf)(unsafe.Pointer(x))) + } + if state.buf != nil || state.freeBuf != nil { + throw("remaining pointer buffers") + } + gp.gcscanvalid = true } // Scan a stack frame: local variables and function arguments/results. //go:nowritebarrier -func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) { +func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) { if _DebugGC > 1 && frame.continpc != 0 { print("scanframe ", funcname(frame.fn), "\n") } - locals, args := getStackMap(frame, cache, false) + locals, args, objs := getStackMap(frame, &state.cache, false) // Scan local variables if stack frame has been allocated. 
if locals.n > 0 { size := uintptr(locals.n) * sys.PtrSize - scanblock(frame.varp-size, size, locals.bytedata, gcw) + scanblock(frame.varp-size, size, locals.bytedata, gcw, state) } // Scan arguments. if args.n > 0 { - scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw) + scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw, state) + } + + // Add all stack objects to the stack object list. + if frame.varp != 0 { + // varp is 0 for defers, where there are no locals. + // In that case, there can't be a pointer to its args, either. + // (And all args would be scanned above anyway.) + for _, obj := range objs { + off := obj.off + base := frame.varp // locals base pointer + if off >= 0 { + base = frame.argp // arguments and return values base pointer + } + ptr := base + uintptr(off) + if ptr < frame.sp { + // object hasn't been allocated in the frame yet. + continue + } + if stackTraceDebug { + println("stkobj at", hex(ptr), "of type", obj.typ.string()) + } + state.addObject(ptr, obj.typ) + } } } @@ -791,34 +826,26 @@ type gcDrainFlags int const ( gcDrainUntilPreempt gcDrainFlags = 1 << iota - gcDrainNoBlock gcDrainFlushBgCredit gcDrainIdle gcDrainFractional - - // gcDrainBlock means neither gcDrainUntilPreempt or - // gcDrainNoBlock. It is the default, but callers should use - // the constant for documentation purposes. - gcDrainBlock gcDrainFlags = 0 ) // gcDrain scans roots and objects in work buffers, blackening grey -// objects until all roots and work buffers have been drained. +// objects until it is unable to get more work. It may return before +// GC is done; it's the caller's responsibility to balance work from +// other Ps. // // If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt -// is set. This implies gcDrainNoBlock. +// is set. // // If flags&gcDrainIdle != 0, gcDrain returns when there is other work -// to do. This implies gcDrainNoBlock. +// to do. // // If flags&gcDrainFractional != 0, gcDrain self-preempts when // pollFractionalWorkerExit() returns true. This implies // gcDrainNoBlock. // -// If flags&gcDrainNoBlock != 0, gcDrain returns as soon as it is -// unable to get more work. Otherwise, it will block until all -// blocking calls are blocked in gcDrain. -// // If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work // credit to gcController.bgScanCredit every gcCreditSlack units of // scan work. @@ -831,7 +858,6 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) { gp := getg().m.curg preemptible := flags&gcDrainUntilPreempt != 0 - blocking := flags&(gcDrainUntilPreempt|gcDrainIdle|gcDrainFractional|gcDrainNoBlock) == 0 flushBgCredit := flags&gcDrainFlushBgCredit != 0 idle := flags&gcDrainIdle != 0 @@ -875,17 +901,19 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) { gcw.balance() } - var b uintptr - if blocking { - b = gcw.get() - } else { - b = gcw.tryGetFast() + b := gcw.tryGetFast() + if b == 0 { + b = gcw.tryGet() if b == 0 { + // Flush the write barrier + // buffer; this may create + // more work. + wbBufFlush(nil, 0) b = gcw.tryGet() } } if b == 0 { - // work barrier reached or tryGet failed. + // Unable to get work. break } scanobject(b, gcw) @@ -911,10 +939,6 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) { } } - // In blocking mode, write barriers are not allowed after this - // point because we must preserve the condition that the work - // buffers are empty. - done: // Flush remaining scan work credit. 
if gcw.scanWork > 0 { @@ -963,6 +987,12 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 { b := gcw.tryGetFast() if b == 0 { b = gcw.tryGet() + if b == 0 { + // Flush the write barrier buffer; + // this may create more work. + wbBufFlush(nil, 0) + b = gcw.tryGet() + } } if b == 0 { @@ -1003,8 +1033,9 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 { // This is used to scan non-heap roots, so it does not update // gcw.bytesMarked or gcw.scanWork. // +// If stk != nil, possible stack pointers are also reported to stk.putPtr. //go:nowritebarrier -func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) { +func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) { // Use local copies of original parameters, so that a stack trace // due to one of the throws below shows the original block // base and extent. @@ -1021,10 +1052,12 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) { for j := 0; j < 8 && i < n; j++ { if bits&1 != 0 { // Same work as in scanobject; see comments there. - obj := *(*uintptr)(unsafe.Pointer(b + i)) - if obj != 0 { - if obj, span, objIndex := findObject(obj, b, i); obj != 0 { + p := *(*uintptr)(unsafe.Pointer(b + i)) + if p != 0 { + if obj, span, objIndex := findObject(p, b, i); obj != 0 { greyobject(obj, b, i, span, gcw, objIndex) + } else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi { + stk.putPtr(p) } } } @@ -1142,11 +1175,6 @@ func shade(b uintptr) { if obj, span, objIndex := findObject(b, 0, 0); obj != 0 { gcw := &getg().m.p.ptr().gcw greyobject(obj, 0, 0, span, gcw, objIndex) - if gcphase == _GCmarktermination || gcBlackenPromptly { - // Ps aren't allowed to cache work during mark - // termination. - gcw.dispose() - } } } @@ -1239,7 +1267,7 @@ func gcDumpObject(label string, obj, off uintptr) { skipped := false size := s.elemsize - if s.state == _MSpanManual && size == 0 { + if s.state == mSpanManual && size == 0 { // We're printing something from a stack frame. We // don't know how big it is, so just show up to an // including off. @@ -1276,18 +1304,13 @@ func gcDumpObject(label string, obj, off uintptr) { //go:nowritebarrier //go:nosplit func gcmarknewobject(obj, size, scanSize uintptr) { - if useCheckmark && !gcBlackenPromptly { // The world should be stopped so this should not happen. + if useCheckmark { // The world should be stopped so this should not happen. throw("gcmarknewobject called while doing checkmark") } markBitsForAddr(obj).setMarked() gcw := &getg().m.p.ptr().gcw gcw.bytesMarked += uint64(size) gcw.scanWork += int64(scanSize) - if gcBlackenPromptly { - // There shouldn't be anything in the work queue, but - // we still need to flush stats. - gcw.dispose() - } } // gcMarkTinyAllocs greys all active tiny alloc blocks. 
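Aside: with the blocking getfull barrier removed, gcDrain and gcDrainN above share one lookup order for more work: the per-P fast cache, then the global queue, then flushing the write-barrier buffer and trying the global queue once more before giving up. Here is a toy, single-threaded model of that order; workSource and its methods are illustrative and not the runtime's gcWork API.

package main

import "fmt"

// workSource mirrors the lookup order in gcDrain/gcDrainN above: a
// per-P fast cache, the shared queue, and, as a last resort, work
// hiding in the write-barrier buffer.
type workSource struct {
	fast   []int // per-P cache
	shared []int // global full-buffer list
	wbBuf  []int // pointers queued by the write barrier, not yet work
}

func (w *workSource) tryGetFast() (int, bool) {
	if n := len(w.fast); n > 0 {
		x := w.fast[n-1]
		w.fast = w.fast[:n-1]
		return x, true
	}
	return 0, false
}

func (w *workSource) tryGet() (int, bool) {
	if n := len(w.shared); n > 0 {
		x := w.shared[n-1]
		w.shared = w.shared[:n-1]
		return x, true
	}
	return 0, false
}

// wbBufFlush moves buffered write-barrier pointers onto the shared
// queue, which may make tryGet succeed where it just failed.
func (w *workSource) wbBufFlush() {
	w.shared = append(w.shared, w.wbBuf...)
	w.wbBuf = nil
}

func main() {
	w := &workSource{fast: []int{1}, wbBuf: []int{2, 3}}
	for {
		b, ok := w.tryGetFast()
		if !ok {
			b, ok = w.tryGet()
			if !ok {
				w.wbBufFlush() // may create more work
				b, ok = w.tryGet()
			}
		}
		if !ok {
			break // unable to get work; the drain returns
		}
		fmt.Println("scan object", b)
	}
}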
@@ -1302,9 +1325,6 @@ func gcMarkTinyAllocs() { _, span, objIndex := findObject(c.tiny, 0, 0) gcw := &p.gcw greyobject(c.tiny, 0, 0, span, gcw, objIndex) - if gcBlackenPromptly { - gcw.dispose() - } } } @@ -1335,7 +1355,7 @@ var useCheckmark = false func initCheckmarks() { useCheckmark = true for _, s := range mheap_.allspans { - if s.state == _MSpanInUse { + if s.state == mSpanInUse { heapBitsForAddr(s.base()).initCheckmarkSpan(s.layout()) } } @@ -1344,7 +1364,7 @@ func initCheckmarks() { func clearCheckmarks() { useCheckmark = false for _, s := range mheap_.allspans { - if s.state == _MSpanInUse { + if s.state == mSpanInUse { heapBitsForAddr(s.base()).clearCheckmarkSpan(s.layout()) } } diff --git a/src/runtime/mgcstack.go b/src/runtime/mgcstack.go new file mode 100644 index 0000000000..86e60d4381 --- /dev/null +++ b/src/runtime/mgcstack.go @@ -0,0 +1,330 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Garbage collector: stack objects and stack tracing +// See the design doc at https://docs.google.com/document/d/1un-Jn47yByHL7I0aVIP_uVCMxjdM5mpelJhiKlIqxkE/edit?usp=sharing +// Also see issue 22350. + +// Stack tracing solves the problem of determining which parts of the +// stack are live and should be scanned. It runs as part of scanning +// a single goroutine stack. +// +// Normally determining which parts of the stack are live is easy to +// do statically, as user code has explicit references (reads and +// writes) to stack variables. The compiler can do a simple dataflow +// analysis to determine liveness of stack variables at every point in +// the code. See cmd/compile/internal/gc/plive.go for that analysis. +// +// However, when we take the address of a stack variable, determining +// whether that variable is still live is less clear. We can still +// look for static accesses, but accesses through a pointer to the +// variable are difficult in general to track statically. That pointer +// can be passed among functions on the stack, conditionally retained, +// etc. +// +// Instead, we will track pointers to stack variables dynamically. +// All pointers to stack-allocated variables will themselves be on the +// stack somewhere (or in associated locations, like defer records), so +// we can find them all efficiently. +// +// Stack tracing is organized as a mini garbage collection tracing +// pass. The objects in this garbage collection are all the variables +// on the stack whose address is taken, and which themselves contain a +// pointer. We call these variables "stack objects". +// +// We begin by determining all the stack objects on the stack and all +// the statically live pointers that may point into the stack. We then +// process each pointer to see if it points to a stack object. If it +// does, we scan that stack object. It may contain pointers into the +// heap, in which case those pointers are passed to the main garbage +// collection. It may also contain pointers into the stack, in which +// case we add them to our set of stack pointers. +// +// Once we're done processing all the pointers (including the ones we +// added during processing), we've found all the stack objects that +// are live. Any dead stack objects are not scanned and their contents +// will not keep heap objects live. 
Unlike the main garbage +// collection, we can't sweep the dead stack objects; they live on in +// a moribund state until the stack frame that contains them is +// popped. +// +// A stack can look like this: +// +// +----------+ +// | foo() | +// | +------+ | +// | | A | | <---\ +// | +------+ | | +// | | | +// | +------+ | | +// | | B | | | +// | +------+ | | +// | | | +// +----------+ | +// | bar() | | +// | +------+ | | +// | | C | | <-\ | +// | +----|-+ | | | +// | | | | | +// | +----v-+ | | | +// | | D ---------/ +// | +------+ | | +// | | | +// +----------+ | +// | baz() | | +// | +------+ | | +// | | E -------/ +// | +------+ | +// | ^ | +// | F: --/ | +// | | +// +----------+ +// +// foo() calls bar() calls baz(). Each has a frame on the stack. +// foo() has stack objects A and B. +// bar() has stack objects C and D, with C pointing to D and D pointing to A. +// baz() has a stack object E pointing to C, and a local variable F pointing to E. +// +// Starting from the pointer in local variable F, we will eventually +// scan all of E, C, D, and A (in that order). B is never scanned +// because there is no live pointer to it. If B is also statically +// dead (meaning that foo() never accesses B again after it calls +// bar()), then B's pointers into the heap are not considered live. + +package runtime + +import ( + "runtime/internal/sys" + "unsafe" +) + +const stackTraceDebug = false + +// Buffer for pointers found during stack tracing. +// Must be smaller than or equal to workbuf. +// +//go:notinheap +type stackWorkBuf struct { + stackWorkBufHdr + obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / sys.PtrSize]uintptr +} + +// Header declaration must come after the buf declaration above, because of issue #14620. +// +//go:notinheap +type stackWorkBufHdr struct { + workbufhdr + next *stackWorkBuf // linked list of workbufs + // Note: we could theoretically repurpose lfnode.next as this next pointer. + // It would save 1 word, but that probably isn't worth busting open + // the lfnode API. +} + +// Buffer for stack objects found on a goroutine stack. +// Must be smaller than or equal to workbuf. +// +//go:notinheap +type stackObjectBuf struct { + stackObjectBufHdr + obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject +} + +//go:notinheap +type stackObjectBufHdr struct { + workbufhdr + next *stackObjectBuf +} + +func init() { + if unsafe.Sizeof(stackWorkBuf{}) > unsafe.Sizeof(workbuf{}) { + panic("stackWorkBuf too big") + } + if unsafe.Sizeof(stackObjectBuf{}) > unsafe.Sizeof(workbuf{}) { + panic("stackObjectBuf too big") + } +} + +// A stackObject represents a variable on the stack that has had +// its address taken. +// +//go:notinheap +type stackObject struct { + off uint32 // offset above stack.lo + size uint32 // size of object + typ *_type // type info (for ptr/nonptr bits). nil if object has been scanned. + left *stackObject // objects with lower addresses + right *stackObject // objects with higher addresses +} + +// obj.typ = typ, but with no write barrier. +//go:nowritebarrier +func (obj *stackObject) setType(typ *_type) { + // Types of stack objects are always in read-only memory, not the heap. + // So not using a write barrier is ok. + *(*uintptr)(unsafe.Pointer(&obj.typ)) = uintptr(unsafe.Pointer(typ)) +} + +// A stackScanState keeps track of the state used during the GC walk +// of a goroutine. 
+//
+//go:notinheap
+type stackScanState struct {
+	cache pcvalueCache
+
+	// stack limits
+	stack stack
+
+	// buf contains the set of possible pointers to stack objects.
+	// Organized as a LIFO linked list of buffers.
+	// All buffers except possibly the head buffer are full.
+	buf     *stackWorkBuf
+	freeBuf *stackWorkBuf // keep around one free buffer for allocation hysteresis
+
+	// list of stack objects
+	// Objects are in increasing address order.
+	head  *stackObjectBuf
+	tail  *stackObjectBuf
+	nobjs int
+
+	// root of binary tree for fast object lookup by address
+	// Initialized by buildIndex.
+	root *stackObject
+}
+
+// Add p as a potential pointer to a stack object.
+// p must be a stack address.
+func (s *stackScanState) putPtr(p uintptr) {
+	if p < s.stack.lo || p >= s.stack.hi {
+		throw("address not a stack address")
+	}
+	buf := s.buf
+	if buf == nil {
+		// Initial setup.
+		buf = (*stackWorkBuf)(unsafe.Pointer(getempty()))
+		buf.nobj = 0
+		buf.next = nil
+		s.buf = buf
+	} else if buf.nobj == len(buf.obj) {
+		if s.freeBuf != nil {
+			buf = s.freeBuf
+			s.freeBuf = nil
+		} else {
+			buf = (*stackWorkBuf)(unsafe.Pointer(getempty()))
+		}
+		buf.nobj = 0
+		buf.next = s.buf
+		s.buf = buf
+	}
+	buf.obj[buf.nobj] = p
+	buf.nobj++
+}
+
+// Remove and return a potential pointer to a stack object.
+// Returns 0 if there are no more pointers available.
+func (s *stackScanState) getPtr() uintptr {
+	buf := s.buf
+	if buf == nil {
+		// Never had any data.
+		return 0
+	}
+	if buf.nobj == 0 {
+		if s.freeBuf != nil {
+			// Free old freeBuf.
+			putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
+		}
+		// Move buf to the freeBuf.
+		s.freeBuf = buf
+		buf = buf.next
+		s.buf = buf
+		if buf == nil {
+			// No more data.
+			putempty((*workbuf)(unsafe.Pointer(s.freeBuf)))
+			s.freeBuf = nil
+			return 0
+		}
+	}
+	buf.nobj--
+	return buf.obj[buf.nobj]
+}
+
+// addObject adds a stack object at addr of type typ to the set of stack objects.
+func (s *stackScanState) addObject(addr uintptr, typ *_type) {
+	x := s.tail
+	if x == nil {
+		// initial setup
+		x = (*stackObjectBuf)(unsafe.Pointer(getempty()))
+		x.next = nil
+		s.head = x
+		s.tail = x
+	}
+	if x.nobj > 0 && uint32(addr-s.stack.lo) < x.obj[x.nobj-1].off+x.obj[x.nobj-1].size {
+		throw("objects added out of order or overlapping")
+	}
+	if x.nobj == len(x.obj) {
+		// full buffer - allocate a new buffer, add to end of linked list
+		y := (*stackObjectBuf)(unsafe.Pointer(getempty()))
+		y.next = nil
+		x.next = y
+		s.tail = y
+		x = y
+	}
+	obj := &x.obj[x.nobj]
+	x.nobj++
+	obj.off = uint32(addr - s.stack.lo)
+	obj.size = uint32(typ.size)
+	obj.setType(typ)
+	// obj.left and obj.right will be initialized by buildIndex before use.
+	s.nobjs++
+}
+
+// buildIndex initializes s.root to a binary search tree.
+// It should be called after all addObject calls but before
+// any call of findObject.
+func (s *stackScanState) buildIndex() {
+	s.root, _, _ = binarySearchTree(s.head, 0, s.nobjs)
+}
+
+// Build a binary search tree with the n objects in the list
+// x.obj[idx], x.obj[idx+1], ..., x.next.obj[0], ...
+// Returns the root of that tree, and the buf+idx of the nth object after x.obj[idx].
+// (The first object that was not included in the binary search tree.)
+// If n == 0, returns nil, x, idx.
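Taken together, the API above is driven like a small mark loop: register the stack objects, index them with buildIndex (the binarySearchTree helper just below builds the lookup tree that findObject walks), then drain the candidate pointers. A hedged sketch of that flow, written as if inside package runtime using the types above; the real driver lives in the stack scanning code, and the scan step is only described in comments here:

// drainStackPointers is an illustrative driver, not part of this file.
func drainStackPointers(s *stackScanState) {
	s.buildIndex() // must follow all addObject calls
	for {
		p := s.getPtr()
		if p == 0 {
			break // no candidate pointers remain
		}
		obj := s.findObject(p)
		if obj == nil || obj.typ == nil {
			continue // not a stack object, or already scanned
		}
		t := obj.typ
		obj.setType(nil) // a nil typ marks the object as scanned
		// Here the real code scans the obj.size bytes at
		// s.stack.lo + uintptr(obj.off) using t's pointer bitmap:
		// heap pointers are greyed through a gcWork, and pointers
		// back into the stack are fed to s.putPtr, which is why
		// the loop keeps draining until getPtr returns 0.
		_ = t
	}
}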
+func binarySearchTree(x *stackObjectBuf, idx int, n int) (root *stackObject, restBuf *stackObjectBuf, restIdx int) { + if n == 0 { + return nil, x, idx + } + var left, right *stackObject + left, x, idx = binarySearchTree(x, idx, n/2) + root = &x.obj[idx] + idx++ + if idx == len(x.obj) { + x = x.next + idx = 0 + } + right, x, idx = binarySearchTree(x, idx, n-n/2-1) + root.left = left + root.right = right + return root, x, idx +} + +// findObject returns the stack object containing address a, if any. +// Must have called buildIndex previously. +func (s *stackScanState) findObject(a uintptr) *stackObject { + off := uint32(a - s.stack.lo) + obj := s.root + for { + if obj == nil { + return nil + } + if off < obj.off { + obj = obj.left + continue + } + if off >= obj.off+obj.size { + obj = obj.right + continue + } + return obj + } +} diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go index c7baa455fe..00950aede2 100644 --- a/src/runtime/mgcsweep.go +++ b/src/runtime/mgcsweep.go @@ -88,10 +88,11 @@ func sweepone() uintptr { } atomic.Xadd(&mheap_.sweepers, +1) - npages := ^uintptr(0) + // Find a span to sweep. + var s *mspan sg := mheap_.sweepgen for { - s := mheap_.sweepSpans[1-sg/2%2].pop() + s = mheap_.sweepSpans[1-sg/2%2].pop() if s == nil { atomic.Store(&mheap_.sweepdone, 1) break @@ -106,9 +107,14 @@ func sweepone() uintptr { } continue } - if s.sweepgen != sg-2 || !atomic.Cas(&s.sweepgen, sg-2, sg-1) { - continue + if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) { + break } + } + + // Sweep the span we found. + npages := ^uintptr(0) + if s != nil { npages = s.npages if !s.sweep(false) { // Span is still in-use, so this returned no @@ -116,7 +122,6 @@ func sweepone() uintptr { // move to the swept in-use list. npages = 0 } - break } // Decrement the number of active sweepers and if this is the @@ -156,16 +161,21 @@ func (s *mspan) ensureSwept() { } sg := mheap_.sweepgen - if atomic.Load(&s.sweepgen) == sg { + spangen := atomic.Load(&s.sweepgen) + if spangen == sg || spangen == sg+3 { return } - // The caller must be sure that the span is a MSpanInUse span. + // The caller must be sure that the span is a mSpanInUse span. if atomic.Cas(&s.sweepgen, sg-2, sg-1) { s.sweep(false) return } // unfortunate condition, and we don't have efficient means to wait - for atomic.Load(&s.sweepgen) != sg { + for { + spangen := atomic.Load(&s.sweepgen) + if spangen == sg || spangen == sg+3 { + break + } osyield() } } @@ -339,18 +349,18 @@ func (s *mspan) sweep(preserve bool) bool { // Free large span to heap // NOTE(rsc,dvyukov): The original implementation of efence - // in CL 22060046 used SysFree instead of SysFault, so that + // in CL 22060046 used sysFree instead of sysFault, so that // the operating system would eventually give the memory // back to us again, so that an efence program could run // longer without running out of memory. Unfortunately, - // calling SysFree here without any kind of adjustment of the + // calling sysFree here without any kind of adjustment of the // heap data structures means that when the memory does // come back to us, we have the wrong metadata for it, either in // the MSpan structures or in the garbage collection bitmap. - // Using SysFault here means that the program will run out of + // Using sysFault here means that the program will run out of // memory fairly quickly in efence mode, but at least it won't // have mysterious crashes due to confused memory reuse. 
- // It should be possible to switch back to SysFree if we also + // It should be possible to switch back to sysFree if we also // implement and then call some kind of MHeap_DeleteSpan. if debug.efence > 0 { s.limit = 0 // prevent mlookup from finding this span diff --git a/src/runtime/mgcwork.go b/src/runtime/mgcwork.go index 99771e2e57..c32c5eddd7 100644 --- a/src/runtime/mgcwork.go +++ b/src/runtime/mgcwork.go @@ -46,10 +46,7 @@ func init() { // // (preemption must be disabled) // gcw := &getg().m.p.ptr().gcw -// .. call gcw.put() to produce and gcw.get() to consume .. -// if gcBlackenPromptly { -// gcw.dispose() -// } +// .. call gcw.put() to produce and gcw.tryGet() to consume .. // // It's important that any use of gcWork during the mark phase prevent // the garbage collector from transitioning to mark termination since @@ -83,6 +80,12 @@ type gcWork struct { // Scan work performed on this gcWork. This is aggregated into // gcController by dispose and may also be flushed by callers. scanWork int64 + + // flushedWork indicates that a non-empty work buffer was + // flushed to the global work list since the last gcMarkDone + // termination check. Specifically, this indicates that this + // gcWork may have communicated work to another gcWork. + flushedWork bool } // Most of the methods of gcWork are go:nowritebarrierrec because the @@ -116,6 +119,7 @@ func (w *gcWork) put(obj uintptr) { wbuf = w.wbuf1 if wbuf.nobj == len(wbuf.obj) { putfull(wbuf) + w.flushedWork = true wbuf = getempty() w.wbuf1 = wbuf flushed = true @@ -169,6 +173,7 @@ func (w *gcWork) putBatch(obj []uintptr) { for len(obj) > 0 { for wbuf.nobj == len(wbuf.obj) { putfull(wbuf) + w.flushedWork = true w.wbuf1, w.wbuf2 = w.wbuf2, getempty() wbuf = w.wbuf1 flushed = true @@ -231,37 +236,6 @@ func (w *gcWork) tryGetFast() uintptr { return wbuf.obj[wbuf.nobj] } -// get dequeues a pointer for the garbage collector to trace, blocking -// if necessary to ensure all pointers from all queues and caches have -// been retrieved. get returns 0 if there are no pointers remaining. -//go:nowritebarrierrec -func (w *gcWork) get() uintptr { - wbuf := w.wbuf1 - if wbuf == nil { - w.init() - wbuf = w.wbuf1 - // wbuf is empty at this point. - } - if wbuf.nobj == 0 { - w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1 - wbuf = w.wbuf1 - if wbuf.nobj == 0 { - owbuf := wbuf - wbuf = getfull() - if wbuf == nil { - return 0 - } - putempty(owbuf) - w.wbuf1 = wbuf - } - } - - // TODO: This might be a good place to add prefetch code - - wbuf.nobj-- - return wbuf.obj[wbuf.nobj] -} - // dispose returns any cached pointers to the global queue. // The buffers are being put on the full queue so that the // write barriers will not simply reacquire them before the @@ -275,6 +249,7 @@ func (w *gcWork) dispose() { putempty(wbuf) } else { putfull(wbuf) + w.flushedWork = true } w.wbuf1 = nil @@ -283,6 +258,7 @@ func (w *gcWork) dispose() { putempty(wbuf) } else { putfull(wbuf) + w.flushedWork = true } w.wbuf2 = nil } @@ -309,9 +285,11 @@ func (w *gcWork) balance() { } if wbuf := w.wbuf2; wbuf.nobj != 0 { putfull(wbuf) + w.flushedWork = true w.wbuf2 = getempty() } else if wbuf := w.wbuf1; wbuf.nobj > 4 { w.wbuf1 = handoff(wbuf) + w.flushedWork = true // handoff did putfull } else { return } @@ -440,61 +418,6 @@ func trygetfull() *workbuf { return b } -// Get a full work buffer off the work.full list. -// If nothing is available wait until all the other gc helpers have -// finished and then return nil. -// getfull acts as a barrier for work.nproc helpers. 
As long as one -// gchelper is actively marking objects it -// may create a workbuffer that the other helpers can work on. -// The for loop either exits when a work buffer is found -// or when _all_ of the work.nproc GC helpers are in the loop -// looking for work and thus not capable of creating new work. -// This is in fact the termination condition for the STW mark -// phase. -//go:nowritebarrier -func getfull() *workbuf { - b := (*workbuf)(work.full.pop()) - if b != nil { - b.checknonempty() - return b - } - - incnwait := atomic.Xadd(&work.nwait, +1) - if incnwait > work.nproc { - println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc) - throw("work.nwait > work.nproc") - } - for i := 0; ; i++ { - if work.full != 0 { - decnwait := atomic.Xadd(&work.nwait, -1) - if decnwait == work.nproc { - println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc) - throw("work.nwait > work.nproc") - } - b = (*workbuf)(work.full.pop()) - if b != nil { - b.checknonempty() - return b - } - incnwait := atomic.Xadd(&work.nwait, +1) - if incnwait > work.nproc { - println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc) - throw("work.nwait > work.nproc") - } - } - if work.nwait == work.nproc && work.markrootNext >= work.markrootJobs { - return nil - } - if i < 10 { - procyield(20) - } else if i < 20 { - osyield() - } else { - usleep(100) - } - } -} - //go:nowritebarrier func handoff(b *workbuf) *workbuf { // Make new buffer with half of b's pointers. diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index 00ecfa2d66..7a11bdc058 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -82,7 +82,7 @@ type mheap struct { // accounting for current progress. If we could only adjust // the slope, it would create a discontinuity in debt if any // progress has already been made. - pagesInUse uint64 // pages of spans in stats _MSpanInUse; R/W with mheap.lock + pagesInUse uint64 // pages of spans in stats mSpanInUse; R/W with mheap.lock pagesSwept uint64 // pages swept this cycle; updated atomically pagesSweptBasis uint64 // pagesSwept to use as the origin of the sweep ratio; updated atomically sweepHeapLiveBasis uint64 // value of heap_live to use as the origin of sweep ratio; written with lock, read without @@ -199,18 +199,18 @@ type arenaHint struct { // An MSpan is a run of pages. // -// When a MSpan is in the heap free list, state == MSpanFree +// When a MSpan is in the heap free list, state == mSpanFree // and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span. // -// When a MSpan is allocated, state == MSpanInUse or MSpanManual +// When a MSpan is allocated, state == mSpanInUse or mSpanManual // and heapmap(i) == span for all s->start <= i < s->start+s->npages. // Every MSpan is in one doubly-linked list, // either one of the MHeap's free lists or one of the // MCentral's span lists. -// An MSpan representing actual memory has state _MSpanInUse, -// _MSpanManual, or _MSpanFree. Transitions between these states are +// An MSpan representing actual memory has state mSpanInUse, +// mSpanManual, or mSpanFree. 
Transitions between these states are // constrained as follows: // // * A span may transition from free to in-use or manual during any GC @@ -226,19 +226,19 @@ type arenaHint struct { type mSpanState uint8 const ( - _MSpanDead mSpanState = iota - _MSpanInUse // allocated for garbage collected heap - _MSpanManual // allocated for manual management (e.g., stack allocator) - _MSpanFree + mSpanDead mSpanState = iota + mSpanInUse // allocated for garbage collected heap + mSpanManual // allocated for manual management (e.g., stack allocator) + mSpanFree ) // mSpanStateNames are the names of the span states, indexed by // mSpanState. var mSpanStateNames = []string{ - "_MSpanDead", - "_MSpanInUse", - "_MSpanManual", - "_MSpanFree", + "mSpanDead", + "mSpanInUse", + "mSpanManual", + "mSpanFree", } // mSpanList heads a linked list of spans. @@ -258,7 +258,7 @@ type mspan struct { startAddr uintptr // address of first byte of span aka s.base() npages uintptr // number of pages in span - manualFreeList gclinkptr // list of free objects in _MSpanManual spans + manualFreeList gclinkptr // list of free objects in mSpanManual spans // freeindex is the slot index between 0 and nelems at which to begin scanning // for the next free object in this span. @@ -317,6 +317,8 @@ type mspan struct { // if sweepgen == h->sweepgen - 2, the span needs sweeping // if sweepgen == h->sweepgen - 1, the span is currently being swept // if sweepgen == h->sweepgen, the span is swept and ready to use + // if sweepgen == h->sweepgen + 1, the span was cached before sweep began and is still cached, and needs sweeping + // if sweepgen == h->sweepgen + 3, the span was swept and then cached and is still cached // h->sweepgen is incremented by 2 after every GC sweepgen uint32 @@ -324,7 +326,6 @@ type mspan struct { baseMask uint16 // if non-0, elemsize is a power of 2, & this will get object allocation base allocCount uint16 // number of allocated objects spanclass spanClass // size class and noscan (uint8) - incache bool // being used by an mcache state mSpanState // mspaninuse etc needzero uint8 // needs to be zeroed before allocation divShift uint8 // for divide by elemsize - divMagic.shift @@ -458,7 +459,7 @@ func (i arenaIdx) l2() uint { } // inheap reports whether b is a pointer into a (potentially dead) heap object. -// It returns false for pointers into _MSpanManual spans. +// It returns false for pointers into mSpanManual spans. // Non-preemptible because it is used by write barriers. //go:nowritebarrier //go:nosplit @@ -477,7 +478,7 @@ func inHeapOrStack(b uintptr) bool { return false } switch s.state { - case mSpanInUse, _MSpanManual: + case mSpanInUse, mSpanManual: return b < s.limit default: return false @@ -696,7 +697,7 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan { // able to map interior pointer to containing span. atomic.Store(&s.sweepgen, h.sweepgen) h.sweepSpans[h.sweepgen/2%2].push(s) // Add to swept in-use list. - s.state = _MSpanInUse + s.state = mSpanInUse s.allocCount = 0 s.spanclass = spanclass if sizeclass := spanclass.sizeclass(); sizeclass == 0 { @@ -788,7 +789,7 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan { lock(&h.lock) s := h.allocSpanLocked(npage, stat) if s != nil { - s.state = _MSpanManual + s.state = mSpanManual s.manualFreeList = 0 s.allocCount = 0 s.spanclass = 0 @@ -829,7 +830,7 @@ func (h *mheap) setSpans(base, npage uintptr, s *mspan) { // Allocates a span of the given size. h must be locked. 
// The returned span has been removed from the -// free list, but its state is still MSpanFree. +// free list, but its state is still mSpanFree. func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan { var list *mSpanList var s *mspan @@ -857,7 +858,7 @@ func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan { HaveSpan: // Mark span in use. - if s.state != _MSpanFree { + if s.state != mSpanFree { throw("MHeap_AllocLocked - MSpan not free") } if s.npages < npage { @@ -878,10 +879,10 @@ HaveSpan: h.setSpan(t.base(), t) h.setSpan(t.base()+t.npages*pageSize-1, t) t.needzero = s.needzero - s.state = _MSpanManual // prevent coalescing with s - t.state = _MSpanManual + s.state = mSpanManual // prevent coalescing with s + t.state = mSpanManual h.freeSpanLocked(t, false, false, s.unusedsince) - s.state = _MSpanFree + s.state = mSpanFree } s.unusedsince = 0 @@ -930,7 +931,7 @@ func (h *mheap) grow(npage uintptr) bool { s.init(uintptr(v), size/pageSize) h.setSpans(s.base(), s.npages, s) atomic.Store(&s.sweepgen, h.sweepgen) - s.state = _MSpanInUse + s.state = mSpanInUse h.pagesInUse += uint64(s.npages) h.freeSpanLocked(s, false, true, 0) return true @@ -986,11 +987,11 @@ func (h *mheap) freeManual(s *mspan, stat *uint64) { // s must be on a busy list (h.busy or h.busylarge) or unlinked. func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) { switch s.state { - case _MSpanManual: + case mSpanManual: if s.allocCount != 0 { throw("MHeap_FreeSpanLocked - invalid stack free") } - case _MSpanInUse: + case mSpanInUse: if s.allocCount != 0 || s.sweepgen != h.sweepgen { print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n") throw("MHeap_FreeSpanLocked - invalid free") @@ -1006,7 +1007,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i if acctidle { memstats.heap_idle += uint64(s.npages << _PageShift) } - s.state = _MSpanFree + s.state = mSpanFree if s.inList() { h.busyList(s.npages).remove(s) } @@ -1020,7 +1021,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i s.npreleased = 0 // Coalesce with earlier, later spans. - if before := spanOf(s.base() - 1); before != nil && before.state == _MSpanFree { + if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree { // Now adjust s. s.startAddr = before.startAddr s.npages += before.npages @@ -1035,12 +1036,12 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i } else { h.freeList(before.npages).remove(before) } - before.state = _MSpanDead + before.state = mSpanDead h.spanalloc.free(unsafe.Pointer(before)) } // Now check to see if next (greater addresses) span is free and can be coalesced. 
- if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == _MSpanFree { + if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree { s.npages += after.npages s.npreleased += after.npreleased s.needzero |= after.needzero @@ -1050,7 +1051,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i } else { h.freeList(after.npages).remove(after) } - after.state = _MSpanDead + after.state = mSpanDead h.spanalloc.free(unsafe.Pointer(after)) } @@ -1185,9 +1186,8 @@ func (span *mspan) init(base uintptr, npages uintptr) { span.npages = npages span.allocCount = 0 span.spanclass = 0 - span.incache = false span.elemsize = 0 - span.state = _MSpanDead + span.state = mSpanDead span.unusedsince = 0 span.npreleased = 0 span.speciallock.key = 0 @@ -1437,10 +1437,7 @@ func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *p scanobject(base, gcw) // Mark the finalizer itself, since the // special isn't part of the GC'd heap. - scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw) - if gcBlackenPromptly { - gcw.dispose() - } + scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil) releasem(mp) } return true diff --git a/src/runtime/mmap.go b/src/runtime/mmap.go index fe09e7029e..2868f3fd4e 100644 --- a/src/runtime/mmap.go +++ b/src/runtime/mmap.go @@ -10,6 +10,7 @@ // +build !linux !arm64 // +build !js // +build !darwin +// +build !aix package runtime diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index 43e4810d97..2bd41b650f 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -723,7 +723,7 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) { isOK := func(gp1 *g) bool { // Checking isSystemGoroutine here makes GoroutineProfile // consistent with both NumGoroutine and Stack. - return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1) + return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false) } stopTheWorld("profile") diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go index f67d05414d..1bd6566052 100644 --- a/src/runtime/mstats.go +++ b/src/runtime/mstats.go @@ -38,7 +38,7 @@ type mstats struct { heap_alloc uint64 // bytes allocated and not yet freed (same as alloc above) heap_sys uint64 // virtual address space obtained from system for GC'd heap heap_idle uint64 // bytes in idle spans - heap_inuse uint64 // bytes in _MSpanInUse spans + heap_inuse uint64 // bytes in mSpanInUse spans heap_released uint64 // bytes released to the os heap_objects uint64 // total number of allocated objects diff --git a/src/runtime/mwbbuf.go b/src/runtime/mwbbuf.go index 4df16d55b8..f35f7286ac 100644 --- a/src/runtime/mwbbuf.go +++ b/src/runtime/mwbbuf.go @@ -79,7 +79,7 @@ const ( func (b *wbBuf) reset() { start := uintptr(unsafe.Pointer(&b.buf[0])) b.next = start - if gcBlackenPromptly || writeBarrier.cgo { + if writeBarrier.cgo { // Effectively disable the buffer by forcing a flush // on every barrier. b.end = uintptr(unsafe.Pointer(&b.buf[wbBufEntryPointers])) @@ -107,6 +107,11 @@ func (b *wbBuf) discard() { b.next = uintptr(unsafe.Pointer(&b.buf[0])) } +// empty returns true if b contains no pointers. +func (b *wbBuf) empty() bool { + return b.next == uintptr(unsafe.Pointer(&b.buf[0])) +} + // putFast adds old and new to the write barrier buffer and returns // false if a flush is necessary. Callers should use this as: // @@ -270,9 +275,4 @@ func wbBufFlush1(_p_ *p) { // Enqueue the greyed objects. 
gcw.putBatch(ptrs[:pos]) - if gcphase == _GCmarktermination || gcBlackenPromptly { - // Ps aren't allowed to cache work during mark - // termination. - gcw.dispose() - } } diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go index f9c422650a..da822a7308 100644 --- a/src/runtime/netpoll.go +++ b/src/runtime/netpoll.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows +// +build aix darwin dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris windows package runtime @@ -166,8 +166,8 @@ func poll_runtime_pollWait(pd *pollDesc, mode int) int { if err != 0 { return err } - // As for now only Solaris uses level-triggered IO. - if GOOS == "solaris" { + // As for now only Solaris and AIX use level-triggered IO. + if GOOS == "solaris" || GOOS == "aix" { netpollarm(pd, mode) } for !netpollblock(pd, int32(mode), false) { diff --git a/src/runtime/netpoll_aix.go b/src/runtime/netpoll_aix.go new file mode 100644 index 0000000000..1e886dae94 --- /dev/null +++ b/src/runtime/netpoll_aix.go @@ -0,0 +1,247 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +// This is based on the former libgo/runtime/netpoll_select.c implementation +// except that it uses poll instead of select and is written in Go. +// It's also based on Solaris implementation for the arming mechanisms + +//go:cgo_import_dynamic libc_poll poll "libc.a/shr_64.o" +//go:linkname libc_poll libc_poll + +var libc_poll libFunc + +//go:nosplit +func poll(pfds *pollfd, npfds uintptr, timeout uintptr) (int32, int32) { + r, err := syscall3(&libc_poll, uintptr(unsafe.Pointer(pfds)), npfds, timeout) + return int32(r), int32(err) +} + +//go:nosplit +func fcntl(fd, cmd int32, arg uintptr) int32 { + r, _ := syscall3(&libc_fcntl, uintptr(fd), uintptr(cmd), arg) + return int32(r) +} + +// pollfd represents the poll structure for AIX operating system. +type pollfd struct { + fd int32 + events int16 + revents int16 +} + +const _POLLIN = 0x0001 +const _POLLOUT = 0x0002 +const _POLLHUP = 0x2000 +const _POLLERR = 0x4000 +const _O_NONBLOCK = 0x4 + +var ( + pfds []pollfd + pds []*pollDesc + mtxpoll mutex + mtxset mutex + rdwake int32 + wrwake int32 + pendingUpdates int32 +) + +const pollVerbose = false + +func netpollinit() { + var p [2]int32 + + // Create the pipe we use to wakeup poll. + if err := pipe(&p[0]); err < 0 { + throw("netpollinit: failed to create pipe") + } + rdwake = p[0] + wrwake = p[1] + + fl := uintptr(fcntl(rdwake, _F_GETFL, 0)) + fcntl(rdwake, _F_SETFL, fl|_O_NONBLOCK) + fcntl(rdwake, _F_SETFD, _FD_CLOEXEC) + + fl = uintptr(fcntl(wrwake, _F_GETFL, 0)) + fcntl(wrwake, _F_SETFL, fl|_O_NONBLOCK) + fcntl(wrwake, _F_SETFD, _FD_CLOEXEC) + + // Pre-allocate array of pollfd structures for poll. + if pollVerbose { + println("*** allocating") + } + pfds = make([]pollfd, 1, 128) + if pollVerbose { + println("*** allocating done", &pfds[0]) + } + + // Poll the read side of the pipe. 
+ pfds[0].fd = rdwake + pfds[0].events = _POLLIN + + pds = make([]*pollDesc, 1, 128) + pds[0] = nil +} + +func netpolldescriptor() uintptr { + // Both fd must be returned + if rdwake > 0xFFFF || wrwake > 0xFFFF { + throw("netpolldescriptor: invalid fd number") + } + return uintptr(rdwake<<16 | wrwake) +} + +// netpollwakeup writes on wrwake to wakeup poll before any changes. +func netpollwakeup() { + if pendingUpdates == 0 { + pendingUpdates = 1 + if pollVerbose { + println("*** writing 1 byte") + } + b := [1]byte{0} + write(uintptr(wrwake), unsafe.Pointer(&b[0]), 1) + } +} + +func netpollopen(fd uintptr, pd *pollDesc) int32 { + if pollVerbose { + println("*** netpollopen", fd) + } + lock(&mtxpoll) + netpollwakeup() + + lock(&mtxset) + unlock(&mtxpoll) + + pd.user = uint32(len(pfds)) + pfds = append(pfds, pollfd{fd: int32(fd)}) + pds = append(pds, pd) + unlock(&mtxset) + return 0 +} + +func netpollclose(fd uintptr) int32 { + if pollVerbose { + println("*** netpollclose", fd) + } + lock(&mtxpoll) + netpollwakeup() + + lock(&mtxset) + unlock(&mtxpoll) + + for i := 0; i < len(pfds); i++ { + if pfds[i].fd == int32(fd) { + pfds[i] = pfds[len(pfds)-1] + pfds = pfds[:len(pfds)-1] + + pds[i] = pds[len(pds)-1] + pds[i].user = uint32(i) + pds = pds[:len(pds)-1] + break + } + } + unlock(&mtxset) + return 0 +} + +func netpollarm(pd *pollDesc, mode int) { + if pollVerbose { + println("*** netpollarm", pd.fd, mode) + } + lock(&mtxpoll) + netpollwakeup() + + lock(&mtxset) + unlock(&mtxpoll) + + switch mode { + case 'r': + pfds[pd.user].events |= _POLLIN + case 'w': + pfds[pd.user].events |= _POLLOUT + } + unlock(&mtxset) +} + +//go:nowritebarrierrec +func netpoll(block bool) gList { + timeout := ^uintptr(0) + if !block { + timeout = 0 + return gList{} + } + if pollVerbose { + println("*** netpoll", block) + } +retry: + lock(&mtxpoll) + lock(&mtxset) + pendingUpdates = 0 + unlock(&mtxpoll) + + if pollVerbose { + println("*** netpoll before poll") + } + n, e := poll(&pfds[0], uintptr(len(pfds)), timeout) + if pollVerbose { + println("*** netpoll after poll", n) + } + if n < 0 { + if e != _EINTR { + println("errno=", e, " len(pfds)=", len(pfds)) + throw("poll failed") + } + if pollVerbose { + println("*** poll failed") + } + unlock(&mtxset) + goto retry + } + // Check if some descriptors need to be changed + if n != 0 && pfds[0].revents&(_POLLIN|_POLLHUP|_POLLERR) != 0 { + var b [1]byte + for read(rdwake, unsafe.Pointer(&b[0]), 1) == 1 { + if pollVerbose { + println("*** read 1 byte from pipe") + } + } + // Do not look at the other fds in this case as the mode may have changed + // XXX only additions of flags are made, so maybe it is ok + unlock(&mtxset) + goto retry + } + var toRun gList + for i := 0; i < len(pfds) && n > 0; i++ { + pfd := &pfds[i] + + var mode int32 + if pfd.revents&(_POLLIN|_POLLHUP|_POLLERR) != 0 { + mode += 'r' + pfd.events &= ^_POLLIN + } + if pfd.revents&(_POLLOUT|_POLLHUP|_POLLERR) != 0 { + mode += 'w' + pfd.events &= ^_POLLOUT + } + if mode != 0 { + if pollVerbose { + println("*** netpollready i=", i, "revents=", pfd.revents, "events=", pfd.events, "pd=", pds[i]) + } + netpollready(&toRun, pds[i], mode) + n-- + } + } + unlock(&mtxset) + if block && toRun.empty() { + goto retry + } + if pollVerbose { + println("*** netpoll returning end") + } + return toRun +} diff --git a/src/runtime/os2_aix.go b/src/runtime/os2_aix.go new file mode 100644 index 0000000000..9e26ce23fc --- /dev/null +++ b/src/runtime/os2_aix.go @@ -0,0 +1,479 @@ +// Copyright 2018 The Go Authors. 
All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the main runtime AIX syscalls.
+// Pollset syscalls are in netpoll_aix.go.
+// The implementation is based on Solaris and Windows.
+// Each syscall is made by calling its libc symbol using the asmcgocall and
+// asmsyscall6 assembly functions.
+
+package runtime
+
+import (
+	"unsafe"
+)
+
+// Symbols imported for __start function.
+
+//go:cgo_import_dynamic libc___n_pthreads __n_pthreads "libpthread.a/shr_xpg5_64.o"
+//go:cgo_import_dynamic libc___mod_init __mod_init "libc.a/shr_64.o"
+//go:linkname libc___n_pthreads libc___n_pthread
+//go:linkname libc___mod_init libc___mod_init
+
+var (
+	libc___n_pthread,
+	libc___mod_init libFunc
+)
+
+// Syscalls
+
+//go:cgo_import_dynamic libc__Errno _Errno "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_close close "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_exit exit "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_getpid getpid "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_kill kill "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_madvise madvise "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_malloc malloc "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_mmap mmap "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_munmap munmap "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_open open "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_pipe pipe "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_raise raise "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_read read "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_sched_yield sched_yield "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_sem_init sem_init "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_sem_post sem_post "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_sem_timedwait sem_timedwait "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_sem_wait sem_wait "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_setitimer setitimer "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_sigaction sigaction "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_sigaltstack sigaltstack "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_sysconf sysconf "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_usleep usleep "libc.a/shr_64.o"
+//go:cgo_import_dynamic libc_write write "libc.a/shr_64.o"
+
+//go:cgo_import_dynamic libpthread___pth_init __pth_init "libpthread.a/shr_xpg5_64.o"
+//go:cgo_import_dynamic libpthread_attr_destroy pthread_attr_destroy "libpthread.a/shr_xpg5_64.o"
+//go:cgo_import_dynamic libpthread_attr_init pthread_attr_init "libpthread.a/shr_xpg5_64.o"
+//go:cgo_import_dynamic libpthread_attr_getstacksize pthread_attr_getstacksize "libpthread.a/shr_xpg5_64.o"
+//go:cgo_import_dynamic libpthread_attr_setstacksize pthread_attr_setstacksize "libpthread.a/shr_xpg5_64.o"
+//go:cgo_import_dynamic libpthread_attr_setdetachstate pthread_attr_setdetachstate "libpthread.a/shr_xpg5_64.o"
+//go:cgo_import_dynamic libpthread_attr_setstackaddr pthread_attr_setstackaddr "libpthread.a/shr_xpg5_64.o"
+//go:cgo_import_dynamic libpthread_create pthread_create "libpthread.a/shr_xpg5_64.o"
+//go:cgo_import_dynamic libpthread_sigthreadmask sigthreadmask "libpthread.a/shr_xpg5_64.o"
+
+//go:linkname libc__Errno libc__Errno
+//go:linkname libc_clock_gettime libc_clock_gettime
+//go:linkname libc_close libc_close
+//go:linkname libc_exit libc_exit
+//go:linkname libc_getpid libc_getpid
+//go:linkname libc_kill libc_kill
+//go:linkname libc_madvise libc_madvise +//go:linkname libc_malloc libc_malloc +//go:linkname libc_mmap libc_mmap +//go:linkname libc_munmap libc_munmap +//go:linkname libc_open libc_open +//go:linkname libc_pipe libc_pipe +//go:linkname libc_raise libc_raise +//go:linkname libc_read libc_read +//go:linkname libc_sched_yield libc_sched_yield +//go:linkname libc_sem_init libc_sem_init +//go:linkname libc_sem_post libc_sem_post +//go:linkname libc_sem_timedwait libc_sem_timedwait +//go:linkname libc_sem_wait libc_sem_wait +//go:linkname libc_setitimer libc_setitimer +//go:linkname libc_sigaction libc_sigaction +//go:linkname libc_sigaltstack libc_sigaltstack +//go:linkname libc_sysconf libc_sysconf +//go:linkname libc_usleep libc_usleep +//go:linkname libc_write libc_write + +//go:linkname libpthread___pth_init libpthread___pth_init +//go:linkname libpthread_attr_destroy libpthread_attr_destroy +//go:linkname libpthread_attr_init libpthread_attr_init +//go:linkname libpthread_attr_getstacksize libpthread_attr_getstacksize +//go:linkname libpthread_attr_setstacksize libpthread_attr_setstacksize +//go:linkname libpthread_attr_setdetachstate libpthread_attr_setdetachstate +//go:linkname libpthread_attr_setstackaddr libpthread_attr_setstackaddr +//go:linkname libpthread_create libpthread_create +//go:linkname libpthread_sigthreadmask libpthread_sigthreadmask + +var ( + //libc + libc__Errno, + libc_clock_gettime, + libc_close, + libc_exit, + libc_getpid, + libc_kill, + libc_madvise, + libc_malloc, + libc_mmap, + libc_munmap, + libc_open, + libc_pipe, + libc_raise, + libc_read, + libc_sched_yield, + libc_sem_init, + libc_sem_post, + libc_sem_timedwait, + libc_sem_wait, + libc_setitimer, + libc_sigaction, + libc_sigaltstack, + libc_sysconf, + libc_usleep, + libc_write, + //libpthread + libpthread___pth_init, + libpthread_attr_destroy, + libpthread_attr_init, + libpthread_attr_getstacksize, + libpthread_attr_setstacksize, + libpthread_attr_setdetachstate, + libpthread_attr_setstackaddr, + libpthread_create, + libpthread_sigthreadmask libFunc +) + +type libFunc uintptr + +// asmsyscall6 calls the libc symbol using a C convention. +// It's defined in sys_aix_ppc64.go. 
+var asmsyscall6 libFunc + +//go:nowritebarrier +//go:nosplit +func syscall0(fn *libFunc) (r, err uintptr) { + c := &getg().m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = 0 + c.args = uintptr(noescape(unsafe.Pointer(&fn))) // it's unused but must be non-nil, otherwise crashes + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(c)) + + return c.r1, c.err +} + +//go:nowritebarrier +//go:nosplit +func syscall1(fn *libFunc, a0 uintptr) (r, err uintptr) { + c := &getg().m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = 1 + c.args = uintptr(noescape(unsafe.Pointer(&a0))) + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(c)) + + return c.r1, c.err +} + +//go:nowritebarrier +//go:nosplit +func syscall2(fn *libFunc, a0, a1 uintptr) (r, err uintptr) { + c := &getg().m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = 2 + c.args = uintptr(noescape(unsafe.Pointer(&a0))) + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(c)) + + return c.r1, c.err +} + +//go:nowritebarrier +//go:nosplit +func syscall3(fn *libFunc, a0, a1, a2 uintptr) (r, err uintptr) { + c := &getg().m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = 3 + c.args = uintptr(noescape(unsafe.Pointer(&a0))) + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(c)) + + return c.r1, c.err +} + +//go:nowritebarrier +//go:nosplit +func syscall4(fn *libFunc, a0, a1, a2, a3 uintptr) (r, err uintptr) { + c := &getg().m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = 4 + c.args = uintptr(noescape(unsafe.Pointer(&a0))) + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(c)) + + return c.r1, c.err +} + +//go:nowritebarrier +//go:nosplit +func syscall5(fn *libFunc, a0, a1, a2, a3, a4 uintptr) (r, err uintptr) { + c := &getg().m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = 5 + c.args = uintptr(noescape(unsafe.Pointer(&a0))) + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(c)) + + return c.r1, c.err +} + +//go:nowritebarrier +//go:nosplit +func syscall6(fn *libFunc, a0, a1, a2, a3, a4, a5 uintptr) (r, err uintptr) { + c := &getg().m.libcall + c.fn = uintptr(unsafe.Pointer(fn)) + c.n = 6 + c.args = uintptr(noescape(unsafe.Pointer(&a0))) + + asmcgocall(unsafe.Pointer(&asmsyscall6), unsafe.Pointer(c)) + + return c.r1, c.err +} + +//go:nosplit +func exit(code int32) { + syscall1(&libc_exit, uintptr(code)) +} + +//go:nosplit +func write(fd uintptr, p unsafe.Pointer, n int32) int32 { + r, _ := syscall3(&libc_write, uintptr(fd), uintptr(p), uintptr(n)) + return int32(r) + +} + +//go:nosplit +func read(fd int32, p unsafe.Pointer, n int32) int32 { + r, _ := syscall3(&libc_read, uintptr(fd), uintptr(p), uintptr(n)) + return int32(r) +} + +//go:nosplit +func open(name *byte, mode, perm int32) int32 { + r, _ := syscall3(&libc_open, uintptr(unsafe.Pointer(name)), uintptr(mode), uintptr(perm)) + return int32(r) +} + +//go:nosplit +func closefd(fd int32) int32 { + r, _ := syscall1(&libc_close, uintptr(fd)) + return int32(r) +} + +//go:nosplit +func pipe(fd *int32) int32 { + r, _ := syscall1(&libc_pipe, uintptr(unsafe.Pointer(fd))) + return int32(r) +} + +// mmap calls the mmap system call. +// We only pass the lower 32 bits of file offset to the +// assembly routine; the higher bits (if required), should be provided +// by the assembly routine as 0. +// The err result is an OS error code such as ENOMEM. 
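The syscall0 through syscall6 helpers above make wrapping any imported libc entry point mechanical: import the symbol with go:cgo_import_dynamic, declare its libFunc, and dispatch through the syscallN matching its arity, exactly as the exit, write, and read wrappers above do. As a hedged illustration only, a zero-argument wrapper would look like the sketch below; the wrapper name is hypothetical, and the runtime itself simply calls syscall0(&libc_getpid) directly in raiseproc later in this file.

// Hypothetical wrapper following the file's pattern; not part of the
// actual change. libc_getpid is imported near the top of os2_aix.go.
//go:nosplit
func my_getpid() int32 {
	r, _ := syscall0(&libc_getpid)
	return int32(r)
}

The mmap wrapper that follows uses the same shape at six arguments, with the second return value carrying the errno.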
+//go:nosplit +func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int) { + r, err0 := syscall6(&libc_mmap, uintptr(addr), uintptr(n), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(off)) + return unsafe.Pointer(r), int(err0) +} + +//go:nosplit +func munmap(addr unsafe.Pointer, n uintptr) { + r, err := syscall2(&libc_munmap, uintptr(addr), uintptr(n)) + if int32(r) == -1 { + println("syscall munmap failed: ", hex(err)) + throw("syscall munmap") + } +} + +//go:nosplit +func madvise(addr unsafe.Pointer, n uintptr, flags int32) { + r, err := syscall3(&libc_madvise, uintptr(addr), uintptr(n), uintptr(flags)) + if int32(r) == -1 { + println("syscall madvise failed: ", hex(err)) + throw("syscall madvise") + } +} + +//go:nosplit +func sigaction(sig uintptr, new, old *sigactiont) { + r, err := syscall3(&libc_sigaction, sig, uintptr(unsafe.Pointer(new)), uintptr(unsafe.Pointer(old))) + if int32(r) == -1 { + println("Sigaction failed for sig: ", sig, " with error:", hex(err)) + throw("syscall sigaction") + } +} + +//go:nosplit +func sigaltstack(new, old *stackt) { + r, err := syscall2(&libc_sigaltstack, uintptr(unsafe.Pointer(new)), uintptr(unsafe.Pointer(old))) + if int32(r) == -1 { + println("syscall sigaltstack failed: ", hex(err)) + throw("syscall sigaltstack") + } +} + +//go:nosplit +func usleep(us uint32) { + r, err := syscall1(&libc_usleep, uintptr(us)) + if int32(r) == -1 { + println("syscall usleep failed: ", hex(err)) + throw("syscall usleep") + } +} + +//go:nosplit +func clock_gettime(clockid int32, tp *timespec) int32 { + r, _ := syscall2(&libc_clock_gettime, uintptr(clockid), uintptr(unsafe.Pointer(tp))) + return int32(r) +} + +//go:nosplit +func setitimer(mode int32, new, old *itimerval) { + r, err := syscall3(&libc_setitimer, uintptr(mode), uintptr(unsafe.Pointer(new)), uintptr(unsafe.Pointer(old))) + if int32(r) == -1 { + println("syscall setitimer failed: ", hex(err)) + throw("syscall setitimer") + } +} + +//go:nosplit +func malloc(size uintptr) unsafe.Pointer { + r, _ := syscall1(&libc_malloc, size) + return unsafe.Pointer(r) +} + +//go:nosplit +func sem_init(sem *semt, pshared int32, value uint32) int32 { + r, _ := syscall3(&libc_sem_init, uintptr(unsafe.Pointer(sem)), uintptr(pshared), uintptr(value)) + return int32(r) +} + +//go:nosplit +func sem_wait(sem *semt) (int32, int32) { + r, err := syscall1(&libc_sem_wait, uintptr(unsafe.Pointer(sem))) + return int32(r), int32(err) +} + +//go:nosplit +func sem_post(sem *semt) int32 { + r, _ := syscall1(&libc_sem_post, uintptr(unsafe.Pointer(sem))) + return int32(r) +} + +//go:nosplit +func sem_timedwait(sem *semt, timeout *timespec) (int32, int32) { + r, err := syscall2(&libc_sem_timedwait, uintptr(unsafe.Pointer(sem)), uintptr(unsafe.Pointer(timeout))) + return int32(r), int32(err) +} + +//go:nosplit +func raise(sig uint32) { + r, err := syscall1(&libc_raise, uintptr(sig)) + if int32(r) == -1 { + println("syscall raise failed: ", hex(err)) + throw("syscall raise") + } +} + +//go:nosplit +func raiseproc(sig uint32) { + pid, err := syscall0(&libc_getpid) + if int32(pid) == -1 { + println("syscall getpid failed: ", hex(err)) + throw("syscall raiseproc") + } + + syscall2(&libc_kill, pid, uintptr(sig)) +} + +func osyield1() + +//go:nosplit +func osyield() { + _g_ := getg() + + // Check the validity of m because we might be called in cgo callback + // path early enough where there isn't a m available yet. 
+	if _g_ != nil && _g_.m != nil {
+		r, err := syscall0(&libc_sched_yield)
+		if int32(r) == -1 {
+			println("syscall osyield failed: ", hex(err))
+			throw("syscall osyield")
+		}
+		return
+	}
+	osyield1()
+}
+
+//go:nosplit
+func sysconf(name int32) uintptr {
+	r, _ := syscall1(&libc_sysconf, uintptr(name))
+	if int32(r) == -1 {
+		throw("syscall sysconf")
+	}
+	return r
+}
+
+// pthread functions return their error code in the main return value.
+// Therefore, the err returned by syscall means nothing and must not be used.
+
+//go:nosplit
+func pthread_attr_destroy(attr *pthread_attr) int32 {
+	r, _ := syscall1(&libpthread_attr_destroy, uintptr(unsafe.Pointer(attr)))
+	return int32(r)
+}
+
+//go:nosplit
+func pthread_attr_init(attr *pthread_attr) int32 {
+	r, _ := syscall1(&libpthread_attr_init, uintptr(unsafe.Pointer(attr)))
+	return int32(r)
+}
+
+//go:nosplit
+func pthread_attr_setdetachstate(attr *pthread_attr, state int32) int32 {
+	r, _ := syscall2(&libpthread_attr_setdetachstate, uintptr(unsafe.Pointer(attr)), uintptr(state))
+	return int32(r)
+}
+
+//go:nosplit
+func pthread_attr_setstackaddr(attr *pthread_attr, stk unsafe.Pointer) int32 {
+	r, _ := syscall2(&libpthread_attr_setstackaddr, uintptr(unsafe.Pointer(attr)), uintptr(stk))
+	return int32(r)
+}
+
+//go:nosplit
+func pthread_attr_getstacksize(attr *pthread_attr, size *uint64) int32 {
+	r, _ := syscall2(&libpthread_attr_getstacksize, uintptr(unsafe.Pointer(attr)), uintptr(unsafe.Pointer(size)))
+	return int32(r)
+}
+
+//go:nosplit
+func pthread_attr_setstacksize(attr *pthread_attr, size uint64) int32 {
+	r, _ := syscall2(&libpthread_attr_setstacksize, uintptr(unsafe.Pointer(attr)), uintptr(size))
+	return int32(r)
+}
+
+//go:nosplit
+func pthread_create(tid *pthread, attr *pthread_attr, fn *funcDescriptor, arg unsafe.Pointer) int32 {
+	r, _ := syscall4(&libpthread_create, uintptr(unsafe.Pointer(tid)), uintptr(unsafe.Pointer(attr)), uintptr(unsafe.Pointer(fn)), uintptr(arg))
+	return int32(r)
+}
+
+// In a multi-threaded program, sigprocmask must not be called.
+// It's replaced by sigthreadmask.
+//go:nosplit
+func sigprocmask(how int32, new, old *sigset) {
+	r, err := syscall3(&libpthread_sigthreadmask, uintptr(how), uintptr(unsafe.Pointer(new)), uintptr(unsafe.Pointer(old)))
+	if int32(r) != 0 {
+		println("syscall sigthreadmask failed: ", hex(err))
+		throw("syscall sigthreadmask")
+	}
+}
diff --git a/src/runtime/os_aix.go b/src/runtime/os_aix.go
new file mode 100644
index 0000000000..31590f22d8
--- /dev/null
+++ b/src/runtime/os_aix.go
@@ -0,0 +1,262 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix
+
+package runtime
+
+import (
+	"unsafe"
+)
+
+const (
+	threadStackSize = 0x100000 // size of a thread stack allocated by OS
+)
+
+// funcDescriptor is a structure representing a function descriptor.
+// A variable with this type is always created in assembler.
+type funcDescriptor struct {
+	fn         uintptr
+	toc        uintptr
+	envPointer uintptr // unused in Golang
+}
+
+type mOS struct {
+	waitsema uintptr // semaphore for parking on locks
+	perrno   uintptr // pointer to tls errno
+}
+
+//go:nosplit
+func semacreate(mp *m) {
+	if mp.waitsema != 0 {
+		return
+	}
+
+	var sem *semt
+
+	// Call libc's malloc rather than malloc. This will
+	// allocate space on the C heap. We can't call mallocgc
+	// here because it could cause a deadlock.
+	sem = (*semt)(malloc(unsafe.Sizeof(*sem)))
+	if sem_init(sem, 0, 0) != 0 {
+		throw("sem_init")
+	}
+	mp.waitsema = uintptr(unsafe.Pointer(sem))
+}
+
+//go:nosplit
+func semasleep(ns int64) int32 {
+	_m_ := getg().m
+	if ns >= 0 {
+		var ts timespec
+
+		if clock_gettime(_CLOCK_REALTIME, &ts) != 0 {
+			throw("clock_gettime")
+		}
+		ts.tv_sec += ns / 1e9
+		ts.tv_nsec += ns % 1e9
+		if ts.tv_nsec >= 1e9 {
+			ts.tv_sec++
+			ts.tv_nsec -= 1e9
+		}
+
+		if r, err := sem_timedwait((*semt)(unsafe.Pointer(_m_.waitsema)), &ts); r != 0 {
+			if err == _ETIMEDOUT || err == _EAGAIN || err == _EINTR {
+				return -1
+			}
+			println("sem_timedwait err ", err, " ts.tv_sec ", ts.tv_sec, " ts.tv_nsec ", ts.tv_nsec, " ns ", ns, " id ", _m_.id)
+			throw("sem_timedwait")
+		}
+		return 0
+	}
+	for {
+		r1, err := sem_wait((*semt)(unsafe.Pointer(_m_.waitsema)))
+		if r1 == 0 {
+			break
+		}
+		if err == _EINTR {
+			continue
+		}
+		throw("sem_wait")
+	}
+	return 0
+}
+
+//go:nosplit
+func semawakeup(mp *m) {
+	if sem_post((*semt)(unsafe.Pointer(mp.waitsema))) != 0 {
+		throw("sem_post")
+	}
+}
+
+func osinit() {
+	ncpu = int32(sysconf(__SC_NPROCESSORS_ONLN))
+	physPageSize = sysconf(__SC_PAGE_SIZE)
+}
+
+// M-related functions
+func mpreinit(mp *m) {
+	mp.gsignal = malg(32 * 1024) // AIX wants >= 8K
+	mp.gsignal.m = mp
+}
+
+// The errno address must be retrieved by calling the _Errno libc function.
+// This returns a pointer to errno.
+func miniterrno() {
+	mp := getg().m
+	r, _ := syscall0(&libc__Errno)
+	mp.perrno = r
+}
+
+func minit() {
+	miniterrno()
+	minitSignals()
+}
+
+func unminit() {
+	unminitSignals()
+}
+
+// tstart is a function descriptor to _tstart defined in assembly.
+var tstart funcDescriptor
+
+func newosproc(mp *m) {
+	var (
+		attr pthread_attr
+		oset sigset
+		tid  pthread
+	)
+
+	if pthread_attr_init(&attr) != 0 {
+		throw("pthread_attr_init")
+	}
+
+	if pthread_attr_setstacksize(&attr, threadStackSize) != 0 {
+		throw("pthread_attr_setstacksize")
+	}
+
+	if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
+		throw("pthread_attr_setdetachstate")
+	}
+
+	// Disable signals during create, so that the new thread starts
+	// with signals disabled. It will enable them in minit.
+	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
+	var ret int32
+	for tries := 0; tries < 20; tries++ {
+		// pthread_create can fail with EAGAIN for no apparent
+		// reason, but it will be ok if it retries.
+		ret = pthread_create(&tid, &attr, &tstart, unsafe.Pointer(mp))
+		if ret != _EAGAIN {
+			break
+		}
+		usleep(uint32(tries+1) * 1000) // Milliseconds.
+	}
+	sigprocmask(_SIG_SETMASK, &oset, nil)
+	if ret != 0 {
+		print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", ret, ")\n")
+		if ret == _EAGAIN {
+			println("runtime: may need to increase max user processes (ulimit -u)")
+		}
+		throw("newosproc")
+	}
+}
+
+func exitThread(wait *uint32) {
+	// We should never reach exitThread on AIX because we let
+	// libc clean up threads.
+ throw("exitThread") +} + +var urandom_dev = []byte("/dev/urandom\x00") + +//go:nosplit +func getRandomData(r []byte) { + fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0) + n := read(fd, unsafe.Pointer(&r[0]), int32(len(r))) + closefd(fd) + extendRandom(r, int(n)) +} + +func goenvs() { + goenvs_unix() +} + +/* SIGNAL */ + +const ( + _NSIG = 256 +) + +// sigtramp is a function descriptor to _sigtramp defined in assembly +var sigtramp funcDescriptor + +//go:nosplit +//go:nowritebarrierrec +func setsig(i uint32, fn uintptr) { + var sa sigactiont + sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART + sa.sa_mask = sigset_all + if fn == funcPC(sighandler) { + fn = uintptr(unsafe.Pointer(&sigtramp)) + } + sa.sa_handler = fn + sigaction(uintptr(i), &sa, nil) + +} + +//go:nosplit +//go:nowritebarrierrec +func setsigstack(i uint32) { + throw("Not yet implemented\n") +} + +//go:nosplit +//go:nowritebarrierrec +func getsig(i uint32) uintptr { + var sa sigactiont + sigaction(uintptr(i), nil, &sa) + return sa.sa_handler +} + +// setSignaltstackSP sets the ss_sp field of a stackt. +//go:nosplit +func setSignalstackSP(s *stackt, sp uintptr) { + *(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp +} + +func (c *sigctxt) fixsigcode(sig uint32) { +} + +func sigaddset(mask *sigset, i int) { + (*mask)[(i-1)/64] |= 1 << ((uint32(i) - 1) & 63) +} + +func sigdelset(mask *sigset, i int) { + (*mask)[(i-1)/64] &^= 1 << ((uint32(i) - 1) & 63) +} + +const ( + _CLOCK_REALTIME = 9 + _CLOCK_MONOTONIC = 10 +) + +//go:nosplit +func nanotime() int64 { + tp := ×pec{} + if clock_gettime(_CLOCK_REALTIME, tp) != 0 { + throw("syscall clock_gettime failed") + } + return tp.tv_sec*1000000000 + tp.tv_nsec +} + +func walltime() (sec int64, nsec int32) { + ts := ×pec{} + if clock_gettime(_CLOCK_REALTIME, ts) != 0 { + throw("syscall clock_gettime failed") + } + return ts.tv_sec, int32(ts.tv_nsec) +} diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go index d2144edf2e..26b02820cd 100644 --- a/src/runtime/os_darwin.go +++ b/src/runtime/os_darwin.go @@ -34,6 +34,10 @@ func semacreate(mp *m) { //go:nosplit func semasleep(ns int64) int32 { + var start int64 + if ns >= 0 { + start = nanotime() + } mp := getg().m pthread_mutex_lock(&mp.mutex) for { @@ -43,8 +47,13 @@ func semasleep(ns int64) int32 { return 0 } if ns >= 0 { + spent := nanotime() - start + if spent >= ns { + pthread_mutex_unlock(&mp.mutex) + return -1 + } var t timespec - t.set_nsec(ns) + t.set_nsec(ns - spent) err := pthread_cond_timedwait_relative_np(&mp.cond, &mp.mutex, &t) if err == _ETIMEDOUT { pthread_mutex_unlock(&mp.mutex) diff --git a/src/runtime/os_nacl.go b/src/runtime/os_nacl.go index 23ab03b953..ac7bf69582 100644 --- a/src/runtime/os_nacl.go +++ b/src/runtime/os_nacl.go @@ -197,23 +197,23 @@ func semacreate(mp *m) { //go:nosplit func semasleep(ns int64) int32 { var ret int32 - systemstack(func() { _g_ := getg() if nacl_mutex_lock(_g_.m.waitsemalock) < 0 { throw("semasleep") } - + var ts timespec + if ns >= 0 { + end := ns + nanotime() + ts.tv_sec = end / 1e9 + ts.tv_nsec = int32(end % 1e9) + } for _g_.m.waitsemacount == 0 { if ns < 0 { if nacl_cond_wait(_g_.m.waitsema, _g_.m.waitsemalock) < 0 { throw("semasleep") } } else { - var ts timespec - end := ns + nanotime() - ts.tv_sec = end / 1e9 - ts.tv_nsec = int32(end % 1e9) r := nacl_cond_timed_wait_abs(_g_.m.waitsema, _g_.m.waitsemalock, &ts) if r == -_ETIMEDOUT { nacl_mutex_unlock(_g_.m.waitsemalock) diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go index a9bf407a36..7deab3ed03 100644 --- 
a/src/runtime/os_netbsd.go +++ b/src/runtime/os_netbsd.go @@ -126,15 +126,9 @@ func semacreate(mp *m) { //go:nosplit func semasleep(ns int64) int32 { _g_ := getg() - - // Compute sleep deadline. - var tsp *timespec - var ts timespec + var deadline int64 if ns >= 0 { - var nsec int32 - ts.set_sec(timediv(ns, 1000000000, &nsec)) - ts.set_nsec(nsec) - tsp = &ts + deadline = nanotime() + ns } for { @@ -147,18 +141,21 @@ func semasleep(ns int64) int32 { } // Sleep until unparked by semawakeup or timeout. + var tsp *timespec + var ts timespec + if ns >= 0 { + wait := deadline - nanotime() + if wait <= 0 { + return -1 + } + var nsec int32 + ts.set_sec(timediv(wait, 1000000000, &nsec)) + ts.set_nsec(nsec) + tsp = &ts + } ret := lwp_park(_CLOCK_MONOTONIC, _TIMER_RELTIME, tsp, 0, unsafe.Pointer(&_g_.m.waitsemacount), nil) if ret == _ETIMEDOUT { return -1 - } else if ret == _EINTR && ns >= 0 { - // Avoid sleeping forever if we keep getting - // interrupted (for example by the profiling - // timer). It would be if tsp upon return had the - // remaining time to sleep, but this is good enough. - var nsec int32 - ns /= 2 - ts.set_sec(timediv(ns, 1000000000, &nsec)) - ts.set_nsec(nsec) } } } diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go index 5607bf95c1..03dd95bf17 100644 --- a/src/runtime/os_windows.go +++ b/src/runtime/os_windows.go @@ -43,6 +43,7 @@ const ( //go:cgo_import_dynamic runtime._SetWaitableTimer SetWaitableTimer%6 "kernel32.dll" //go:cgo_import_dynamic runtime._SuspendThread SuspendThread%1 "kernel32.dll" //go:cgo_import_dynamic runtime._SwitchToThread SwitchToThread%0 "kernel32.dll" +//go:cgo_import_dynamic runtime._TlsAlloc TlsAlloc%0 "kernel32.dll" //go:cgo_import_dynamic runtime._VirtualAlloc VirtualAlloc%4 "kernel32.dll" //go:cgo_import_dynamic runtime._VirtualFree VirtualFree%3 "kernel32.dll" //go:cgo_import_dynamic runtime._VirtualQuery VirtualQuery%3 "kernel32.dll" @@ -91,6 +92,7 @@ var ( _SetWaitableTimer, _SuspendThread, _SwitchToThread, + _TlsAlloc, _VirtualAlloc, _VirtualFree, _VirtualQuery, @@ -856,18 +858,38 @@ func profileloop() var profiletimer uintptr -func profilem(mp *m) { +func profilem(mp *m, thread uintptr) { var r *context rbuf := make([]byte, unsafe.Sizeof(*r)+15) - tls := &mp.tls[0] - gp := *((**g)(unsafe.Pointer(tls))) - // align Context to 16 bytes r = (*context)(unsafe.Pointer((uintptr(unsafe.Pointer(&rbuf[15]))) &^ 15)) r.contextflags = _CONTEXT_CONTROL - stdcall2(_GetThreadContext, mp.thread, uintptr(unsafe.Pointer(r))) - sigprof(r.ip(), r.sp(), 0, gp, mp) + stdcall2(_GetThreadContext, thread, uintptr(unsafe.Pointer(r))) + + var gp *g + switch GOARCH { + default: + panic("unsupported architecture") + case "arm": + // TODO(jordanrh1): this is incorrect when Go is executing + // on the system or signal stacks because curg returns + // the current user g. The true g is stored in thread + // local storage, which we cannot access from another CPU. + // We cannot pull R10 from the thread context because + // it might be executing C code, in which case R10 + // would not be g. 
+ gp = mp.curg + case "386", "amd64": + tls := &mp.tls[0] + gp = *((**g)(unsafe.Pointer(tls))) + } + + if gp == nil { + sigprofNonGoPC(r.ip()) + } else { + sigprof(r.ip(), r.sp(), 0, gp, mp) + } } func profileloop1(param uintptr) uint32 { @@ -884,9 +906,16 @@ func profileloop1(param uintptr) uint32 { if thread == 0 || mp.profilehz == 0 || mp.blocked { continue } - stdcall1(_SuspendThread, thread) + // mp may exit between the load above and the + // SuspendThread, so be careful. + if int32(stdcall1(_SuspendThread, thread)) == -1 { + // The thread no longer exists. + continue + } if mp.profilehz != 0 && !mp.blocked { - profilem(mp) + // Pass the thread handle in case mp + // was in the process of shutting down. + profilem(mp, thread) } stdcall1(_ResumeThread, thread) } diff --git a/src/runtime/os_windows_arm.go b/src/runtime/os_windows_arm.go new file mode 100644 index 0000000000..3115f7241d --- /dev/null +++ b/src/runtime/os_windows_arm.go @@ -0,0 +1,18 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +//go:nosplit +func cputicks() int64 { + return nanotime() +} + +func checkgoarm() { + if goarm < 7 { + print("Need atomic synchronization instructions, coprocessor ", + "access instructions. Recompile using GOARM=7.\n") + exit(1) + } +} diff --git a/src/runtime/pprof/internal/profile/profile.go b/src/runtime/pprof/internal/profile/profile.go index 64c3e3f054..863bd403a4 100644 --- a/src/runtime/pprof/internal/profile/profile.go +++ b/src/runtime/pprof/internal/profile/profile.go @@ -200,7 +200,7 @@ var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) // first. func (p *Profile) setMain() { for i := 0; i < len(p.Mapping); i++ { - file := strings.TrimSpace(strings.Replace(p.Mapping[i].File, "(deleted)", "", -1)) + file := strings.TrimSpace(strings.ReplaceAll(p.Mapping[i].File, "(deleted)", "")) if len(file) == 0 { continue } diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 126ba50054..593924183f 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -602,7 +602,7 @@ func TestBlockProfile(t *testing.T) { } for _, test := range tests { - if !regexp.MustCompile(strings.Replace(test.re, "\t", "\t+", -1)).MatchString(prof) { + if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) { t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof) } } diff --git a/src/runtime/pprof/proto.go b/src/runtime/pprof/proto.go index cbd0b83376..bd5c8f7afc 100644 --- a/src/runtime/pprof/proto.go +++ b/src/runtime/pprof/proto.go @@ -524,6 +524,14 @@ func parseProcSelfMaps(data []byte, addMapping func(lo, hi, offset uint64, file, continue } file := string(line) + + // Trim deleted file marker. + deletedStr := " (deleted)" + deletedLen := len(deletedStr) + if len(file) >= deletedLen && file[len(file)-deletedLen:] == deletedStr { + file = file[:len(file)-deletedLen] + } + if len(inode) == 1 && inode[0] == '0' && file == "" { // Huge-page text mappings list the initial fragment of // mapped but unpopulated memory as being inode 0. 
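The parseProcSelfMaps hunk above drops a trailing " (deleted)" marker from mapping file names by comparing the suffix by hand. As a small, self-contained sketch (not the pprof source itself, and assuming nothing beyond the standard library), the same trim can be expressed with strings.TrimSuffix:

	package main

	import (
		"fmt"
		"strings"
	)

	// trimDeleted removes the marker the kernel appends in /proc/self/maps
	// when a mapped file has been unlinked, mirroring the check added above.
	func trimDeleted(file string) string {
		return strings.TrimSuffix(file, " (deleted)")
	}

	func main() {
		fmt.Println(trimDeleted("/bin/cat (deleted)")) // /bin/cat
		fmt.Println(trimDeleted("[vdso]"))             // unchanged
	}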
diff --git a/src/runtime/pprof/proto_test.go b/src/runtime/pprof/proto_test.go index 76bd46da02..4452d51231 100644 --- a/src/runtime/pprof/proto_test.go +++ b/src/runtime/pprof/proto_test.go @@ -216,24 +216,89 @@ c000000000-c000036000 rw-p 00000000 00:00 0 07000000 07093000 06c00000 /path/to/gobench_server_main ` +var profSelfMapsTestsWithDeleted = ` +00400000-0040b000 r-xp 00000000 fc:01 787766 /bin/cat (deleted) +0060a000-0060b000 r--p 0000a000 fc:01 787766 /bin/cat (deleted) +0060b000-0060c000 rw-p 0000b000 fc:01 787766 /bin/cat (deleted) +014ab000-014cc000 rw-p 00000000 00:00 0 [heap] +7f7d76af8000-7f7d7797c000 r--p 00000000 fc:01 1318064 /usr/lib/locale/locale-archive +7f7d7797c000-7f7d77b36000 r-xp 00000000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77b36000-7f7d77d36000 ---p 001ba000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d36000-7f7d77d3a000 r--p 001ba000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d3a000-7f7d77d3c000 rw-p 001be000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d3c000-7f7d77d41000 rw-p 00000000 00:00 0 +7f7d77d41000-7f7d77d64000 r-xp 00000000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so +7f7d77f3f000-7f7d77f42000 rw-p 00000000 00:00 0 +7f7d77f61000-7f7d77f63000 rw-p 00000000 00:00 0 +7f7d77f63000-7f7d77f64000 r--p 00022000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so +7f7d77f64000-7f7d77f65000 rw-p 00023000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so +7f7d77f65000-7f7d77f66000 rw-p 00000000 00:00 0 +7ffc342a2000-7ffc342c3000 rw-p 00000000 00:00 0 [stack] +7ffc34343000-7ffc34345000 r-xp 00000000 00:00 0 [vdso] +ffffffffff600000-ffffffffff601000 r-xp 00000090 00:00 0 [vsyscall] +-> +00400000 0040b000 00000000 /bin/cat +7f7d7797c000 7f7d77b36000 00000000 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d41000 7f7d77d64000 00000000 /lib/x86_64-linux-gnu/ld-2.19.so +7ffc34343000 7ffc34345000 00000000 [vdso] +ffffffffff600000 ffffffffff601000 00000090 [vsyscall] + +00400000-0040b000 r-xp 00000000 fc:01 787766 /bin/cat with space +0060a000-0060b000 r--p 0000a000 fc:01 787766 /bin/cat with space +0060b000-0060c000 rw-p 0000b000 fc:01 787766 /bin/cat with space +014ab000-014cc000 rw-p 00000000 00:00 0 [heap] +7f7d76af8000-7f7d7797c000 r--p 00000000 fc:01 1318064 /usr/lib/locale/locale-archive +7f7d7797c000-7f7d77b36000 r-xp 00000000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77b36000-7f7d77d36000 ---p 001ba000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d36000-7f7d77d3a000 r--p 001ba000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d3a000-7f7d77d3c000 rw-p 001be000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d3c000-7f7d77d41000 rw-p 00000000 00:00 0 +7f7d77d41000-7f7d77d64000 r-xp 00000000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so +7f7d77f3f000-7f7d77f42000 rw-p 00000000 00:00 0 +7f7d77f61000-7f7d77f63000 rw-p 00000000 00:00 0 +7f7d77f63000-7f7d77f64000 r--p 00022000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so +7f7d77f64000-7f7d77f65000 rw-p 00023000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so +7f7d77f65000-7f7d77f66000 rw-p 00000000 00:00 0 +7ffc342a2000-7ffc342c3000 rw-p 00000000 00:00 0 [stack] +7ffc34343000-7ffc34345000 r-xp 00000000 00:00 0 [vdso] +ffffffffff600000-ffffffffff601000 r-xp 00000090 00:00 0 [vsyscall] +-> +00400000 0040b000 00000000 /bin/cat with space +7f7d7797c000 7f7d77b36000 00000000 /lib/x86_64-linux-gnu/libc-2.19.so +7f7d77d41000 7f7d77d64000 00000000 /lib/x86_64-linux-gnu/ld-2.19.so +7ffc34343000 7ffc34345000 00000000 [vdso] +ffffffffff600000 
ffffffffff601000 00000090 [vsyscall] +` + func TestProcSelfMaps(t *testing.T) { - for tx, tt := range strings.Split(profSelfMapsTests, "\n\n") { - i := strings.Index(tt, "->\n") - if i < 0 { - t.Fatal("malformed test case") - } - in, out := tt[:i], tt[i+len("->\n"):] - if len(out) > 0 && out[len(out)-1] != '\n' { - out += "\n" - } - var buf bytes.Buffer - parseProcSelfMaps([]byte(in), func(lo, hi, offset uint64, file, buildID string) { - fmt.Fprintf(&buf, "%08x %08x %08x %s\n", lo, hi, offset, file) - }) - if buf.String() != out { - t.Errorf("#%d: have:\n%s\nwant:\n%s\n%q\n%q", tx, buf.String(), out, buf.String(), out) + + f := func(t *testing.T, input string) { + for tx, tt := range strings.Split(input, "\n\n") { + i := strings.Index(tt, "->\n") + if i < 0 { + t.Fatal("malformed test case") + } + in, out := tt[:i], tt[i+len("->\n"):] + if len(out) > 0 && out[len(out)-1] != '\n' { + out += "\n" + } + var buf bytes.Buffer + parseProcSelfMaps([]byte(in), func(lo, hi, offset uint64, file, buildID string) { + fmt.Fprintf(&buf, "%08x %08x %08x %s\n", lo, hi, offset, file) + }) + if buf.String() != out { + t.Errorf("#%d: have:\n%s\nwant:\n%s\n%q\n%q", tx, buf.String(), out, buf.String(), out) + } } } + + t.Run("Normal", func(t *testing.T) { + f(t, profSelfMapsTests) + }) + + t.Run("WithDeletedFile", func(t *testing.T) { + f(t, profSelfMapsTestsWithDeleted) + }) } // TestMapping checkes the mapping section of CPU profiles diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 73b4a1d9d6..acfdc8472e 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -254,7 +254,7 @@ func forcegchelper() { println("GC forced") } // Time-triggered, fully concurrent. - gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerTime, now: nanotime()}) + gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()}) } } @@ -663,59 +663,6 @@ func ready(gp *g, traceskip int, next bool) { } } -func gcprocs() int32 { - // Figure out how many CPUs to use during GC. - // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc. - lock(&sched.lock) - n := gomaxprocs - if n > ncpu { - n = ncpu - } - if n > _MaxGcproc { - n = _MaxGcproc - } - if n > sched.nmidle+1 { // one M is currently running - n = sched.nmidle + 1 - } - unlock(&sched.lock) - return n -} - -func needaddgcproc() bool { - lock(&sched.lock) - n := gomaxprocs - if n > ncpu { - n = ncpu - } - if n > _MaxGcproc { - n = _MaxGcproc - } - n -= sched.nmidle + 1 // one M is currently running - unlock(&sched.lock) - return n > 0 -} - -func helpgc(nproc int32) { - _g_ := getg() - lock(&sched.lock) - pos := 0 - for n := int32(1); n < nproc; n++ { // one M is currently running - if allp[pos].mcache == _g_.m.mcache { - pos++ - } - mp := mget() - if mp == nil { - throw("gcprocs inconsistency") - } - mp.helpgc = n - mp.p.set(allp[pos]) - mp.mcache = allp[pos].mcache - pos++ - notewakeup(&mp.park) - } - unlock(&sched.lock) -} - // freezeStopWait is a large value that freezetheworld sets // sched.stopwait to in order to request that all Gs permanently stop. 
const freezeStopWait = 0x7fffffff @@ -1132,11 +1079,6 @@ func stopTheWorldWithSema() { } } -func mhelpgc() { - _g_ := getg() - _g_.m.helpgc = -1 -} - func startTheWorldWithSema(emitTraceEvent bool) int64 { _g_ := getg() @@ -1145,7 +1087,6 @@ func startTheWorldWithSema(emitTraceEvent bool) int64 { list := netpoll(false) // non-blocking injectglist(&list) } - add := needaddgcproc() lock(&sched.lock) procs := gomaxprocs @@ -1175,7 +1116,6 @@ func startTheWorldWithSema(emitTraceEvent bool) int64 { } else { // Start M to run P. Do not start another M below. newm(nil, p) - add = false } } @@ -1192,16 +1132,6 @@ func startTheWorldWithSema(emitTraceEvent bool) int64 { wakep() } - if add { - // If GC could have used another helper proc, start one now, - // in the hope that it will be available next time. - // It would have been even better to start it before the collection, - // but doing so requires allocating memory, so it's tricky to - // coordinate. This lazy approach works out in practice: - // we don't mind if the first couple gc rounds don't have quite - // the maximum number of procs. - newm(mhelpgc, nil) - } _g_.m.locks-- if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack _g_.stackguard0 = stackPreempt @@ -1242,8 +1172,8 @@ func mstart() { mstart1() // Exit this thread. - if GOOS == "windows" || GOOS == "solaris" || GOOS == "plan9" || GOOS == "darwin" { - // Window, Solaris, Darwin and Plan 9 always system-allocate + if GOOS == "windows" || GOOS == "solaris" || GOOS == "plan9" || GOOS == "darwin" || GOOS == "aix" { + // Window, Solaris, Darwin, AIX and Plan 9 always system-allocate // the stack, but put it in _g_.stack before mstart, // so the logic above hasn't set osStack yet. osStack = true @@ -1276,10 +1206,7 @@ func mstart1() { fn() } - if _g_.m.helpgc != 0 { - _g_.m.helpgc = 0 - stopm() - } else if _g_.m != &m0 { + if _g_.m != &m0 { acquirep(_g_.m.nextp.ptr()) _g_.m.nextp = 0 } @@ -2003,21 +1930,11 @@ func stopm() { throw("stopm spinning") } -retry: lock(&sched.lock) mput(_g_.m) unlock(&sched.lock) notesleep(&_g_.m.park) noteclear(&_g_.m.park) - if _g_.m.helpgc != 0 { - // helpgc() set _g_.m.p and _g_.m.mcache, so we have a P. - gchelper() - // Undo the effects of helpgc(). - _g_.m.helpgc = 0 - _g_.m.mcache = nil - _g_.m.p = 0 - goto retry - } acquirep(_g_.m.nextp.ptr()) _g_.m.nextp = 0 } @@ -2615,6 +2532,23 @@ top: resetspinning() } + if sched.disable.user && !schedEnabled(gp) { + // Scheduling of this goroutine is disabled. Put it on + // the list of pending runnable goroutines for when we + // re-enable user scheduling and look again. + lock(&sched.lock) + if schedEnabled(gp) { + // Something re-enabled scheduling while we + // were acquiring the lock. + unlock(&sched.lock) + } else { + sched.disable.runnable.pushBack(gp) + sched.disable.n++ + unlock(&sched.lock) + goto top + } + } + if gp.lockedm != 0 { // Hands off own p to the locked m, // then blocks waiting for a new p. @@ -2730,7 +2664,7 @@ func goexit0(gp *g) { _g_ := getg() casgstatus(gp, _Grunning, _Gdead) - if isSystemGoroutine(gp) { + if isSystemGoroutine(gp, false) { atomic.Xadd(&sched.ngsys, -1) } gp.m = nil @@ -3033,6 +2967,12 @@ func exitsyscall() { _g_.stackguard0 = _g_.stack.lo + _StackGuard } _g_.throwsplit = false + + if sched.disable.user && !schedEnabled(_g_) { + // Scheduling of this goroutine is disabled. 
+ Gosched() + } + return } @@ -3168,7 +3108,10 @@ func exitsyscall0(gp *g) { casgstatus(gp, _Gsyscall, _Grunnable) dropg() lock(&sched.lock) - _p_ := pidleget() + var _p_ *p + if schedEnabled(_g_) { + _p_ = pidleget() + } if _p_ == nil { globrunqput(gp) } else if atomic.Load(&sched.sysmonwait) != 0 { @@ -3381,7 +3324,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, callergp *g, callerpc uintpt if _g_.m.curg != nil { newg.labels = _g_.m.curg.labels } - if isSystemGoroutine(newg) { + if isSystemGoroutine(newg, false) { atomic.Xadd(&sched.ngsys, +1) } newg.gcscanvalid = false @@ -3831,7 +3774,7 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { pc = funcPC(_ExternalCode) + sys.PCQuantum } stk[0] = pc - if mp.preemptoff != "" || mp.helpgc != 0 { + if mp.preemptoff != "" { stk[1] = funcPC(_GC) + sys.PCQuantum } else { stk[1] = funcPC(_System) + sys.PCQuantum @@ -4093,6 +4036,7 @@ func procresize(nprocs int32) *p { if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { // continue to use the current P _g_.m.p.ptr().status = _Prunning + _g_.m.p.ptr().mcache.prepareForSweep() } else { // release the current P and acquire allp[0] if _g_.m.p != 0 { @@ -4143,6 +4087,10 @@ func acquirep(_p_ *p) { _g_ := getg() _g_.m.mcache = _p_.mcache + // Perform deferred mcache flush before this P can allocate + // from a potentially stale mcache. + _p_.mcache.prepareForSweep() + if trace.enabled { traceProcStart() } @@ -4244,7 +4192,7 @@ func checkdead() { lock(&allglock) for i := 0; i < len(allgs); i++ { gp := allgs[i] - if isSystemGoroutine(gp) { + if isSystemGoroutine(gp, false) { continue } s := readgstatus(gp) @@ -4603,7 +4551,7 @@ func schedtrace(detailed bool) { if lockedg != nil { id3 = lockedg.goid } - print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n") + print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n") } lock(&allglock) @@ -4625,6 +4573,40 @@ func schedtrace(detailed bool) { unlock(&sched.lock) } +// schedEnableUser enables or disables the scheduling of user +// goroutines. +// +// This does not stop already running user goroutines, so the caller +// should first stop the world when disabling user goroutines. +func schedEnableUser(enable bool) { + lock(&sched.lock) + if sched.disable.user == !enable { + unlock(&sched.lock) + return + } + sched.disable.user = !enable + if enable { + n := sched.disable.n + sched.disable.n = 0 + globrunqputbatch(&sched.disable.runnable, n) + unlock(&sched.lock) + for ; n != 0 && sched.npidle != 0; n-- { + startm(nil, false) + } + } else { + unlock(&sched.lock) + } +} + +// schedEnabled returns whether gp should be scheduled. It returns +// false is scheduling of gp is disabled. +func schedEnabled(gp *g) bool { + if sched.disable.user { + return isSystemGoroutine(gp, true) + } + return true +} + // Put mp on midle list. // Sched must be locked. // May run during STW, so write barriers are not allowed. 
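The proc.go changes above remove the old helpgc plumbing and add schedEnableUser/schedEnabled: while user scheduling is disabled, runnable user goroutines are parked on sched.disable.runnable and moved back to the global run queue once scheduling is re-enabled. A rough, self-contained model of that park-and-release pattern (plain slices stand in for the runtime's gQueue and locking; this is an illustration, not the runtime implementation):

	package main

	import "fmt"

	// task stands in for a runnable user goroutine.
	type task struct{ id int }

	type scheduler struct {
		userDisabled bool
		parked       []task // runnable tasks held while user scheduling is off
		runq         []task // global run queue
	}

	// enableUser mirrors the shape of schedEnableUser: re-enabling moves
	// everything parked while disabled back onto the run queue.
	func (s *scheduler) enableUser(enable bool) {
		if s.userDisabled == !enable {
			return // already in the requested state
		}
		s.userDisabled = !enable
		if enable {
			s.runq = append(s.runq, s.parked...)
			s.parked = nil
		}
	}

	// submit parks user tasks while disabled, otherwise makes them runnable.
	func (s *scheduler) submit(t task) {
		if s.userDisabled {
			s.parked = append(s.parked, t)
			return
		}
		s.runq = append(s.runq, t)
	}

	func main() {
		var s scheduler
		s.enableUser(false)
		s.submit(task{1})
		s.submit(task{2})
		fmt.Println(len(s.parked), len(s.runq)) // 2 0
		s.enableUser(true)
		fmt.Println(len(s.parked), len(s.runq)) // 0 2
	}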
diff --git a/src/runtime/race/testdata/chan_test.go b/src/runtime/race/testdata/chan_test.go index 7f349c42ed..60e55ed66a 100644 --- a/src/runtime/race/testdata/chan_test.go +++ b/src/runtime/race/testdata/chan_test.go @@ -577,18 +577,32 @@ func TestRaceChanItselfCap(t *testing.T) { <-compl } -func TestRaceChanCloseLen(t *testing.T) { - v := 0 - _ = v +func TestNoRaceChanCloseLen(t *testing.T) { c := make(chan int, 10) - c <- 0 + r := make(chan int, 10) + go func() { + r <- len(c) + }() go func() { - v = 1 close(c) + r <- 0 }() - time.Sleep(1e7) - _ = len(c) - v = 2 + <-r + <-r +} + +func TestNoRaceChanCloseCap(t *testing.T) { + c := make(chan int, 10) + r := make(chan int, 10) + go func() { + r <- cap(c) + }() + go func() { + close(c) + r <- 0 + }() + <-r + <-r } func TestRaceChanCloseSend(t *testing.T) { diff --git a/src/runtime/rt0_aix_ppc64.s b/src/runtime/rt0_aix_ppc64.s new file mode 100644 index 0000000000..0e3d582809 --- /dev/null +++ b/src/runtime/rt0_aix_ppc64.s @@ -0,0 +1,40 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// _rt0_ppc64_aix is a function descriptor of the entrypoint function +// __start. This name is needed by cmd/link. +DATA _rt0_ppc64_aix+0(SB)/8, $__start<>(SB) +DATA _rt0_ppc64_aix+8(SB)/8, $TOC(SB) +GLOBL _rt0_ppc64_aix(SB), NOPTR, $16 + + +// The starting function must return in the loader to +// initialise some librairies, especially libthread which +// creates the main thread and adds the TLS in R13 +// R19 contains a function descriptor to the loader function +// which needs to be called. +// This code is similar to the __start function in C +TEXT __start<>(SB),NOSPLIT,$-8 + XOR R0, R0 + MOVD $libc___n_pthreads(SB), R4 + MOVD 0(R4), R4 + MOVD $libc___mod_init(SB), R5 + MOVD 0(R5), R5 + MOVD 0(R19), R0 + MOVD R2, 40(R1) + MOVD 8(R19), R2 + MOVD R18, R3 + MOVD R0, CTR + BL (CTR) // Return to AIX loader + + // Launch rt0_go + MOVD 40(R1), R2 + MOVD R14, R3 // argc + MOVD R15, R4 // argv + MOVD $runtime·rt0_go(SB), R12 + MOVD R12, CTR + BR (CTR) + diff --git a/src/runtime/rt0_linux_ppc64le.s b/src/runtime/rt0_linux_ppc64le.s index 73b9ae392d..54ea9d58f7 100644 --- a/src/runtime/rt0_linux_ppc64le.s +++ b/src/runtime/rt0_linux_ppc64le.s @@ -12,7 +12,7 @@ TEXT _rt0_ppc64le_linux_lib(SB),NOSPLIT,$-8 MOVW CR, R0 // Save CR in caller's frame MOVD R0, 8(R1) MOVDU R1, -320(R1) // Allocate frame. - + // Preserve callee-save registers. MOVD R14, 24(R1) MOVD R15, 32(R1) diff --git a/src/runtime/rt0_nacl_amd64p32.s b/src/runtime/rt0_nacl_amd64p32.s index 54e4b1de89..38583c58b2 100644 --- a/src/runtime/rt0_nacl_amd64p32.s +++ b/src/runtime/rt0_nacl_amd64p32.s @@ -11,7 +11,7 @@ // 8(DI) - argc // 12(DI) - argv, then 0, then envv, then 0, then auxv // NaCl entry here is almost the same, except that there -// is no saved caller PC, so 0(FP) is -8(FP) and so on. +// is no saved caller PC, so 0(FP) is -8(FP) and so on. TEXT _rt0_amd64p32_nacl(SB),NOSPLIT,$16 MOVL DI, 0(SP) CALL runtime·nacl_sysinfo(SB) diff --git a/src/runtime/rt0_windows_arm.s b/src/runtime/rt0_windows_arm.s new file mode 100644 index 0000000000..c5787d0dee --- /dev/null +++ b/src/runtime/rt0_windows_arm.s @@ -0,0 +1,12 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "go_asm.h" +#include "go_tls.h" +#include "textflag.h" + +// This is the entry point for the program from the +// kernel for an ordinary -buildmode=exe program. +TEXT _rt0_arm_windows(SB),NOSPLIT|NOFRAME,$0 + B ·rt0_go(SB) diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go index d9c6f6d22a..0c24d3dce6 100644 --- a/src/runtime/runtime-gdb_test.go +++ b/src/runtime/runtime-gdb_test.go @@ -242,14 +242,14 @@ func testGdbPython(t *testing.T, cgo bool) { t.Fatalf("info goroutines failed: %s", bl) } - printMapvarRe1 := regexp.MustCompile(`\Q = map[string]string = {["abc"] = "def", ["ghi"] = "jkl"}\E$`) - printMapvarRe2 := regexp.MustCompile(`\Q = map[string]string = {["ghi"] = "jkl", ["abc"] = "def"}\E$`) + printMapvarRe1 := regexp.MustCompile(`^\$[0-9]+ = map\[string\]string = {\[(0x[0-9a-f]+\s+)?"abc"\] = (0x[0-9a-f]+\s+)?"def", \[(0x[0-9a-f]+\s+)?"ghi"\] = (0x[0-9a-f]+\s+)?"jkl"}$`) + printMapvarRe2 := regexp.MustCompile(`^\$[0-9]+ = map\[string\]string = {\[(0x[0-9a-f]+\s+)?"ghi"\] = (0x[0-9a-f]+\s+)?"jkl", \[(0x[0-9a-f]+\s+)?"abc"\] = (0x[0-9a-f]+\s+)?"def"}$`) if bl := blocks["print mapvar"]; !printMapvarRe1.MatchString(bl) && !printMapvarRe2.MatchString(bl) { t.Fatalf("print mapvar failed: %s", bl) } - strVarRe := regexp.MustCompile(`\Q = "abc"\E$`) + strVarRe := regexp.MustCompile(`^\$[0-9]+ = (0x[0-9a-f]+\s+)?"abc"$`) if bl := blocks["print strvar"]; !strVarRe.MatchString(bl) { t.Fatalf("print strvar failed: %s", bl) } @@ -263,8 +263,11 @@ func testGdbPython(t *testing.T, cgo bool) { // aggregates from their fields and reverted their printing // back to its original form. - infoLocalsRe := regexp.MustCompile(`slicevar *= *\[\]string *= *{"def"}`) - if bl := blocks["info locals"]; !infoLocalsRe.MatchString(bl) { + infoLocalsRe1 := regexp.MustCompile(`slicevar *= *\[\]string *= *{"def"}`) + // Format output from gdb v8.2 + infoLocalsRe2 := regexp.MustCompile(`^slicevar = .*\nmapvar = .*\nstrvar = 0x[0-9a-f]+ "abc"`) + if bl := blocks["info locals"]; !infoLocalsRe1.MatchString(bl) && + !infoLocalsRe2.MatchString(bl) { t.Fatalf("info locals failed: %s", bl) } @@ -425,11 +428,11 @@ func TestGdbAutotmpTypes(t *testing.T) { // Check that the backtrace matches the source code. 
types := []string{ - "struct []main.astruct;", - "struct bucket<string,main.astruct>;", - "struct hash<string,main.astruct>;", - "struct main.astruct;", - "typedef struct hash<string,main.astruct> * map[string]main.astruct;", + "[]main.astruct;", + "bucket<string,main.astruct>;", + "hash<string,main.astruct>;", + "main.astruct;", + "hash<string,main.astruct> * map[string]main.astruct;", } for _, name := range types { if !strings.Contains(sgot, name) { @@ -484,13 +487,13 @@ func TestGdbConst(t *testing.T) { "-ex", "print main.aConstant", "-ex", "print main.largeConstant", "-ex", "print main.minusOne", - "-ex", "print 'runtime._MSpanInUse'", + "-ex", "print 'runtime.mSpanInUse'", "-ex", "print 'runtime._PageSize'", filepath.Join(dir, "a.exe"), } got, _ := exec.Command("gdb", args...).CombinedOutput() - sgot := strings.Replace(string(got), "\r\n", "\n", -1) + sgot := strings.ReplaceAll(string(got), "\r\n", "\n") t.Logf("output %q", sgot) diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go index d5f78baded..8b8f4dcb1e 100644 --- a/src/runtime/runtime1.go +++ b/src/runtime/runtime1.go @@ -305,7 +305,6 @@ var debug struct { gccheckmark int32 gcpacertrace int32 gcshrinkstackoff int32 - gcrescanstacks int32 gcstoptheworld int32 gctrace int32 invalidptr int32 @@ -323,7 +322,6 @@ var dbgvars = []dbgVar{ {"gccheckmark", &debug.gccheckmark}, {"gcpacertrace", &debug.gcpacertrace}, {"gcshrinkstackoff", &debug.gcshrinkstackoff}, - {"gcrescanstacks", &debug.gcrescanstacks}, {"gcstoptheworld", &debug.gcstoptheworld}, {"gctrace", &debug.gctrace}, {"invalidptr", &debug.invalidptr}, @@ -416,7 +414,9 @@ func timediv(v int64, div int32, rem *int32) int32 { for bit := 30; bit >= 0; bit-- { if v >= int64(div)<<uint(bit) { v = v - (int64(div) << uint(bit)) - res += 1 << uint(bit) + // Before this for loop, res was 0, thus all these + // power of 2 increments are now just bitsets. + res |= 1 << uint(bit) } } if v >= int64(div) { diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 259bb376ae..bbb66bb8fa 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -424,7 +424,6 @@ type m struct { locks int32 dying int32 profilehz int32 - helpgc int32 spinning bool // m is out of work and is actively looking for work blocked bool // m is blocked on a note inwb bool // m is executing a write barrier @@ -580,6 +579,18 @@ type schedt struct { runq gQueue runqsize int32 + // disable controls selective disabling of the scheduler. + // + // Use schedEnableUser to control this. + // + // disable is protected by sched.lock. + disable struct { + // user disables scheduling of user goroutines. + user bool + runnable gQueue // pending runnable Gs + n int32 // length of runnable + } + // Global cache of dead G's. gFree struct { lock mutex @@ -639,14 +650,16 @@ type _func struct { entry uintptr // start pc nameoff int32 // function name - args int32 // in/out args size - funcID funcID // set for certain special runtime functions + args int32 // in/out args size + deferreturn uint32 // offset of a deferreturn block from entry, if any. 
pcsp int32 pcfile int32 pcln int32 npcdata int32 - nfuncdata int32 + funcID funcID // set for certain special runtime functions + _ [2]int8 // unused + nfuncdata uint8 // must be last } // layout of Itab known to compilers diff --git a/src/runtime/runtime_unix_test.go b/src/runtime/runtime_unix_test.go index e91216365e..b0cbbbe3e6 100644 --- a/src/runtime/runtime_unix_test.go +++ b/src/runtime/runtime_unix_test.go @@ -6,7 +6,7 @@ // We need a fast system call to provoke the race, // and Close(-1) is nearly universally fast. -// +build darwin dragonfly freebsd linux netbsd openbsd plan9 +// +build aix darwin dragonfly freebsd linux netbsd openbsd plan9 package runtime_test diff --git a/src/runtime/select.go b/src/runtime/select.go index 3a3ac6b7ac..2729c2ecf9 100644 --- a/src/runtime/select.go +++ b/src/runtime/select.go @@ -245,7 +245,7 @@ loop: case caseSend: if raceenabled { - racereadpc(unsafe.Pointer(c), cas.pc, chansendpc) + racereadpc(c.raceaddr(), cas.pc, chansendpc) } if c.closed != 0 { goto sclose @@ -462,7 +462,7 @@ rclose: typedmemclr(c.elemtype, cas.elem) } if raceenabled { - raceacquire(unsafe.Pointer(c)) + raceacquire(c.raceaddr()) } goto retc diff --git a/src/runtime/semasleep_test.go b/src/runtime/semasleep_test.go new file mode 100644 index 0000000000..5b2cc64483 --- /dev/null +++ b/src/runtime/semasleep_test.go @@ -0,0 +1,88 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !nacl,!plan9,!windows,!js + +package runtime_test + +import ( + "internal/testenv" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "syscall" + "testing" + "time" +) + +// Issue #27250. Spurious wakeups to pthread_cond_timedwait_relative_np +// shouldn't cause semasleep to retry with the same timeout which would +// cause indefinite spinning. +func TestSpuriousWakeupsNeverHangSemasleep(t *testing.T) { + testenv.MustHaveGoBuild(t) + tempDir, err := ioutil.TempDir("", "issue-27250") + if err != nil { + t.Fatalf("Failed to create the temp directory: %v", err) + } + defer os.RemoveAll(tempDir) + + repro := ` + package main + + import "time" + + func main() { + <-time.After(1 * time.Second) + } + ` + mainPath := filepath.Join(tempDir, "main.go") + if err := ioutil.WriteFile(mainPath, []byte(repro), 0644); err != nil { + t.Fatalf("Failed to create temp file for repro.go: %v", err) + } + binaryPath := filepath.Join(tempDir, "binary") + + // Build the binary so that we can send the signal to its PID. + out, err := exec.Command(testenv.GoToolPath(t), "build", "-o", binaryPath, mainPath).CombinedOutput() + if err != nil { + t.Fatalf("Failed to compile the binary: err: %v\nOutput: %s\n", err, out) + } + if err := os.Chmod(binaryPath, 0755); err != nil { + t.Fatalf("Failed to chmod binary: %v", err) + } + + // Now run the binary. + cmd := exec.Command(binaryPath) + if err := cmd.Start(); err != nil { + t.Fatalf("Failed to start command: %v", err) + } + doneCh := make(chan error, 1) + go func() { + doneCh <- cmd.Wait() + }() + + // With the repro running, we can continuously send to it + // a non-terminal signal such as SIGIO, to spuriously + // wakeup pthread_cond_timedwait_relative_np. + unfixedTimer := time.NewTimer(2 * time.Second) + for { + select { + case <-time.After(200 * time.Millisecond): + // Send the pesky signal that toggles spinning + // indefinitely if #27520 is not fixed. 
+ cmd.Process.Signal(syscall.SIGIO) + + case <-unfixedTimer.C: + t.Error("Program failed to return on time and has to be killed, issue #27520 still exists") + cmd.Process.Signal(syscall.SIGKILL) + return + + case err := <-doneCh: + if err != nil { + t.Fatalf("The program returned but unfortunately with an error: %v", err) + } + return + } + } +} diff --git a/src/runtime/signal_aix_ppc64.go b/src/runtime/signal_aix_ppc64.go new file mode 100644 index 0000000000..c17563e2a5 --- /dev/null +++ b/src/runtime/signal_aix_ppc64.go @@ -0,0 +1,85 @@ +/// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix + +package runtime + +import ( + "runtime/internal/sys" + "unsafe" +) + +type sigctxt struct { + info *siginfo + ctxt unsafe.Pointer +} + +//go:nosplit +//go:nowritebarrierrec +func (c *sigctxt) regs() *context64 { return &(*ucontext)(c.ctxt).uc_mcontext } + +func (c *sigctxt) r0() uint64 { return c.regs().gpr[0] } +func (c *sigctxt) r1() uint64 { return c.regs().gpr[1] } +func (c *sigctxt) r2() uint64 { return c.regs().gpr[2] } +func (c *sigctxt) r3() uint64 { return c.regs().gpr[3] } +func (c *sigctxt) r4() uint64 { return c.regs().gpr[4] } +func (c *sigctxt) r5() uint64 { return c.regs().gpr[5] } +func (c *sigctxt) r6() uint64 { return c.regs().gpr[6] } +func (c *sigctxt) r7() uint64 { return c.regs().gpr[7] } +func (c *sigctxt) r8() uint64 { return c.regs().gpr[8] } +func (c *sigctxt) r9() uint64 { return c.regs().gpr[9] } +func (c *sigctxt) r10() uint64 { return c.regs().gpr[10] } +func (c *sigctxt) r11() uint64 { return c.regs().gpr[11] } +func (c *sigctxt) r12() uint64 { return c.regs().gpr[12] } +func (c *sigctxt) r13() uint64 { return c.regs().gpr[13] } +func (c *sigctxt) r14() uint64 { return c.regs().gpr[14] } +func (c *sigctxt) r15() uint64 { return c.regs().gpr[15] } +func (c *sigctxt) r16() uint64 { return c.regs().gpr[16] } +func (c *sigctxt) r17() uint64 { return c.regs().gpr[17] } +func (c *sigctxt) r18() uint64 { return c.regs().gpr[18] } +func (c *sigctxt) r19() uint64 { return c.regs().gpr[19] } +func (c *sigctxt) r20() uint64 { return c.regs().gpr[20] } +func (c *sigctxt) r21() uint64 { return c.regs().gpr[21] } +func (c *sigctxt) r22() uint64 { return c.regs().gpr[22] } +func (c *sigctxt) r23() uint64 { return c.regs().gpr[23] } +func (c *sigctxt) r24() uint64 { return c.regs().gpr[24] } +func (c *sigctxt) r25() uint64 { return c.regs().gpr[25] } +func (c *sigctxt) r26() uint64 { return c.regs().gpr[26] } +func (c *sigctxt) r27() uint64 { return c.regs().gpr[27] } +func (c *sigctxt) r28() uint64 { return c.regs().gpr[28] } +func (c *sigctxt) r29() uint64 { return c.regs().gpr[29] } +func (c *sigctxt) r30() uint64 { return c.regs().gpr[30] } +func (c *sigctxt) r31() uint64 { return c.regs().gpr[31] } +func (c *sigctxt) sp() uint64 { return c.regs().gpr[1] } + +//go:nosplit +//go:nowritebarrierrec +func (c *sigctxt) pc() uint64 { return c.regs().iar } + +func (c *sigctxt) ctr() uint64 { return c.regs().ctr } +func (c *sigctxt) link() uint64 { return c.regs().lr } +func (c *sigctxt) xer() uint32 { return c.regs().xer } +func (c *sigctxt) ccr() uint32 { return c.regs().cr } +func (c *sigctxt) fpscr() uint32 { return c.regs().fpscr } +func (c *sigctxt) fpscrx() uint32 { return c.regs().fpscrx } + +// TODO(aix): find trap equivalent +func (c *sigctxt) trap() uint32 { return 0x0 } + +func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) } +func (c 
*sigctxt) sigaddr() uint64 { return uint64(c.info.si_addr) } +func (c *sigctxt) fault() uintptr { return uintptr(c.sigaddr()) } + +func (c *sigctxt) set_r0(x uint64) { c.regs().gpr[0] = x } +func (c *sigctxt) set_r12(x uint64) { c.regs().gpr[12] = x } +func (c *sigctxt) set_r30(x uint64) { c.regs().gpr[30] = x } +func (c *sigctxt) set_pc(x uint64) { c.regs().iar = x } +func (c *sigctxt) set_sp(x uint64) { c.regs().gpr[1] = x } +func (c *sigctxt) set_link(x uint64) { c.regs().lr = x } + +func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) } +func (c *sigctxt) set_sigaddr(x uint64) { + *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x) +} diff --git a/src/runtime/signal_ppc64x.go b/src/runtime/signal_ppc64x.go index 5a1a5cae60..cac1a23c9f 100644 --- a/src/runtime/signal_ppc64x.go +++ b/src/runtime/signal_ppc64x.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build linux +// +build aix linux // +build ppc64 ppc64le package runtime diff --git a/src/runtime/signal_sighandler.go b/src/runtime/signal_sighandler.go index 5a734f9050..6e71e41f52 100644 --- a/src/runtime/signal_sighandler.go +++ b/src/runtime/signal_sighandler.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris package runtime diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go index 6cd9f8ddb6..12a938c8c9 100644 --- a/src/runtime/signal_unix.go +++ b/src/runtime/signal_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package runtime diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go index a63450038d..873ce66abe 100644 --- a/src/runtime/signal_windows.go +++ b/src/runtime/signal_windows.go @@ -27,7 +27,7 @@ func lastcontinuetramp() func initExceptionHandler() { stdcall2(_AddVectoredExceptionHandler, 1, funcPC(exceptiontramp)) - if _AddVectoredContinueHandler == nil || unsafe.Sizeof(&_AddVectoredContinueHandler) == 4 { + if _AddVectoredContinueHandler == nil || GOARCH == "386" { // use SetUnhandledExceptionFilter for windows-386 or // if VectoredContinueHandler is unavailable. // note: SetUnhandledExceptionFilter handler won't be called, if debugging. @@ -177,9 +177,15 @@ func lastcontinuehandler(info *exceptionrecord, r *context, gp *g) int32 { } print("\n") + // TODO(jordanrh1): This may be needed for 386/AMD64 as well. + if GOARCH == "arm" { + _g_.m.throwing = 1 + _g_.m.caughtsig.set(gp) + } + level, _, docrash := gotraceback() if level > 0 { - tracebacktrap(r.ip(), r.sp(), 0, gp) + tracebacktrap(r.ip(), r.sp(), r.lr(), gp) tracebackothers(gp) dumpregs(r) } diff --git a/src/runtime/sigtab_aix.go b/src/runtime/sigtab_aix.go new file mode 100644 index 0000000000..42e5606ab6 --- /dev/null +++ b/src/runtime/sigtab_aix.go @@ -0,0 +1,264 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime + +var sigtable = [...]sigTabT{ + 0: {0, "SIGNONE: no trap"}, + _SIGHUP: {_SigNotify + _SigKill, "SIGHUP: terminal line hangup"}, + _SIGINT: {_SigNotify + _SigKill, "SIGINT: interrupt"}, + _SIGQUIT: {_SigNotify + _SigThrow, "SIGQUIT: quit"}, + _SIGILL: {_SigThrow + _SigUnblock, "SIGILL: illegal instruction"}, + _SIGTRAP: {_SigThrow + _SigUnblock, "SIGTRAP: trace trap"}, + _SIGABRT: {_SigNotify + _SigThrow, "SIGABRT: abort"}, + _SIGBUS: {_SigPanic + _SigUnblock, "SIGBUS: bus error"}, + _SIGFPE: {_SigPanic + _SigUnblock, "SIGFPE: floating-point exception"}, + _SIGKILL: {0, "SIGKILL: kill"}, + _SIGUSR1: {_SigNotify, "SIGUSR1: user-defined signal 1"}, + _SIGSEGV: {_SigPanic + _SigUnblock, "SIGSEGV: segmentation violation"}, + _SIGUSR2: {_SigNotify, "SIGUSR2: user-defined signal 2"}, + _SIGPIPE: {_SigNotify, "SIGPIPE: write to broken pipe"}, + _SIGALRM: {_SigNotify, "SIGALRM: alarm clock"}, + _SIGTERM: {_SigNotify + _SigKill, "SIGTERM: termination"}, + _SIGCHLD: {_SigNotify + _SigUnblock, "SIGCHLD: child status has changed"}, + _SIGCONT: {_SigNotify + _SigDefault, "SIGCONT: continue"}, + _SIGSTOP: {0, "SIGSTOP: stop"}, + _SIGTSTP: {_SigNotify + _SigDefault, "SIGTSTP: keyboard stop"}, + _SIGTTIN: {_SigNotify + _SigDefault, "SIGTTIN: background read from tty"}, + _SIGTTOU: {_SigNotify + _SigDefault, "SIGTTOU: background write to tty"}, + _SIGURG: {_SigNotify, "SIGURG: urgent condition on socket"}, + _SIGXCPU: {_SigNotify, "SIGXCPU: cpu limit exceeded"}, + _SIGXFSZ: {_SigNotify, "SIGXFSZ: file size limit exceeded"}, + _SIGVTALRM: {_SigNotify, "SIGVTALRM: virtual alarm clock"}, + _SIGPROF: {_SigNotify + _SigUnblock, "SIGPROF: profiling alarm clock"}, + _SIGWINCH: {_SigNotify, "SIGWINCH: window size change"}, + _SIGSYS: {_SigThrow, "SIGSYS: bad system call"}, + _SIGIO: {_SigNotify, "SIGIO: i/o now possible"}, + _SIGPWR: {_SigNotify, "SIGPWR: power failure restart"}, + _SIGEMT: {_SigThrow, "SIGEMT: emulate instruction executed"}, + _SIGWAITING: {0, "SIGWAITING: reserved signal no longer used by"}, + 26: {_SigNotify, "signal 26"}, + 27: {_SigNotify, "signal 27"}, + 33: {_SigNotify, "signal 33"}, + 35: {_SigNotify, "signal 35"}, + 36: {_SigNotify, "signal 36"}, + 37: {_SigNotify, "signal 37"}, + 38: {_SigNotify, "signal 38"}, + 40: {_SigNotify, "signal 40"}, + 41: {_SigNotify, "signal 41"}, + 42: {_SigNotify, "signal 42"}, + 43: {_SigNotify, "signal 43"}, + 44: {_SigNotify, "signal 44"}, + 45: {_SigNotify, "signal 45"}, + 46: {_SigNotify, "signal 46"}, + 47: {_SigNotify, "signal 47"}, + 48: {_SigNotify, "signal 48"}, + 49: {_SigNotify, "signal 49"}, + 50: {_SigNotify, "signal 50"}, + 51: {_SigNotify, "signal 51"}, + 52: {_SigNotify, "signal 52"}, + 53: {_SigNotify, "signal 53"}, + 54: {_SigNotify, "signal 54"}, + 55: {_SigNotify, "signal 55"}, + 56: {_SigNotify, "signal 56"}, + 57: {_SigNotify, "signal 57"}, + 58: {_SigNotify, "signal 58"}, + 59: {_SigNotify, "signal 59"}, + 60: {_SigNotify, "signal 60"}, + 61: {_SigNotify, "signal 61"}, + 62: {_SigNotify, "signal 62"}, + 63: {_SigNotify, "signal 63"}, + 64: {_SigNotify, "signal 64"}, + 65: {_SigNotify, "signal 65"}, + 66: {_SigNotify, "signal 66"}, + 67: {_SigNotify, "signal 67"}, + 68: {_SigNotify, "signal 68"}, + 69: {_SigNotify, "signal 69"}, + 70: {_SigNotify, "signal 70"}, + 71: {_SigNotify, "signal 71"}, + 72: {_SigNotify, "signal 72"}, + 73: {_SigNotify, "signal 73"}, + 74: {_SigNotify, "signal 74"}, + 75: {_SigNotify, "signal 75"}, + 76: {_SigNotify, "signal 76"}, + 77: {_SigNotify, "signal 77"}, + 78: {_SigNotify, "signal 
78"}, + 79: {_SigNotify, "signal 79"}, + 80: {_SigNotify, "signal 80"}, + 81: {_SigNotify, "signal 81"}, + 82: {_SigNotify, "signal 82"}, + 83: {_SigNotify, "signal 83"}, + 84: {_SigNotify, "signal 84"}, + 85: {_SigNotify, "signal 85"}, + 86: {_SigNotify, "signal 86"}, + 87: {_SigNotify, "signal 87"}, + 88: {_SigNotify, "signal 88"}, + 89: {_SigNotify, "signal 89"}, + 90: {_SigNotify, "signal 90"}, + 91: {_SigNotify, "signal 91"}, + 92: {_SigNotify, "signal 92"}, + 93: {_SigNotify, "signal 93"}, + 94: {_SigNotify, "signal 94"}, + 95: {_SigNotify, "signal 95"}, + 96: {_SigNotify, "signal 96"}, + 97: {_SigNotify, "signal 97"}, + 98: {_SigNotify, "signal 98"}, + 99: {_SigNotify, "signal 99"}, + 100: {_SigNotify, "signal 100"}, + 101: {_SigNotify, "signal 101"}, + 102: {_SigNotify, "signal 102"}, + 103: {_SigNotify, "signal 103"}, + 104: {_SigNotify, "signal 104"}, + 105: {_SigNotify, "signal 105"}, + 106: {_SigNotify, "signal 106"}, + 107: {_SigNotify, "signal 107"}, + 108: {_SigNotify, "signal 108"}, + 109: {_SigNotify, "signal 109"}, + 110: {_SigNotify, "signal 110"}, + 111: {_SigNotify, "signal 111"}, + 112: {_SigNotify, "signal 112"}, + 113: {_SigNotify, "signal 113"}, + 114: {_SigNotify, "signal 114"}, + 115: {_SigNotify, "signal 115"}, + 116: {_SigNotify, "signal 116"}, + 117: {_SigNotify, "signal 117"}, + 118: {_SigNotify, "signal 118"}, + 119: {_SigNotify, "signal 119"}, + 120: {_SigNotify, "signal 120"}, + 121: {_SigNotify, "signal 121"}, + 122: {_SigNotify, "signal 122"}, + 123: {_SigNotify, "signal 123"}, + 124: {_SigNotify, "signal 124"}, + 125: {_SigNotify, "signal 125"}, + 126: {_SigNotify, "signal 126"}, + 127: {_SigNotify, "signal 127"}, + 128: {_SigNotify, "signal 128"}, + 129: {_SigNotify, "signal 129"}, + 130: {_SigNotify, "signal 130"}, + 131: {_SigNotify, "signal 131"}, + 132: {_SigNotify, "signal 132"}, + 133: {_SigNotify, "signal 133"}, + 134: {_SigNotify, "signal 134"}, + 135: {_SigNotify, "signal 135"}, + 136: {_SigNotify, "signal 136"}, + 137: {_SigNotify, "signal 137"}, + 138: {_SigNotify, "signal 138"}, + 139: {_SigNotify, "signal 139"}, + 140: {_SigNotify, "signal 140"}, + 141: {_SigNotify, "signal 141"}, + 142: {_SigNotify, "signal 142"}, + 143: {_SigNotify, "signal 143"}, + 144: {_SigNotify, "signal 144"}, + 145: {_SigNotify, "signal 145"}, + 146: {_SigNotify, "signal 146"}, + 147: {_SigNotify, "signal 147"}, + 148: {_SigNotify, "signal 148"}, + 149: {_SigNotify, "signal 149"}, + 150: {_SigNotify, "signal 150"}, + 151: {_SigNotify, "signal 151"}, + 152: {_SigNotify, "signal 152"}, + 153: {_SigNotify, "signal 153"}, + 154: {_SigNotify, "signal 154"}, + 155: {_SigNotify, "signal 155"}, + 156: {_SigNotify, "signal 156"}, + 157: {_SigNotify, "signal 157"}, + 158: {_SigNotify, "signal 158"}, + 159: {_SigNotify, "signal 159"}, + 160: {_SigNotify, "signal 160"}, + 161: {_SigNotify, "signal 161"}, + 162: {_SigNotify, "signal 162"}, + 163: {_SigNotify, "signal 163"}, + 164: {_SigNotify, "signal 164"}, + 165: {_SigNotify, "signal 165"}, + 166: {_SigNotify, "signal 166"}, + 167: {_SigNotify, "signal 167"}, + 168: {_SigNotify, "signal 168"}, + 169: {_SigNotify, "signal 169"}, + 170: {_SigNotify, "signal 170"}, + 171: {_SigNotify, "signal 171"}, + 172: {_SigNotify, "signal 172"}, + 173: {_SigNotify, "signal 173"}, + 174: {_SigNotify, "signal 174"}, + 175: {_SigNotify, "signal 175"}, + 176: {_SigNotify, "signal 176"}, + 177: {_SigNotify, "signal 177"}, + 178: {_SigNotify, "signal 178"}, + 179: {_SigNotify, "signal 179"}, + 180: {_SigNotify, "signal 180"}, + 181: {_SigNotify, 
"signal 181"}, + 182: {_SigNotify, "signal 182"}, + 183: {_SigNotify, "signal 183"}, + 184: {_SigNotify, "signal 184"}, + 185: {_SigNotify, "signal 185"}, + 186: {_SigNotify, "signal 186"}, + 187: {_SigNotify, "signal 187"}, + 188: {_SigNotify, "signal 188"}, + 189: {_SigNotify, "signal 189"}, + 190: {_SigNotify, "signal 190"}, + 191: {_SigNotify, "signal 191"}, + 192: {_SigNotify, "signal 192"}, + 193: {_SigNotify, "signal 193"}, + 194: {_SigNotify, "signal 194"}, + 195: {_SigNotify, "signal 195"}, + 196: {_SigNotify, "signal 196"}, + 197: {_SigNotify, "signal 197"}, + 198: {_SigNotify, "signal 198"}, + 199: {_SigNotify, "signal 199"}, + 200: {_SigNotify, "signal 200"}, + 201: {_SigNotify, "signal 201"}, + 202: {_SigNotify, "signal 202"}, + 203: {_SigNotify, "signal 203"}, + 204: {_SigNotify, "signal 204"}, + 205: {_SigNotify, "signal 205"}, + 206: {_SigNotify, "signal 206"}, + 207: {_SigNotify, "signal 207"}, + 208: {_SigNotify, "signal 208"}, + 209: {_SigNotify, "signal 209"}, + 210: {_SigNotify, "signal 210"}, + 211: {_SigNotify, "signal 211"}, + 212: {_SigNotify, "signal 212"}, + 213: {_SigNotify, "signal 213"}, + 214: {_SigNotify, "signal 214"}, + 215: {_SigNotify, "signal 215"}, + 216: {_SigNotify, "signal 216"}, + 217: {_SigNotify, "signal 217"}, + 218: {_SigNotify, "signal 218"}, + 219: {_SigNotify, "signal 219"}, + 220: {_SigNotify, "signal 220"}, + 221: {_SigNotify, "signal 221"}, + 222: {_SigNotify, "signal 222"}, + 223: {_SigNotify, "signal 223"}, + 224: {_SigNotify, "signal 224"}, + 225: {_SigNotify, "signal 225"}, + 226: {_SigNotify, "signal 226"}, + 227: {_SigNotify, "signal 227"}, + 228: {_SigNotify, "signal 228"}, + 229: {_SigNotify, "signal 229"}, + 230: {_SigNotify, "signal 230"}, + 231: {_SigNotify, "signal 231"}, + 232: {_SigNotify, "signal 232"}, + 233: {_SigNotify, "signal 233"}, + 234: {_SigNotify, "signal 234"}, + 235: {_SigNotify, "signal 235"}, + 236: {_SigNotify, "signal 236"}, + 237: {_SigNotify, "signal 237"}, + 238: {_SigNotify, "signal 238"}, + 239: {_SigNotify, "signal 239"}, + 240: {_SigNotify, "signal 240"}, + 241: {_SigNotify, "signal 241"}, + 242: {_SigNotify, "signal 242"}, + 243: {_SigNotify, "signal 243"}, + 244: {_SigNotify, "signal 244"}, + 245: {_SigNotify, "signal 245"}, + 246: {_SigNotify, "signal 246"}, + 247: {_SigNotify, "signal 247"}, + 248: {_SigNotify, "signal 248"}, + 249: {_SigNotify, "signal 249"}, + 250: {_SigNotify, "signal 250"}, + 251: {_SigNotify, "signal 251"}, + 252: {_SigNotify, "signal 252"}, + 253: {_SigNotify, "signal 253"}, + 254: {_SigNotify, "signal 254"}, + 255: {_SigNotify, "signal 255"}, +} diff --git a/src/runtime/stack.go b/src/runtime/stack.go index c7bfc0434b..b815aa859e 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -211,7 +211,7 @@ func stackpoolalloc(order uint8) gclinkptr { // Adds stack x to the free pool. Must be called with stackpoolmu held. func stackpoolfree(x gclinkptr, order uint8) { s := spanOfUnchecked(uintptr(x)) - if s.state != _MSpanManual { + if s.state != mSpanManual { throw("freeing stack not in a stack span") } if s.manualFreeList.ptr() == nil { @@ -350,7 +350,7 @@ func stackalloc(n uint32) stack { } var x gclinkptr c := thisg.m.mcache - if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 { + if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" { // c == nil can happen in the guts of exitsyscall or // procresize. Just get a stack from the global pool. 
// Also don't touch stackcache during gc @@ -445,7 +445,7 @@ func stackfree(stk stack) { } x := gclinkptr(v) c := gp.m.mcache - if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 { + if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" { lock(&stackpoolmu) stackpoolfree(x, order) unlock(&stackpoolmu) @@ -459,7 +459,7 @@ func stackfree(stk stack) { } } else { s := spanOfUnchecked(uintptr(v)) - if s.state != _MSpanManual { + if s.state != mSpanManual { println(hex(s.base()), v) throw("bad span state") } @@ -625,7 +625,7 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool { return true } - locals, args := getStackMap(frame, &adjinfo.cache, true) + locals, args, objs := getStackMap(frame, &adjinfo.cache, true) // Adjust local variables if stack frame has been allocated. if locals.n > 0 { @@ -663,6 +663,42 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool { } adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{}) } + + // Adjust pointers in all stack objects (whether they are live or not). + // See comments in mgcmark.go:scanframeworker. + if frame.varp != 0 { + for _, obj := range objs { + off := obj.off + base := frame.varp // locals base pointer + if off >= 0 { + base = frame.argp // arguments and return values base pointer + } + p := base + uintptr(off) + if p < frame.sp { + // Object hasn't been allocated in the frame yet. + // (Happens when the stack bounds check fails and + // we call into morestack.) + continue + } + t := obj.typ + gcdata := t.gcdata + var s *mspan + if t.kind&kindGCProg != 0 { + // See comments in mgcmark.go:scanstack + s = materializeGCProg(t.ptrdata, gcdata) + gcdata = (*byte)(unsafe.Pointer(s.startAddr)) + } + for i := uintptr(0); i < t.ptrdata; i += sys.PtrSize { + if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 { + adjustpointer(adjinfo, unsafe.Pointer(p+i)) + } + } + if s != nil { + dematerializeGCProg(s) + } + } + } + return true } @@ -981,9 +1017,6 @@ func newstack() { // system stack. gcw := &gp.m.p.ptr().gcw scanstack(gp, gcw) - if gcBlackenPromptly { - gcw.dispose() - } gp.gcscandone = true } gp.preemptscan = false @@ -1139,9 +1172,9 @@ func freeStackSpans() { unlock(&stackLarge.lock) } -// getStackMap returns the locals and arguments live pointer maps for -// frame. -func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector) { +// getStackMap returns the locals and arguments live pointer maps, and +// stack object list for frame. +func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) { targetpc := frame.continpc if targetpc == 0 { // Frame is dead. Return empty bitvectors. @@ -1238,9 +1271,33 @@ func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args } } } + + // stack objects. + p := funcdata(f, _FUNCDATA_StackObjects) + if p != nil { + n := *(*uintptr)(p) + p = add(p, sys.PtrSize) + *(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)} + // Note: the noescape above is needed to keep + // getStackMap from from "leaking param content: + // frame". That leak propagates up to getgcmask, then + // GCMask, then verifyGCInfo, which converts the stack + // gcinfo tests into heap gcinfo tests :( + } + return } +// A stackObjectRecord is generated by the compiler for each stack object in a stack frame. +// This record must match the generator code in cmd/compile/internal/gc/ssa.go:emitStackObjects. 
+type stackObjectRecord struct { + // offset in frame + // if negative, offset from varp + // if non-negative, offset from argp + off int + typ *_type +} + //go:nosplit func morestackc() { throw("attempt to execute system stack code on user stack") diff --git a/src/runtime/stubs2.go b/src/runtime/stubs2.go index 02249d0aad..57134f7354 100644 --- a/src/runtime/stubs2.go +++ b/src/runtime/stubs2.go @@ -8,6 +8,7 @@ // +build !nacl // +build !js // +build !darwin +// +build !aix package runtime @@ -25,7 +26,8 @@ func write(fd uintptr, p unsafe.Pointer, n int32) int32 //go:noescape func open(name *byte, mode, perm int32) int32 -func madvise(addr unsafe.Pointer, n uintptr, flags int32) +// return value is only set on linux to be used in osinit() +func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32 // exitThread terminates the current thread, writing *wait = 0 when // the stack is safe to reclaim. diff --git a/src/runtime/stubs3.go b/src/runtime/stubs3.go index 5c0786e411..a9ff689e79 100644 --- a/src/runtime/stubs3.go +++ b/src/runtime/stubs3.go @@ -8,6 +8,7 @@ // +build !nacl // +build !freebsd // +build !darwin +// +build !aix package runtime diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index d90ab86ffa..1dc7ab740e 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -348,6 +348,7 @@ const ( _FUNCDATA_LocalsPointerMaps = 1 _FUNCDATA_InlTree = 2 _FUNCDATA_RegPointerMaps = 3 + _FUNCDATA_StackObjects = 4 _ArgsSizeUnknown = -0x80000000 ) @@ -356,7 +357,7 @@ const ( // Note that in some situations involving plugins, there may be multiple // copies of a particular special runtime function. // Note: this list must match the list in cmd/internal/objabi/funcid.go. -type funcID uint32 +type funcID uint8 const ( funcID_normal funcID = iota // not a special function @@ -855,7 +856,7 @@ func pcdatavalue(f funcInfo, table int32, targetpc uintptr, cache *pcvalueCache) return pcvalue(f, off, targetpc, cache, true) } -func funcdata(f funcInfo, i int32) unsafe.Pointer { +func funcdata(f funcInfo, i uint8) unsafe.Pointer { if i < 0 || i >= f.nfuncdata { return nil } diff --git a/src/runtime/sys_aix_ppc64.s b/src/runtime/sys_aix_ppc64.s new file mode 100644 index 0000000000..38e60f99eb --- /dev/null +++ b/src/runtime/sys_aix_ppc64.s @@ -0,0 +1,201 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix +// +build ppc64 ppc64le + +// +// System calls and other sys.stuff for ppc64, Aix +// + +#include "go_asm.h" +#include "go_tls.h" +#include "textflag.h" +#include "asm_ppc64x.h" + +// This function calls a C function with the function descriptor in R12 +TEXT runtime·callCfunction(SB), NOSPLIT|NOFRAME,$0 + MOVD 0(R12), R12 + MOVD R2, 40(R1) + MOVD 0(R12), R0 + MOVD 8(R12), R2 + MOVD R0, CTR + BR (CTR) + + +// asmsyscall6 calls a library function with a function descriptor +// stored in libcall_fn and store the results in libcall struture +// Up to 6 arguments can be passed to this C function +// Called by runtime.asmcgocall +// It reserves a stack of 288 bytes for the C function. 
+// NOT USING GO CALLING CONVENTION +TEXT runtime·asmsyscall6(SB),NOSPLIT,$256 + MOVD R3, 48(R1) // Save libcall for later + MOVD libcall_fn(R3), R12 + MOVD libcall_args(R3), R9 + MOVD 0(R9), R3 + MOVD 8(R9), R4 + MOVD 16(R9), R5 + MOVD 24(R9), R6 + MOVD 32(R9), R7 + MOVD 40(R9), R8 + BL runtime·callCfunction(SB) + + // Restore R0 and TOC + XOR R0, R0 + MOVD 40(R1), R2 + + // Store result in libcall + MOVD 48(R1), R5 + MOVD R3, (libcall_r1)(R5) + MOVD $-1, R6 + CMP R6, R3 + BNE skiperrno + + // Save errno in libcall + BL runtime·load_g(SB) + MOVD g_m(g), R4 + MOVD (m_mOS + mOS_perrno)(R4), R9 + MOVW 0(R9), R9 + MOVD R9, (libcall_err)(R5) + RET +skiperrno: + // Reset errno if no error has been returned + MOVD R0, (libcall_err)(R5) + RET + + +TEXT runtime·sigfwd(SB),NOSPLIT,$0-32 + MOVW sig+8(FP), R3 + MOVD info+16(FP), R4 + MOVD ctx+24(FP), R5 + MOVD fn+0(FP), R12 + MOVD R12, CTR + BL (CTR) + RET + + +// runtime.sigtramp is a function descriptor to the real sigtramp. +DATA runtime·sigtramp+0(SB)/8, $runtime·_sigtramp(SB) +DATA runtime·sigtramp+8(SB)/8, $TOC(SB) +DATA runtime·sigtramp+16(SB)/8, $0 +GLOBL runtime·sigtramp(SB), NOPTR, $24 + +// This funcion must not have any frame as we want to control how +// every registers are used. +TEXT runtime·_sigtramp(SB),NOSPLIT|NOFRAME,$0 + MOVD LR, R0 + MOVD R0, 16(R1) + // initialize essential registers (just in case) + BL runtime·reginit(SB) + + // Note that we are executing on altsigstack here, so we have + // more stack available than NOSPLIT would have us believe. + // To defeat the linker, we make our own stack frame with + // more space. + SUB $128+FIXED_FRAME, R1 + + // Save registers + MOVD R31, 56(R1) + MOVD g, 64(R1) + MOVD R29, 72(R1) + + BL runtime·load_g(SB) + + // Save m->libcall. We need to do this because we + // might get interrupted by a signal in runtime·asmcgocall. + + // save m->libcall + MOVD g_m(g), R6 + MOVD (m_libcall+libcall_fn)(R6), R7 + MOVD R7, 80(R1) + MOVD (m_libcall+libcall_args)(R6), R7 + MOVD R7, 88(R1) + MOVD (m_libcall+libcall_n)(R6), R7 + MOVD R7, 96(R1) + MOVD (m_libcall+libcall_r1)(R6), R7 + MOVD R7, 104(R1) + MOVD (m_libcall+libcall_r2)(R6), R7 + MOVD R7, 112(R1) + + // save errno, it might be EINTR; stuff we do here might reset it. + MOVD (m_mOS+mOS_perrno)(R6), R8 + MOVD 0(R8), R8 + MOVD R8, 120(R1) + + MOVW R3, FIXED_FRAME+0(R1) + MOVD R4, FIXED_FRAME+8(R1) + MOVD R5, FIXED_FRAME+16(R1) + MOVD $runtime·sigtrampgo(SB), R12 + MOVD R12, CTR + BL (CTR) + + MOVD g_m(g), R6 + // restore libcall + MOVD 80(R1), R7 + MOVD R7, (m_libcall+libcall_fn)(R6) + MOVD 88(R1), R7 + MOVD R7, (m_libcall+libcall_args)(R6) + MOVD 96(R1), R7 + MOVD R7, (m_libcall+libcall_n)(R6) + MOVD 104(R1), R7 + MOVD R7, (m_libcall+libcall_r1)(R6) + MOVD 112(R1), R7 + MOVD R7, (m_libcall+libcall_r2)(R6) + + // restore errno + MOVD (m_mOS+mOS_perrno)(R6), R7 + MOVD 120(R1), R8 + MOVD R8, 0(R7) + + // restore registers + MOVD 56(R1),R31 + MOVD 64(R1),g + MOVD 72(R1),R29 + + // Don't use RET because we need to restore R31 ! + ADD $128+FIXED_FRAME, R1 + MOVD 16(R1), R0 + MOVD R0, LR + BR (LR) + +// runtime.tstart is a function descriptor to the real tstart. +DATA runtime·tstart+0(SB)/8, $runtime·_tstart(SB) +DATA runtime·tstart+8(SB)/8, $TOC(SB) +DATA runtime·tstart+16(SB)/8, $0 +GLOBL runtime·tstart(SB), NOPTR, $24 + +TEXT runtime·_tstart(SB),NOSPLIT,$0 + XOR R0, R0 // reset R0 + + // set g + MOVD m_g0(R3), g + BL runtime·save_g(SB) + MOVD R3, g_m(g) + + // Layout new m scheduler stack on os stack. 
+ MOVD R1, R3 + MOVD R3, (g_stack+stack_hi)(g) + SUB $(const_threadStackSize), R3 // stack size + MOVD R3, (g_stack+stack_lo)(g) + ADD $const__StackGuard, R3 + MOVD R3, g_stackguard0(g) + MOVD R3, g_stackguard1(g) + + BL runtime·mstart(SB) + + MOVD R0, R3 + RET + +// Runs on OS stack, called from runtime·osyield. +TEXT runtime·osyield1(SB),NOSPLIT,$0 + MOVD $libc_sched_yield(SB), R12 + MOVD 0(R12), R12 + MOVD R2, 40(R1) + MOVD 0(R12), R0 + MOVD 8(R12), R2 + MOVD R0, CTR + BL (CTR) + MOVD 40(R1), R2 + RET diff --git a/src/runtime/sys_darwin.go b/src/runtime/sys_darwin.go index 7efbef746c..9b0cc6f935 100644 --- a/src/runtime/sys_darwin.go +++ b/src/runtime/sys_darwin.go @@ -370,5 +370,5 @@ func closeonexec(fd int32) { //go:cgo_import_dynamic libc_pthread_cond_signal pthread_cond_signal "/usr/lib/libSystem.B.dylib" // Magic incantation to get libSystem actually dynamically linked. -// TODO: Why does the code require this? See cmd/compile/internal/ld/go.go:210 +// TODO: Why does the code require this? See cmd/link/internal/ld/go.go //go:cgo_import_dynamic _ _ "/usr/lib/libSystem.B.dylib" diff --git a/src/runtime/sys_dragonfly_amd64.s b/src/runtime/sys_dragonfly_amd64.s index f0eb5f4e21..b771850aaf 100644 --- a/src/runtime/sys_dragonfly_amd64.s +++ b/src/runtime/sys_dragonfly_amd64.s @@ -9,7 +9,7 @@ #include "go_asm.h" #include "go_tls.h" #include "textflag.h" - + TEXT runtime·sys_umtx_sleep(SB),NOSPLIT,$0 MOVQ addr+0(FP), DI // arg 1 - ptr MOVL val+8(FP), SI // arg 2 - value @@ -260,9 +260,11 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVL flags+16(FP), DX MOVQ $75, AX // madvise SYSCALL - // ignore failure - maybe pages are locked + JCC 2(PC) + MOVL $-1, AX + MOVL AX, ret+24(FP) RET - + TEXT runtime·sigaltstack(SB),NOSPLIT,$-8 MOVQ new+0(FP), DI MOVQ old+8(FP), SI diff --git a/src/runtime/sys_freebsd_386.s b/src/runtime/sys_freebsd_386.s index b8f685a323..bc309ba453 100644 --- a/src/runtime/sys_freebsd_386.s +++ b/src/runtime/sys_freebsd_386.s @@ -9,7 +9,7 @@ #include "go_asm.h" #include "go_tls.h" #include "textflag.h" - + TEXT runtime·sys_umtx_op(SB),NOSPLIT,$-4 MOVL $454, AX INT $0x80 @@ -39,7 +39,7 @@ TEXT runtime·thr_start(SB),NOSPLIT,$0 POPAL get_tls(CX) MOVL BX, g(CX) - + MOVL AX, g_m(BX) CALL runtime·stackcheck(SB) // smashes AX CALL runtime·mstart(SB) @@ -163,7 +163,9 @@ TEXT runtime·munmap(SB),NOSPLIT,$-4 TEXT runtime·madvise(SB),NOSPLIT,$-4 MOVL $75, AX // madvise INT $0x80 - // ignore failure - maybe pages are locked + JAE 2(PC) + MOVL $-1, AX + MOVL AX, ret+12(FP) RET TEXT runtime·setitimer(SB), NOSPLIT, $-4 diff --git a/src/runtime/sys_freebsd_amd64.s b/src/runtime/sys_freebsd_amd64.s index be191a0784..55959b3e3a 100644 --- a/src/runtime/sys_freebsd_amd64.s +++ b/src/runtime/sys_freebsd_amd64.s @@ -337,9 +337,11 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVL flags+16(FP), DX MOVQ $75, AX // madvise SYSCALL - // ignore failure - maybe pages are locked + JCC 2(PC) + MOVL $-1, AX + MOVL AX, ret+24(FP) RET - + TEXT runtime·sigaltstack(SB),NOSPLIT,$-8 MOVQ new+0(FP), DI MOVQ old+8(FP), SI diff --git a/src/runtime/sys_freebsd_arm.s b/src/runtime/sys_freebsd_arm.s index 93bf569367..f347b9fa96 100644 --- a/src/runtime/sys_freebsd_arm.s +++ b/src/runtime/sys_freebsd_arm.s @@ -264,14 +264,15 @@ TEXT runtime·munmap(SB),NOSPLIT,$0 RET TEXT runtime·madvise(SB),NOSPLIT,$0 - MOVW addr+0(FP), R0 // arg 1 addr - MOVW n+4(FP), R1 // arg 2 len - MOVW flags+8(FP), R2 // arg 3 flags - MOVW $SYS_madvise, R7 - SWI $0 - // ignore failure - maybe pages are locked + MOVW addr+0(FP), R0 // arg 1 addr + MOVW 
n+4(FP), R1 // arg 2 len + MOVW flags+8(FP), R2 // arg 3 flags + MOVW $SYS_madvise, R7 + SWI $0 + MOVW.CS $-1, R0 + MOVW R0, ret+12(FP) RET - + TEXT runtime·sigaltstack(SB),NOSPLIT|NOFRAME,$0 MOVW new+0(FP), R0 MOVW old+4(FP), R1 diff --git a/src/runtime/sys_linux_386.s b/src/runtime/sys_linux_386.s index 8d5a4ff977..40b55a67eb 100644 --- a/src/runtime/sys_linux_386.s +++ b/src/runtime/sys_linux_386.s @@ -48,7 +48,6 @@ #define SYS_mincore 218 #define SYS_madvise 219 #define SYS_gettid 224 -#define SYS_tkill 238 #define SYS_futex 240 #define SYS_sched_getaffinity 242 #define SYS_set_thread_area 243 @@ -57,6 +56,7 @@ #define SYS_epoll_ctl 255 #define SYS_epoll_wait 256 #define SYS_clock_gettime 265 +#define SYS_tgkill 270 #define SYS_epoll_create1 329 TEXT runtime·exit(SB),NOSPLIT,$0 @@ -155,11 +155,14 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4 RET TEXT runtime·raise(SB),NOSPLIT,$12 + MOVL $SYS_getpid, AX + INVOKE_SYSCALL + MOVL AX, BX // arg 1 pid MOVL $SYS_gettid, AX INVOKE_SYSCALL - MOVL AX, BX // arg 1 tid - MOVL sig+0(FP), CX // arg 2 signal - MOVL $SYS_tkill, AX + MOVL AX, CX // arg 2 tid + MOVL sig+0(FP), DX // arg 3 signal + MOVL $SYS_tgkill, AX INVOKE_SYSCALL RET @@ -424,7 +427,7 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVL n+4(FP), CX MOVL flags+8(FP), DX INVOKE_SYSCALL - // ignore failure - maybe pages are locked + MOVL AX, ret+12(FP) RET // int32 futex(int32 *uaddr, int32 op, int32 val, diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s index 62d80247be..b709f77060 100644 --- a/src/runtime/sys_linux_amd64.s +++ b/src/runtime/sys_linux_amd64.s @@ -36,12 +36,12 @@ #define SYS_sigaltstack 131 #define SYS_arch_prctl 158 #define SYS_gettid 186 -#define SYS_tkill 200 #define SYS_futex 202 #define SYS_sched_getaffinity 204 #define SYS_epoll_create 213 #define SYS_exit_group 231 #define SYS_epoll_ctl 233 +#define SYS_tgkill 234 #define SYS_openat 257 #define SYS_faccessat 269 #define SYS_epoll_pwait 281 @@ -137,11 +137,15 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4 RET TEXT runtime·raise(SB),NOSPLIT,$0 + MOVL $SYS_getpid, AX + SYSCALL + MOVL AX, R12 MOVL $SYS_gettid, AX SYSCALL - MOVL AX, DI // arg 1 tid - MOVL sig+0(FP), SI // arg 2 - MOVL $SYS_tkill, AX + MOVL AX, SI // arg 2 tid + MOVL R12, DI // arg 1 pid + MOVL sig+0(FP), DX // arg 3 + MOVL $SYS_tgkill, AX SYSCALL RET @@ -515,7 +519,7 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVL flags+16(FP), DX MOVQ $SYS_madvise, AX SYSCALL - // ignore failure - maybe pages are locked + MOVL AX, ret+24(FP) RET // int64 futex(int32 *uaddr, int32 op, int32 val, @@ -604,7 +608,7 @@ TEXT runtime·settls(SB),NOSPLIT,$32 // Same as in sys_darwin_386.s:/ugliness, different constant. // DI currently holds m->tls, which must be fs:0x1d0. // See cgo/gcc_android_amd64.c for the derivation of the constant. 
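An aside on the raise hunks above, which repeat one transformation on every linux port: fetch the pid, fetch the tid, then deliver the signal with tgkill instead of tkill, so a tid the kernel has recycled into another process can no longer be signaled by mistake. A minimal linux-only sketch of the same sequence in Go (raiseSignal is an illustrative name, not the runtime's; the settls hunk resumes below):

    package main

    import "syscall"

    // raiseSignal sends sig to the calling thread, mirroring the
    // getpid+gettid+tgkill sequence the stubs above now use.
    func raiseSignal(sig syscall.Signal) error {
        pid, _, _ := syscall.RawSyscall(syscall.SYS_GETPID, 0, 0, 0)
        tid, _, _ := syscall.RawSyscall(syscall.SYS_GETTID, 0, 0, 0)
        // tgkill refuses to deliver unless both the pid and the tid
        // still match, which is what makes it safe against tid reuse.
        if _, _, errno := syscall.RawSyscall(syscall.SYS_TGKILL, pid, tid, uintptr(sig)); errno != 0 {
            return errno
        }
        return nil
    }

    func main() {
        _ = raiseSignal(0) // signal 0: permission/existence check, delivers nothing
    }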
- SUBQ $0x1d0, DI // In android, the tls base + SUBQ $0x1d0, DI // In android, the tls base #else ADDQ $8, DI // ELF wants to use -8(FS) #endif diff --git a/src/runtime/sys_linux_arm.s b/src/runtime/sys_linux_arm.s index aa39732cfb..43a58335c8 100644 --- a/src/runtime/sys_linux_arm.s +++ b/src/runtime/sys_linux_arm.s @@ -36,7 +36,7 @@ #define SYS_setitimer (SYS_BASE + 104) #define SYS_mincore (SYS_BASE + 219) #define SYS_gettid (SYS_BASE + 224) -#define SYS_tkill (SYS_BASE + 238) +#define SYS_tgkill (SYS_BASE + 268) #define SYS_sched_yield (SYS_BASE + 158) #define SYS_nanosleep (SYS_BASE + 162) #define SYS_sched_getaffinity (SYS_BASE + 242) @@ -138,11 +138,15 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4 RET TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0 + MOVW $SYS_getpid, R7 + SWI $0 + MOVW R0, R4 MOVW $SYS_gettid, R7 SWI $0 - // arg 1 tid already in R0 from gettid - MOVW sig+0(FP), R1 // arg 2 - signal - MOVW $SYS_tkill, R7 + MOVW R0, R1 // arg 2 tid + MOVW R4, R0 // arg 1 pid + MOVW sig+0(FP), R2 // arg 3 + MOVW $SYS_tgkill, R7 SWI $0 RET @@ -191,7 +195,7 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVW flags+8(FP), R2 MOVW $SYS_madvise, R7 SWI $0 - // ignore failure - maybe pages are locked + MOVW R0, ret+12(FP) RET TEXT runtime·setitimer(SB),NOSPLIT,$0 diff --git a/src/runtime/sys_linux_arm64.s b/src/runtime/sys_linux_arm64.s index 1c8fce3db6..8b344be8f8 100644 --- a/src/runtime/sys_linux_arm64.s +++ b/src/runtime/sys_linux_arm64.s @@ -36,7 +36,7 @@ #define SYS_getpid 172 #define SYS_gettid 178 #define SYS_kill 129 -#define SYS_tkill 130 +#define SYS_tgkill 131 #define SYS_futex 98 #define SYS_sched_getaffinity 123 #define SYS_exit_group 94 @@ -143,11 +143,15 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4 RET TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0 + MOVD $SYS_getpid, R8 + SVC + MOVW R0, R19 MOVD $SYS_gettid, R8 SVC - MOVW R0, R0 // arg 1 tid - MOVW sig+0(FP), R1 // arg 2 - MOVD $SYS_tkill, R8 + MOVW R0, R1 // arg 2 tid + MOVW R19, R0 // arg 1 pid + MOVW sig+0(FP), R2 // arg 3 + MOVD $SYS_tgkill, R8 SVC RET @@ -397,7 +401,7 @@ TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0 MOVW flags+16(FP), R2 MOVD $SYS_madvise, R8 SVC - // ignore failure - maybe pages are locked + MOVW R0, ret+24(FP) RET // int64 futex(int32 *uaddr, int32 op, int32 val, diff --git a/src/runtime/sys_linux_mips64x.s b/src/runtime/sys_linux_mips64x.s index 8e64f1c562..c45703d228 100644 --- a/src/runtime/sys_linux_mips64x.s +++ b/src/runtime/sys_linux_mips64x.s @@ -35,12 +35,12 @@ #define SYS_madvise 5027 #define SYS_mincore 5026 #define SYS_gettid 5178 -#define SYS_tkill 5192 #define SYS_futex 5194 #define SYS_sched_getaffinity 5196 #define SYS_exit_group 5205 #define SYS_epoll_create 5207 #define SYS_epoll_ctl 5208 +#define SYS_tgkill 5225 #define SYS_openat 5247 #define SYS_epoll_pwait 5272 #define SYS_clock_gettime 5222 @@ -137,11 +137,15 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4 RET TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0 + MOVV $SYS_getpid, R2 + SYSCALL + MOVW R2, R16 MOVV $SYS_gettid, R2 SYSCALL - MOVW R2, R4 // arg 1 tid - MOVW sig+0(FP), R5 // arg 2 - MOVV $SYS_tkill, R2 + MOVW R2, R5 // arg 2 tid + MOVW R16, R4 // arg 1 pid + MOVW sig+0(FP), R6 // arg 3 + MOVV $SYS_tgkill, R2 SYSCALL RET @@ -287,7 +291,7 @@ TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0 MOVW flags+16(FP), R6 MOVV $SYS_madvise, R2 SYSCALL - // ignore failure - maybe pages are locked + MOVW R2, ret+24(FP) RET // int64 futex(int32 *uaddr, int32 op, int32 val, diff --git a/src/runtime/sys_linux_mipsx.s b/src/runtime/sys_linux_mipsx.s index a6bca3bebd..f362b0f3f1 100644 --- 
a/src/runtime/sys_linux_mipsx.s
+++ b/src/runtime/sys_linux_mipsx.s
@@ -35,7 +35,6 @@
 #define SYS_madvise 4218
 #define SYS_mincore 4217
 #define SYS_gettid 4222
-#define SYS_tkill 4236
 #define SYS_futex 4238
 #define SYS_sched_getaffinity 4240
 #define SYS_exit_group 4246
@@ -43,6 +42,7 @@
 #define SYS_epoll_ctl 4249
 #define SYS_epoll_wait 4250
 #define SYS_clock_gettime 4263
+#define SYS_tgkill 4266
 #define SYS_epoll_create1 4326
 
 TEXT runtime·exit(SB),NOSPLIT,$0-4
@@ -135,11 +135,15 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4
 RET
 
 TEXT runtime·raise(SB),NOSPLIT,$0-4
+ MOVW $SYS_getpid, R2
+ SYSCALL
+ MOVW R2, R16
 MOVW $SYS_gettid, R2
 SYSCALL
- MOVW R2, R4 // arg 1 tid
- MOVW sig+0(FP), R5 // arg 2
- MOVW $SYS_tkill, R2
+ MOVW R2, R5 // arg 2 tid
+ MOVW R16, R4 // arg 1 pid
+ MOVW sig+0(FP), R6 // arg 3
+ MOVW $SYS_tgkill, R2
 SYSCALL
 RET
 
@@ -298,13 +302,13 @@ TEXT runtime·munmap(SB),NOSPLIT,$0-8
 UNDEF // crash
 RET
 
-TEXT runtime·madvise(SB),NOSPLIT,$0-12
+TEXT runtime·madvise(SB),NOSPLIT,$0-16
 MOVW addr+0(FP), R4
 MOVW n+4(FP), R5
 MOVW flags+8(FP), R6
 MOVW $SYS_madvise, R2
 SYSCALL
- // ignore failure - maybe pages are locked
+ MOVW R2, ret+12(FP)
 RET
 
 // int32 futex(int32 *uaddr, int32 op, int32 val, struct timespec *timeout, int32 *uaddr2, int32 val2);
diff --git a/src/runtime/sys_linux_ppc64x.s b/src/runtime/sys_linux_ppc64x.s
index 075adf2368..ed79b69257 100644
--- a/src/runtime/sys_linux_ppc64x.s
+++ b/src/runtime/sys_linux_ppc64x.s
@@ -36,7 +36,6 @@
 #define SYS_madvise 205
 #define SYS_mincore 206
 #define SYS_gettid 207
-#define SYS_tkill 208
 #define SYS_futex 221
 #define SYS_sched_getaffinity 223
 #define SYS_exit_group 234
@@ -44,6 +43,7 @@
 #define SYS_epoll_ctl 237
 #define SYS_epoll_wait 238
 #define SYS_clock_gettime 246
+#define SYS_tgkill 250
 #define SYS_epoll_create1 315
 
 TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
@@ -123,10 +123,13 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4
 RET
 
 TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0
+ SYSCALL $SYS_getpid
+ MOVW R3, R14
 SYSCALL $SYS_gettid
- MOVW R3, R3 // arg 1 tid
- MOVW sig+0(FP), R4 // arg 2
- SYSCALL $SYS_tkill
+ MOVW R3, R4 // arg 2 tid
+ MOVW R14, R3 // arg 1 pid
+ MOVW sig+0(FP), R5 // arg 3
+ SYSCALL $SYS_tgkill
 RET
 
 TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0
@@ -451,7 +454,7 @@ TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0
 MOVD n+8(FP), R4
 MOVW flags+16(FP), R5
 SYSCALL $SYS_madvise
- // ignore failure - maybe pages are locked
+ MOVW R3, ret+24(FP)
 RET
 
 // int64 futex(int32 *uaddr, int32 op, int32 val,
diff --git a/src/runtime/sys_linux_s390x.s b/src/runtime/sys_linux_s390x.s
index 1ff110c232..c79ceea751 100644
--- a/src/runtime/sys_linux_s390x.s
+++ b/src/runtime/sys_linux_s390x.s
@@ -31,9 +31,9 @@
 #define SYS_madvise 219
 #define SYS_mincore 218
 #define SYS_gettid 236
-#define SYS_tkill 237
 #define SYS_futex 238
 #define SYS_sched_getaffinity 240
+#define SYS_tgkill 241
 #define SYS_exit_group 248
 #define SYS_epoll_create 249
 #define SYS_epoll_ctl 250
@@ -129,11 +129,15 @@ TEXT runtime·gettid(SB),NOSPLIT,$0-4
 RET
 
 TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0
+ MOVW $SYS_getpid, R1
+ SYSCALL
+ MOVW R2, R10
 MOVW $SYS_gettid, R1
 SYSCALL
- MOVW R2, R2 // arg 1 tid
- MOVW sig+0(FP), R3 // arg 2
- MOVW $SYS_tkill, R1
+ MOVW R2, R3 // arg 2 tid
+ MOVW R10, R2 // arg 1 pid
+ MOVW sig+0(FP), R4 // arg 3
+ MOVW $SYS_tgkill, R1
 SYSCALL
 RET
 
@@ -286,7 +290,7 @@ TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0
 MOVW flags+16(FP), R4
 MOVW $SYS_madvise, R1
 SYSCALL
- // ignore failure - maybe pages are locked
+ MOVW R2, ret+24(FP)
 RET
 
 // int64 futex(int32 *uaddr, int32 op, int32
val, diff --git a/src/runtime/sys_nacl_386.s b/src/runtime/sys_nacl_386.s index cdc8ff1a02..24eaeb238c 100644 --- a/src/runtime/sys_nacl_386.s +++ b/src/runtime/sys_nacl_386.s @@ -266,7 +266,7 @@ TEXT runtime·nacl_clock_gettime(SB),NOSPLIT,$8 NACL_SYSCALL(SYS_clock_gettime) MOVL AX, ret+8(FP) RET - + TEXT runtime·nanotime(SB),NOSPLIT,$20 MOVL $0, 0(SP) // real time clock LEAL 8(SP), AX @@ -308,12 +308,12 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$0 // save g MOVL DI, 20(SP) - + // g = m->gsignal MOVL g_m(DI), BX MOVL m_gsignal(BX), BX MOVL BX, g(CX) - + // copy arguments for sighandler MOVL $11, 0(SP) // signal MOVL $0, 4(SP) // siginfo @@ -356,7 +356,7 @@ ret: // Today those registers are just PC and SP, but in case additional registers // are relevant in the future (for example DX is the Go func context register) // we restore as many registers as possible. - // + // // We smash BP, because that's what the linker smashes during RET. // LEAL ctxt+4(FP), BP diff --git a/src/runtime/sys_nacl_amd64p32.s b/src/runtime/sys_nacl_amd64p32.s index 4c4d509576..b4a108346d 100644 --- a/src/runtime/sys_nacl_amd64p32.s +++ b/src/runtime/sys_nacl_amd64p32.s @@ -334,13 +334,13 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$80 // check that g exists get_tls(CX) MOVL g(CX), DI - + CMPL DI, $0 JEQ nog // save g MOVL DI, 20(SP) - + // g = m->gsignal MOVL g_m(DI), BX MOVL m_gsignal(BX), BX diff --git a/src/runtime/sys_netbsd_386.s b/src/runtime/sys_netbsd_386.s index 4042ab4f8a..66f4620cab 100644 --- a/src/runtime/sys_netbsd_386.s +++ b/src/runtime/sys_netbsd_386.s @@ -135,7 +135,9 @@ TEXT runtime·munmap(SB),NOSPLIT,$-4 TEXT runtime·madvise(SB),NOSPLIT,$-4 MOVL $75, AX // sys_madvise INT $0x80 - // ignore failure - maybe pages are locked + JAE 2(PC) + MOVL $-1, AX + MOVL AX, ret+12(FP) RET TEXT runtime·setitimer(SB),NOSPLIT,$-4 diff --git a/src/runtime/sys_netbsd_amd64.s b/src/runtime/sys_netbsd_amd64.s index 11b9c1b417..531c227a7b 100644 --- a/src/runtime/sys_netbsd_amd64.s +++ b/src/runtime/sys_netbsd_amd64.s @@ -23,7 +23,7 @@ TEXT runtime·lwp_create(SB),NOSPLIT,$0 RET TEXT runtime·lwp_tramp(SB),NOSPLIT,$0 - + // Set FS to point at m->tls. 
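The madvise hunks in the files above and below all make one change: instead of discarding the result ("maybe pages are locked"), the stub now reports it, either the raw kernel value (linux) or -1 when the carry flag signals failure (the BSDs). That lets the caller tell whether its advice took effect. A hedged sketch of the calling pattern this enables, on linux (MADV_FREE's numeric value here is illustrative, and the helper name is not the runtime's):

    package main

    import (
        "fmt"
        "syscall"
    )

    // freePages asks for the lazy MADV_FREE first and falls back to
    // the eager MADV_DONTNEED when the kernel rejects the advice, a
    // decision only possible once madvise stops hiding its result.
    func freePages(b []byte) error {
        const madvFree = 8 // linux MADV_FREE; illustrative constant
        if syscall.Madvise(b, madvFree) == nil {
            return nil
        }
        return syscall.Madvise(b, syscall.MADV_DONTNEED)
    }

    func main() {
        b, _ := syscall.Mmap(-1, 0, 4096, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE)
        fmt.Println(freePages(b))
    }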
LEAQ m_tls(R8), DI CALL runtime·settls(SB) @@ -319,7 +319,9 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVL flags+16(FP), DX // arg 3 - behav MOVQ $75, AX // sys_madvise SYSCALL - // ignore failure - maybe pages are locked + JCC 2(PC) + MOVL $-1, AX + MOVL AX, ret+24(FP) RET TEXT runtime·sigaltstack(SB),NOSPLIT,$-8 diff --git a/src/runtime/sys_netbsd_arm.s b/src/runtime/sys_netbsd_arm.s index 6b2c5a8357..304075f295 100644 --- a/src/runtime/sys_netbsd_arm.s +++ b/src/runtime/sys_netbsd_arm.s @@ -284,11 +284,12 @@ TEXT runtime·munmap(SB),NOSPLIT,$0 RET TEXT runtime·madvise(SB),NOSPLIT,$0 - MOVW addr+0(FP), R0 // arg 1 - addr - MOVW n+4(FP), R1 // arg 2 - len - MOVW flags+8(FP), R2 // arg 3 - behav - SWI $0xa0004b // sys_madvise - // ignore failure - maybe pages are locked + MOVW addr+0(FP), R0 // arg 1 - addr + MOVW n+4(FP), R1 // arg 2 - len + MOVW flags+8(FP), R2 // arg 3 - behav + SWI $0xa0004b // sys_madvise + MOVW.CS $-1, R0 + MOVW R0, ret+12(FP) RET TEXT runtime·sigaltstack(SB),NOSPLIT|NOFRAME,$0 diff --git a/src/runtime/sys_openbsd_386.s b/src/runtime/sys_openbsd_386.s index 21f13c806e..d555edb71f 100644 --- a/src/runtime/sys_openbsd_386.s +++ b/src/runtime/sys_openbsd_386.s @@ -136,7 +136,8 @@ TEXT runtime·madvise(SB),NOSPLIT,$-4 MOVL $75, AX // sys_madvise INT $0x80 JAE 2(PC) - MOVL $0xf1, 0xf1 // crash + MOVL $-1, AX + MOVL AX, ret+12(FP) RET TEXT runtime·setitimer(SB),NOSPLIT,$-4 @@ -294,7 +295,7 @@ TEXT runtime·tfork(SB),NOSPLIT,$12 CALL runtime·settls(SB) POPL AX POPAL - + // Now segment is established. Initialize m, g. get_tls(AX) MOVL DX, g(AX) diff --git a/src/runtime/sys_openbsd_amd64.s b/src/runtime/sys_openbsd_amd64.s index 38ac38d9bf..227e81869c 100644 --- a/src/runtime/sys_openbsd_amd64.s +++ b/src/runtime/sys_openbsd_amd64.s @@ -305,7 +305,9 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVL flags+16(FP), DX // arg 3 - behav MOVQ $75, AX // sys_madvise SYSCALL - // ignore failure - maybe pages are locked + JCC 2(PC) + MOVL $-1, AX + MOVL AX, ret+24(FP) RET TEXT runtime·sigaltstack(SB),NOSPLIT,$-8 diff --git a/src/runtime/sys_openbsd_arm.s b/src/runtime/sys_openbsd_arm.s index ff1c1da9b9..52d3638bc1 100644 --- a/src/runtime/sys_openbsd_arm.s +++ b/src/runtime/sys_openbsd_arm.s @@ -143,8 +143,8 @@ TEXT runtime·madvise(SB),NOSPLIT,$0 MOVW flags+8(FP), R2 // arg 2 - flags MOVW $75, R12 // sys_madvise SWI $0 - MOVW.CS $0, R8 // crash on syscall failure - MOVW.CS R8, (R8) + MOVW.CS $-1, R0 + MOVW R0, ret+12(FP) RET TEXT runtime·setitimer(SB),NOSPLIT,$0 diff --git a/src/runtime/sys_plan9_386.s b/src/runtime/sys_plan9_386.s index 47dcb8db04..a7fb9fe6f7 100644 --- a/src/runtime/sys_plan9_386.s +++ b/src/runtime/sys_plan9_386.s @@ -126,7 +126,7 @@ TEXT runtime·noted(SB),NOSPLIT,$0 INT $64 MOVL AX, ret+4(FP) RET - + TEXT runtime·plan9_semrelease(SB),NOSPLIT,$0 MOVL $38, AX INT $64 diff --git a/src/runtime/sys_plan9_amd64.s b/src/runtime/sys_plan9_amd64.s index 8077d6d324..4ef4aab376 100644 --- a/src/runtime/sys_plan9_amd64.s +++ b/src/runtime/sys_plan9_amd64.s @@ -123,7 +123,7 @@ TEXT runtime·noted(SB),NOSPLIT,$0 SYSCALL MOVL AX, ret+8(FP) RET - + TEXT runtime·plan9_semrelease(SB),NOSPLIT,$0 MOVQ $38, BP SYSCALL diff --git a/src/runtime/sys_solaris_amd64.s b/src/runtime/sys_solaris_amd64.s index 2b6dabab99..930fc88997 100644 --- a/src/runtime/sys_solaris_amd64.s +++ b/src/runtime/sys_solaris_amd64.s @@ -63,9 +63,9 @@ TEXT runtime·pipe1(SB),NOSPLIT,$0 // Call a library function with SysV calling conventions. 
// The called function can take a maximum of 6 INTEGER class arguments, -// see +// see // Michael Matz, Jan Hubicka, Andreas Jaeger, and Mark Mitchell -// System V Application Binary Interface +// System V Application Binary Interface // AMD64 Architecture Processor Supplement // section 3.2.3. // @@ -119,7 +119,7 @@ skipargs: MOVL 0(AX), AX MOVQ AX, libcall_err(DI) -skiperrno2: +skiperrno2: RET // uint32 tstart_sysvicall(M *newm); @@ -186,7 +186,7 @@ allgood: // Save m->libcall and m->scratch. We need to do this because we // might get interrupted by a signal in runtime·asmcgocall. - // save m->libcall + // save m->libcall MOVQ g_m(R10), BP LEAQ m_libcall(BP), R11 MOVQ libcall_fn(R11), R10 diff --git a/src/runtime/sys_windows_386.s b/src/runtime/sys_windows_386.s index 3c091adcb1..babd91c936 100644 --- a/src/runtime/sys_windows_386.s +++ b/src/runtime/sys_windows_386.s @@ -494,13 +494,13 @@ wall: MOVL (_SYSTEM_TIME+time_hi2), DX CMPL CX, DX JNE wall - + // w = DX:AX // convert to Unix epoch (but still 100ns units) #define delta 116444736000000000 SUBL $(delta & 0xFFFFFFFF), AX SBBL $(delta >> 32), DX - + // nano/100 = DX:AX // split into two decimal halves by div 1e9. // (decimal point is two spots over from correct place, @@ -509,7 +509,7 @@ wall: DIVL CX MOVL AX, DI MOVL DX, SI - + // DI = nano/100/1e9 = nano/1e11 = sec/100, DX = SI = nano/100%1e9 // split DX into seconds and nanoseconds by div 1e7 magic multiply. MOVL DX, AX @@ -520,7 +520,7 @@ wall: IMULL $10000000, DX MOVL SI, CX SUBL DX, CX - + // DI = sec/100 (still) // BX = (nano/100%1e9)/1e7 = (nano/1e9)%100 = sec%100 // CX = (nano/100%1e9)%1e7 = (nano%1e9)/100 = nsec/100 diff --git a/src/runtime/sys_windows_amd64.s b/src/runtime/sys_windows_amd64.s index c9127ac2d2..ec49caa43e 100644 --- a/src/runtime/sys_windows_amd64.s +++ b/src/runtime/sys_windows_amd64.s @@ -89,7 +89,7 @@ TEXT runtime·badsignal2(SB),NOSPLIT|NOFRAME,$48 MOVQ $0, 32(SP) // overlapped MOVQ runtime·_WriteFile(SB), AX CALL AX - + RET // faster get/set last error diff --git a/src/runtime/sys_windows_arm.s b/src/runtime/sys_windows_arm.s new file mode 100644 index 0000000000..409c72c554 --- /dev/null +++ b/src/runtime/sys_windows_arm.s @@ -0,0 +1,605 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "go_tls.h" +#include "textflag.h" + +// void runtime·asmstdcall(void *c); +TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R4, R5, R14], (R13) // push {r4, r5, lr} + MOVW R0, R4 // put libcall * in r4 + MOVW R13, R5 // save stack pointer in r5 + + // SetLastError(0) + MOVW $0, R0 + MRC 15, 0, R1, C13, C0, 2 + MOVW R0, 0x34(R1) + + MOVW 8(R4), R12 // libcall->args + + // Do we have more than 4 arguments? 
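The question in the comment above is the heart of asmstdcall on windows/arm: the first four word-sized arguments travel in r0-r3, and anything past four must be copied below the 8-byte-aligned stack pointer before the call. A sketch of that split in Go, purely illustrative of what the assembly that follows does:

    package main

    import "fmt"

    // splitStdcallArgs mirrors the register/stack split below: up to
    // four words ride in r0-r3, the remainder spill to the stack.
    func splitStdcallArgs(args []uintptr) (regs [4]uintptr, stack []uintptr) {
        n := copy(regs[:], args)
        return regs, args[n:]
    }

    func main() {
        regs, stack := splitStdcallArgs([]uintptr{1, 2, 3, 4, 5, 6})
        fmt.Println(regs, stack) // [1 2 3 4] [5 6]
    }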
+ MOVW 4(R4), R0 // libcall->n + SUB.S $4, R0, R2 + BLE loadregs + + // Reserve stack space for remaining args + SUB R2<<2, R13 + BIC $0x7, R13 // alignment for ABI + + // R0: count of arguments + // R1: + // R2: loop counter, from 0 to (n-4) + // R3: scratch + // R4: pointer to libcall struct + // R12: libcall->args + MOVW $0, R2 +stackargs: + ADD $4, R2, R3 // r3 = args[4 + i] + MOVW R3<<2(R12), R3 + MOVW R3, R2<<2(R13) // stack[i] = r3 + + ADD $1, R2 // i++ + SUB $4, R0, R3 // while (i < (n - 4)) + CMP R3, R2 + BLT stackargs + +loadregs: + CMP $3, R0 + MOVW.GT 12(R12), R3 + + CMP $2, R0 + MOVW.GT 8(R12), R2 + + CMP $1, R0 + MOVW.GT 4(R12), R1 + + CMP $0, R0 + MOVW.GT 0(R12), R0 + + BIC $0x7, R13 // alignment for ABI + MOVW 0(R4), R12 // branch to libcall->fn + BL (R12) + + MOVW R5, R13 // free stack space + MOVW R0, 12(R4) // save return value to libcall->r1 + MOVW R1, 16(R4) + + // GetLastError + MRC 15, 0, R1, C13, C0, 2 + MOVW 0x34(R1), R0 + MOVW R0, 20(R4) // store in libcall->err + + MOVM.IA.W (R13), [R4, R5, R15] + +TEXT runtime·badsignal2(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R4, R14], (R13) // push {r4, lr} + MOVW R13, R4 // save original stack pointer + SUB $8, R13 // space for 2 variables + BIC $0x7, R13 // alignment for ABI + + // stderr + MOVW runtime·_GetStdHandle(SB), R1 + MOVW $-12, R0 + BL (R1) + + MOVW $runtime·badsignalmsg(SB), R1 // lpBuffer + MOVW $runtime·badsignallen(SB), R2 // lpNumberOfBytesToWrite + MOVW (R2), R2 + ADD $0x4, R13, R3 // lpNumberOfBytesWritten + MOVW $0, R12 // lpOverlapped + MOVW R12, (R13) + + MOVW runtime·_WriteFile(SB), R12 + BL (R12) + + MOVW R4, R13 // restore SP + MOVM.IA.W (R13), [R4, R15] // pop {r4, pc} + +TEXT runtime·getlasterror(SB),NOSPLIT,$0 + MRC 15, 0, R0, C13, C0, 2 + MOVW 0x34(R0), R0 + MOVW R0, ret+0(FP) + RET + +TEXT runtime·setlasterror(SB),NOSPLIT|NOFRAME,$0 + MRC 15, 0, R1, C13, C0, 2 + MOVW R0, 0x34(R1) + RET + +// Called by Windows as a Vectored Exception Handler (VEH). +// First argument is pointer to struct containing +// exception record and context pointers. +// Handler function is stored in R1 +// Return 0 for 'not handled', -1 for handled. +// int32_t sigtramp( +// PEXCEPTION_POINTERS ExceptionInfo, +// func *GoExceptionHandler); +TEXT runtime·sigtramp(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R0, R4-R11, R14], (R13) // push {r0, r4-r11, lr} (SP-=40) + SUB $(8+20), R13 // reserve space for g, sp, and + // parameters/retval to go call + + MOVW R0, R6 // Save param0 + MOVW R1, R7 // Save param1 + + BL runtime·load_g(SB) + CMP $0, g // is there a current g? + BL.EQ runtime·badsignal2(SB) + + // save g and SP in case of stack switch + MOVW R13, 24(R13) + MOVW g, 20(R13) + + // do we need to switch to the g0 stack? + MOVW g, R5 // R5 = g + MOVW g_m(R5), R2 // R2 = m + MOVW m_g0(R2), R4 // R4 = g0 + CMP R5, R4 // if curg == g0 + BEQ g0 + + // switch to g0 stack + MOVW R4, g // g = g0 + MOVW (g_sched+gobuf_sp)(g), R3 // R3 = g->gobuf.sp + BL runtime·save_g(SB) + + // traceback will think that we've done PUSH and SUB + // on this stack, so subtract them here to match. + // (we need room for sighandler arguments anyway). + // and re-save old SP for restoring later. 
+ SUB $(40+8+20), R3 + MOVW R13, 24(R3) // save old stack pointer + MOVW R3, R13 // switch stack + +g0: + MOVW 0(R6), R2 // R2 = ExceptionPointers->ExceptionRecord + MOVW 4(R6), R3 // R3 = ExceptionPointers->ContextRecord + + // make it look like mstart called us on g0, to stop traceback + MOVW $runtime·mstart(SB), R4 + + MOVW R4, 0(R13) // Save link register for traceback + MOVW R2, 4(R13) // Move arg0 (ExceptionRecord) into position + MOVW R3, 8(R13) // Move arg1 (ContextRecord) into position + MOVW R5, 12(R13) // Move arg2 (original g) into position + BL (R7) // Call the go routine + MOVW 16(R13), R4 // Fetch return value from stack + + // Compute the value of the g0 stack pointer after deallocating + // this frame, then allocating 8 bytes. We may need to store + // the resume SP and PC on the g0 stack to work around + // control flow guard when we resume from the exception. + ADD $(40+20), R13, R12 + + // switch back to original stack and g + MOVW 24(R13), R13 + MOVW 20(R13), g + BL runtime·save_g(SB) + +done: + MOVW R4, R0 // move retval into position + ADD $(8 + 20), R13 // free locals + MOVM.IA.W (R13), [R3, R4-R11, R14] // pop {r3, r4-r11, lr} + + // if return value is CONTINUE_SEARCH, do not set up control + // flow guard workaround + CMP $0, R0 + BEQ return + + // Check if we need to set up the control flow guard workaround. + // On Windows/ARM, the stack pointer must lie within system + // stack limits when we resume from exception. + // Store the resume SP and PC on the g0 stack, + // and return to returntramp on the g0 stack. returntramp + // pops the saved PC and SP from the g0 stack, resuming execution + // at the desired location. + // If returntramp has already been set up by a previous exception + // handler, don't clobber the stored SP and PC on the stack. + MOVW 4(R3), R3 // PEXCEPTION_POINTERS->Context + MOVW 0x40(R3), R2 // load PC from context record + MOVW $runtime·returntramp(SB), R1 + CMP R1, R2 + B.EQ return // do not clobber saved SP/PC + + // Save resume SP and PC on g0 stack + MOVW 0x38(R3), R2 // load SP from context record + MOVW R2, 0(R12) // Store resume SP on g0 stack + MOVW 0x40(R3), R2 // load PC from context record + MOVW R2, 4(R12) // Store resume PC on g0 stack + + // Set up context record to return to returntramp on g0 stack + MOVW R12, 0x38(R3) // save g0 stack pointer + // in context record + MOVW $runtime·returntramp(SB), R2 // save resume address + MOVW R2, 0x40(R3) // in context record + +return: + B (R14) // return + +// +// Trampoline to resume execution from exception handler. +// This is part of the control flow guard workaround. +// It switches stacks and jumps to the continuation address. 
+// +TEXT runtime·returntramp(SB),NOSPLIT|NOFRAME,$0 + MOVM.IA (R13), [R13, R15] // ldm sp, [sp, pc] + +TEXT runtime·exceptiontramp(SB),NOSPLIT|NOFRAME,$0 + MOVW $runtime·exceptionhandler(SB), R1 + B runtime·sigtramp(SB) + +TEXT runtime·firstcontinuetramp(SB),NOSPLIT|NOFRAME,$0 + MOVW $runtime·firstcontinuehandler(SB), R1 + B runtime·sigtramp(SB) + +TEXT runtime·lastcontinuetramp(SB),NOSPLIT|NOFRAME,$0 + MOVW $runtime·lastcontinuehandler(SB), R1 + B runtime·sigtramp(SB) + +TEXT runtime·ctrlhandler(SB),NOSPLIT|NOFRAME,$0 + MOVW $runtime·ctrlhandler1(SB), R1 + B runtime·externalthreadhandler(SB) + +TEXT runtime·profileloop(SB),NOSPLIT|NOFRAME,$0 + MOVW $runtime·profileloop1(SB), R1 + B runtime·externalthreadhandler(SB) + +// int32 externalthreadhandler(uint32 arg, int (*func)(uint32)) +// stack layout: +// +----------------+ +// | callee-save | +// | registers | +// +----------------+ +// | m | +// +----------------+ +// 20| g | +// +----------------+ +// 16| func ptr (r1) | +// +----------------+ +// 12| argument (r0) | +//---+----------------+ +// 8 | param1 | +// +----------------+ +// 4 | param0 | +// +----------------+ +// 0 | retval | +// +----------------+ +// +TEXT runtime·externalthreadhandler(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R4-R11, R14], (R13) // push {r4-r11, lr} + SUB $(m__size + g__size + 20), R13 // space for locals + MOVW R0, 12(R13) + MOVW R1, 16(R13) + + // zero out m and g structures + ADD $20, R13, R0 // compute pointer to g + MOVW R0, 4(R13) + MOVW $(m__size + g__size), R0 + MOVW R0, 8(R13) + BL runtime·memclrNoHeapPointers(SB) + + // initialize m and g structures + ADD $20, R13, R2 // R2 = g + ADD $(20 + g__size), R13, R3 // R3 = m + MOVW R2, m_g0(R3) // m->g0 = g + MOVW R3, g_m(R2) // g->m = m + MOVW R2, m_curg(R3) // m->curg = g + + MOVW R2, g + BL runtime·save_g(SB) + + // set up stackguard stuff + MOVW R13, R0 + MOVW R0, g_stack+stack_hi(g) + SUB $(32*1024), R0 + MOVW R0, (g_stack+stack_lo)(g) + MOVW R0, g_stackguard0(g) + MOVW R0, g_stackguard1(g) + + // move argument into position and call function + MOVW 12(R13), R0 + MOVW R0, 4(R13) + MOVW 16(R13), R1 + BL (R1) + + // clear g + MOVW $0, g + BL runtime·save_g(SB) + + MOVW 0(R13), R0 // load return value + ADD $(m__size + g__size + 20), R13 // free locals + MOVM.IA.W (R13), [R4-R11, R15] // pop {r4-r11, pc} + +GLOBL runtime·cbctxts(SB), NOPTR, $4 + +TEXT runtime·callbackasm1(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R4-R11, R14], (R13) // push {r4-r11, lr} + SUB $36, R13 // space for locals + + // save callback arguments to stack. We currently support up to 4 arguments + ADD $16, R13, R4 + MOVM.IA [R0-R3], (R4) + + // load cbctxts[i]. 
The trampoline in zcallback_windows.s puts the callback + // index in R12 + MOVW runtime·cbctxts(SB), R4 + MOVW R12<<2(R4), R4 // R4 holds pointer to wincallbackcontext structure + + // extract callback context + MOVW wincallbackcontext_argsize(R4), R5 + MOVW wincallbackcontext_gobody(R4), R4 + + // we currently support up to 4 arguments + CMP $(4 * 4), R5 + BL.GT runtime·abort(SB) + + // extend argsize by size of return value + ADD $4, R5 + + // Build 'type args struct' + MOVW R4, 4(R13) // fn + ADD $16, R13, R0 // arg (points to r0-r3, ret on stack) + MOVW R0, 8(R13) + MOVW R5, 12(R13) // argsize + + BL runtime·load_g(SB) + BL runtime·cgocallback_gofunc(SB) + + ADD $16, R13, R0 // load arg + MOVW 12(R13), R1 // load argsize + SUB $4, R1 // offset to return value + MOVW R1<<0(R0), R0 // load return value + + ADD $36, R13 // free locals + MOVM.IA.W (R13), [R4-R11, R15] // pop {r4-r11, pc} + +// uint32 tstart_stdcall(M *newm); +TEXT runtime·tstart_stdcall(SB),NOSPLIT|NOFRAME,$0 + MOVM.DB.W [R4-R11, R14], (R13) // push {r4-r11, lr} + + MOVW m_g0(R0), g + MOVW R0, g_m(g) + BL runtime·save_g(SB) + + // Layout new m scheduler stack on os stack. + MOVW R13, R0 + MOVW R0, g_stack+stack_hi(g) + SUB $(64*1024), R0 + MOVW R0, (g_stack+stack_lo)(g) + MOVW R0, g_stackguard0(g) + MOVW R0, g_stackguard1(g) + + BL runtime·emptyfunc(SB) // fault if stack check is wrong + BL runtime·mstart(SB) + + // Exit the thread. + MOVW $0, R0 + MOVM.IA.W (R13), [R4-R11, R15] // pop {r4-r11, pc} + +// onosstack calls fn on OS stack. +// adapted from asm_arm.s : systemstack +// func onosstack(fn unsafe.Pointer, arg uint32) +TEXT runtime·onosstack(SB),NOSPLIT,$0 + MOVW fn+0(FP), R5 // R5 = fn + MOVW arg+4(FP), R6 // R6 = arg + + // This function can be called when there is no g, + // for example, when we are handling a callback on a non-go thread. + // In this case we're already on the system stack. + CMP $0, g + BEQ noswitch + + MOVW g_m(g), R1 // R1 = m + + MOVW m_gsignal(R1), R2 // R2 = gsignal + CMP g, R2 + B.EQ noswitch + + MOVW m_g0(R1), R2 // R2 = g0 + CMP g, R2 + B.EQ noswitch + + MOVW m_curg(R1), R3 + CMP g, R3 + B.EQ switch + + // Bad: g is not gsignal, not g0, not curg. What is it? + // Hide call from linker nosplit analysis. + MOVW $runtime·badsystemstack(SB), R0 + BL (R0) + B runtime·abort(SB) + +switch: + // save our state in g->sched. Pretend to + // be systemstack_switch if the G stack is scanned. + MOVW $runtime·systemstack_switch(SB), R3 + ADD $4, R3, R3 // get past push {lr} + MOVW R3, (g_sched+gobuf_pc)(g) + MOVW R13, (g_sched+gobuf_sp)(g) + MOVW LR, (g_sched+gobuf_lr)(g) + MOVW g, (g_sched+gobuf_g)(g) + + // switch to g0 + MOVW R2, g + MOVW (g_sched+gobuf_sp)(R2), R3 + // make it look like mstart called systemstack on g0, to stop traceback + SUB $4, R3, R3 + MOVW $runtime·mstart(SB), R4 + MOVW R4, 0(R3) + MOVW R3, R13 + + // call target function + MOVW R6, R0 // arg + BL (R5) + + // switch back to g + MOVW g_m(g), R1 + MOVW m_curg(R1), g + MOVW (g_sched+gobuf_sp)(g), R13 + MOVW $0, R3 + MOVW R3, (g_sched+gobuf_sp)(g) + RET + +noswitch: + // Using a tail call here cleans up tracebacks since we won't stop + // at an intermediate systemstack. + MOVW.P 4(R13), R14 // restore LR + MOVW R6, R0 // arg + B (R5) + +// Runs on OS stack. Duration (in 100ns units) is in R0. 
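The usleep2 body that follows relies on an NT kernel convention: NtWaitForSingleObject takes a 64-bit timeout in 100ns units, and a negative value means "relative to now", which is why the stub negates the duration in R0 and stores $-1 as the high word. The same conversion in Go, assuming a tick count that fits in 31 bits as the stub does:

    package main

    import "fmt"

    // ntRelativeTimeout turns a duration in 100ns ticks into the
    // negative 64-bit value NtWaitForSingleObject treats as a
    // relative wait, split into the two words the stub stores.
    func ntRelativeTimeout(ticks int32) (lo, hi uint32) {
        t := -int64(ticks) // negative = relative time
        return uint32(t), uint32(t >> 32)
    }

    func main() {
        lo, hi := ntRelativeTimeout(10000) // 1ms = 10000 * 100ns
        fmt.Printf("lo=%#x hi=%#x\n", lo, hi) // lo=0xffffd8f0 hi=0xffffffff
    }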
+TEXT runtime·usleep2(SB),NOSPLIT|NOFRAME,$0
+ MOVM.DB.W [R4, R14], (R13) // push {r4, lr}
+ MOVW R13, R4 // Save SP
+ SUB $8, R13 // R13 = R13 - 8
+ BIC $0x7, R13 // Align SP for ABI
+ RSB $0, R0, R3 // R3 = -R0
+ MOVW $0, R1 // R1 = FALSE (alertable)
+ MOVW $-1, R0 // R0 = handle
+ MOVW R13, R2 // R2 = pTime
+ MOVW R3, 0(R2) // time_lo
+ MOVW R0, 4(R2) // time_hi
+ MOVW runtime·_NtWaitForSingleObject(SB), R3
+ BL (R3)
+ MOVW R4, R13 // Restore SP
+ MOVM.IA.W (R13), [R4, R15] // pop {R4, pc}
+
+// Runs on OS stack.
+TEXT runtime·switchtothread(SB),NOSPLIT|NOFRAME,$0
+ MOVM.DB.W [R4, R14], (R13) // push {R4, lr}
+ MOVW R13, R4
+ BIC $0x7, R13 // alignment for ABI
+ MOVW runtime·_SwitchToThread(SB), R0
+ BL (R0)
+ MOVW R4, R13 // restore stack pointer
+ MOVM.IA.W (R13), [R4, R15] // pop {R4, pc}
+
+TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0
+ B runtime·armPublicationBarrier(SB)
+
+// never called (cgo not supported)
+TEXT runtime·read_tls_fallback(SB),NOSPLIT|NOFRAME,$0
+ MOVW $0xabcd, R0
+ MOVW R0, (R0)
+ RET
+
+// See http://www.dcl.hpi.uni-potsdam.de/research/WRK/2007/08/getting-os-information-the-kuser_shared_data-structure/
+// Must read hi1, then lo, then hi2. The snapshot is valid if hi1 == hi2.
+#define _INTERRUPT_TIME 0x7ffe0008
+#define _SYSTEM_TIME 0x7ffe0014
+#define time_lo 0
+#define time_hi1 4
+#define time_hi2 8
+
+TEXT runtime·nanotime(SB),NOSPLIT,$0-8
+ MOVW $0, R0
+ MOVB runtime·useQPCTime(SB), R0
+ CMP $0, R0
+ BNE useQPC
+ MOVW $_INTERRUPT_TIME, R3
+loop:
+ MOVW time_hi1(R3), R1
+ MOVW time_lo(R3), R0
+ MOVW time_hi2(R3), R2
+ CMP R1, R2
+ BNE loop
+
+ // wintime = R1:R0, multiply by 100
+ MOVW $100, R2
+ MULLU R0, R2, (R4, R3) // R4:R3 = R1:R0 * R2
+ MULA R1, R2, R4, R4
+
+ // wintime*100 = R4:R3, subtract startNano and return
+ MOVW runtime·startNano+0(SB), R0
+ MOVW runtime·startNano+4(SB), R1
+ SUB.S R0, R3
+ SBC R1, R4
+ MOVW R3, ret_lo+0(FP)
+ MOVW R4, ret_hi+4(FP)
+ RET
+useQPC:
+ B runtime·nanotimeQPC(SB) // tail call
+ RET
+
+TEXT time·now(SB),NOSPLIT,$0-20
+ MOVW $0, R0
+ MOVB runtime·useQPCTime(SB), R0
+ CMP $0, R0
+ BNE useQPC
+ MOVW $_INTERRUPT_TIME, R3
+loop:
+ MOVW time_hi1(R3), R1
+ MOVW time_lo(R3), R0
+ MOVW time_hi2(R3), R2
+ CMP R1, R2
+ BNE loop
+
+ // wintime = R1:R0, multiply by 100
+ MOVW $100, R2
+ MULLU R0, R2, (R4, R3) // R4:R3 = R1:R0 * R2
+ MULA R1, R2, R4, R4
+
+ // wintime*100 = R4:R3, subtract startNano and return
+ MOVW runtime·startNano+0(SB), R0
+ MOVW runtime·startNano+4(SB), R1
+ SUB.S R0, R3
+ SBC R1, R4
+ MOVW R3, mono+12(FP)
+ MOVW R4, mono+16(FP)
+
+ MOVW $_SYSTEM_TIME, R3
+wall:
+ MOVW time_hi1(R3), R1
+ MOVW time_lo(R3), R0
+ MOVW time_hi2(R3), R2
+ CMP R1, R2
+ BNE wall
+
+ // w = R1:R0 in 100ns units
+ // convert to Unix epoch (but still 100ns units)
+ #define delta 116444736000000000
+ SUB.S $(delta & 0xFFFFFFFF), R0
+ SBC $(delta >> 32), R1
+
+ // Convert to nSec
+ MOVW $100, R2
+ MULLU R0, R2, (R4, R3) // R4:R3 = R1:R0 * R2
+ MULA R1, R2, R4, R4
+ // w = R2:R1 in nSec
+ MOVW R3, R1 // R4:R3 -> R2:R1
+ MOVW R4, R2
+
+ // multiply nanoseconds by reciprocal of 10**9 (scaled by 2**61)
+ // to get seconds (96 bit scaled result)
+ MOVW $0x89705f41, R3 // 2**61 * 10**-9
+ MULLU R1,R3,(R6,R5) // R7:R6:R5 = R2:R1 * R3
+ MOVW $0,R7
+ MULALU R2,R3,(R7,R6)
+
+ // unscale by discarding low 32 bits, shifting the rest by 29
+ MOVW R6>>29,R6 // R7:R6 = (R7:R6:R5 >> 61)
+ ORR R7<<3,R6
+ MOVW R7>>29,R7
+
+ // subtract (10**9 * sec) from nsec to get nanosecond remainder
+ MOVW $1000000000, R5 // 10**9
+ MULLU R6,R5,(R9,R8) // R9:R8 = R7:R6 * R5
+ MULA R7,R5,R9,R9
+ SUB.S R8,R1 // R2:R1 -= R9:R8
+ SBC R9,R2
+
+ // because reciprocal was a truncated repeating fraction, quotient
+ // may be slightly too small -- adjust to make remainder < 10**9
+ CMP R5,R1 // if remainder > 10**9
+ SUB.HS R5,R1 // remainder -= 10**9
+ ADD.HS $1,R6 // sec += 1
+
+ MOVW R6,sec_lo+0(FP)
+ MOVW R7,sec_hi+4(FP)
+ MOVW R1,nsec+8(FP)
+ RET
+useQPC:
+ B runtime·nowQPC(SB) // tail call
+ RET
+
diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go
index 8264070569..0858efaf61 100644
--- a/src/runtime/syscall_windows.go
+++ b/src/runtime/syscall_windows.go
@@ -25,18 +25,32 @@ func (c *wincallbackcontext) setCleanstack(cleanstack bool) {
 var (
 cbs callbacks
 cbctxts **wincallbackcontext = &cbs.ctxt[0] // to simplify access to cbs.ctxt in sys_windows_*.s
-
- callbackasm byte // type isn't really byte, it's code in runtime
 )
 
+func callbackasm()
+
 // callbackasmAddr returns address of runtime.callbackasm
 // function adjusted by i.
-// runtime.callbackasm is just a series of CALL instructions
-// (each is 5 bytes long), and we want callback to arrive at
+// On x86 and amd64, runtime.callbackasm is a series of CALL instructions,
+// and we want callback to arrive at
 // the corresponding call instruction instead of the start of
 // runtime.callbackasm.
+// On ARM, runtime.callbackasm is a series of mov and branch instructions.
+// R12 is loaded with the callback index. Each entry is two instructions,
+// hence 8 bytes.
 func callbackasmAddr(i int) uintptr {
- return uintptr(add(unsafe.Pointer(&callbackasm), uintptr(i*5)))
+ var entrySize int
+ switch GOARCH {
+ default:
+ panic("unsupported architecture")
+ case "386", "amd64":
+ entrySize = 5
+ case "arm":
+ // On ARM, each entry is a MOV instruction
+ // followed by a branch instruction
+ entrySize = 8
+ }
+ return funcPC(callbackasm) + uintptr(i*entrySize)
 }
 
 //go:linkname compileCallback syscall.compileCallback
diff --git a/src/runtime/testdata/testprogcgo/exec.go b/src/runtime/testdata/testprogcgo/exec.go
index 2e948401c8..94da5dc526 100644
--- a/src/runtime/testdata/testprogcgo/exec.go
+++ b/src/runtime/testdata/testprogcgo/exec.go
@@ -75,6 +75,14 @@ func CgoExecSignalMask() {
 cmd.Stdout = os.Stdout
 cmd.Stderr = os.Stderr
 if err := cmd.Run(); err != nil {
+ // An overloaded system
+ // may fail with EAGAIN.
+ // This doesn't tell us
+ // anything useful; ignore it.
+ // Issue #27731.
+ if isEAGAIN(err) {
+ return
+ }
 fmt.Printf("iteration %d: %v\n", j, err)
 os.Exit(1)
 }
@@ -87,3 +95,11 @@ func CgoExecSignalMask() {
 fmt.Println("OK")
 }
+
+// isEAGAIN reports whether err is an EAGAIN error from a process execution.
+func isEAGAIN(err error) bool {
+ if p, ok := err.(*os.PathError); ok {
+ err = p.Err
+ }
+ return err == syscall.EAGAIN
+}
diff --git a/src/runtime/timestub2.go b/src/runtime/timestub2.go
index 9ddc6fed91..00c2c55f46 100644
--- a/src/runtime/timestub2.go
+++ b/src/runtime/timestub2.go
@@ -5,6 +5,7 @@
 // +build !darwin
 // +build !windows
 // +build !freebsd
+// +build !aix
 
 package runtime
 
diff --git a/src/runtime/tls_arm.s b/src/runtime/tls_arm.s
index cc547a5db1..e2c945d183 100644
--- a/src/runtime/tls_arm.s
+++ b/src/runtime/tls_arm.s
@@ -23,6 +23,9 @@
 #ifdef GOOS_darwin
 #define TLSG_IS_VARIABLE
 #endif
+#ifdef GOOS_windows
+#define TLSG_IS_VARIABLE
+#endif
 
 // save_g saves the g register into pthread-provided
 // thread-local memory, so that we can call externally compiled
@@ -36,6 +39,17 @@ TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0
 MOVW g, R0 // preserve R0 across call to setg<>
 RET
 #else
+#ifdef GOOS_windows
+ // Save the value in the _TEB->TlsSlots array.
+ // Effectively implements TlsSetValue().
+ MRC 15, 0, R0, C13, C0, 2
+ ADD $0xe10, R0
+ MOVW $runtime·tls_g(SB), R11
+ MOVW (R11), R11
+ MOVW g, R11<<2(R0)
+ MOVW g, R0 // preserve R0 across call to setg<>
+ RET
+#else
 // If the host does not support MRC the linker will replace it with
 // a call to runtime.read_tls_fallback which jumps to __kuser_get_tls.
 // The replacement function saves LR in R11 over the call to read_tls_fallback.
@@ -47,6 +61,7 @@ TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0
 MOVW g, R0 // preserve R0 across call to setg<>
 RET
 #endif
+#endif
 
 // load_g loads the g register from pthread-provided
 // thread-local memory, for use after calling externally compiled
@@ -56,6 +71,16 @@ TEXT runtime·load_g(SB),NOSPLIT,$0
 // nothing to do as nacl/arm does not use TLS at all.
 RET
 #else
+#ifdef GOOS_windows
+ // Get the value from the _TEB->TlsSlots array.
+ // Effectively implements TlsGetValue().
+ MRC 15, 0, R0, C13, C0, 2
+ ADD $0xe10, R0
+ MOVW $runtime·tls_g(SB), g
+ MOVW (g), g
+ MOVW g<<2(R0), g
+ RET
+#else
 // See save_g
 MRC 15, 0, R0, C13, C0, 3 // fetch TLS base pointer
 BIC $3, R0 // Darwin/ARM might return unaligned pointer
@@ -64,6 +89,7 @@ TEXT runtime·load_g(SB),NOSPLIT,$0
 MOVW 0(R0), g
 RET
 #endif
+#endif
 
 // This is called from rt0_go, which runs on the system stack
 // using the initial stack allocated by the OS.
@@ -76,6 +102,20 @@ TEXT runtime·load_g(SB),NOSPLIT,$0
 // Declare a dummy word ($4, not $0) to make sure the
 // frame is 8 bytes and stays 8-byte-aligned.
 TEXT runtime·_initcgo(SB),NOSPLIT,$4
+#ifdef GOOS_windows
+ MOVW R13, R4
+ BIC $0x7, R13
+ MOVW $runtime·_TlsAlloc(SB), R0
+ MOVW (R0), R0
+ BL (R0)
+ // Assert that slot is less than 64 so we can use _TEB->TlsSlots
+ CMP $64, R0
+ MOVW $runtime·abort(SB), R1
+ BL.GE (R1)
+ MOVW $runtime·tls_g(SB), R1
+ MOVW R0, (R1)
+ MOVW R4, R13
+#else
 #ifndef GOOS_nacl
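An aside on the windows/arm TLS hunks above before the cgo init path continues: MRC 15, 0, Rn, C13, C0, 2 yields the TEB pointer, and slot i of the in-TEB TlsSlots array lives at offset 0xE10 + 4*i, which is why _initcgo aborts when TlsAlloc returns a slot of 64 or more (only the first 64 slots sit inside the TEB itself). The address arithmetic, sketched in Go with illustrative names:

    package main

    import "fmt"

    // tlsSlotAddr computes the address save_g and load_g use for
    // _TEB->TlsSlots[slot] on 32-bit Windows: the array starts at
    // offset 0xE10 and holds 64 word-sized slots.
    func tlsSlotAddr(teb, slot uintptr) (uintptr, error) {
        const tlsSlotsOffset = 0xe10
        if slot >= 64 {
            // beyond the in-TEB array; would need TlsExpansionSlots
            return 0, fmt.Errorf("slot %d not addressable via the TEB", slot)
        }
        return teb + tlsSlotsOffset + 4*slot, nil
    }

    func main() {
        addr, _ := tlsSlotAddr(0x7ffd0000, 3)
        fmt.Printf("%#x\n", addr) // 0x7ffd0e1c
    }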
 // if there is an _cgo_init, call it.
 MOVW _cgo_init(SB), R4
@@ -91,7 +131,8 @@ TEXT runtime·_initcgo(SB),NOSPLIT,$4
 MOVW $setg_gcc<>(SB), R1 // arg 1: setg
 MOVW g, R0 // arg 0: G
 BL (R4) // will clobber R0-R3
-#endif
+#endif // GOOS_nacl
+#endif // GOOS_windows
 nocgo:
 RET
 
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 4c2010493a..d7265b2bb9 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -312,8 +312,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
 // the function either doesn't return at all (if it has no defers or if the
 // defers do not recover) or it returns from one of the calls to
 // deferproc a second time (if the corresponding deferred func recovers).
- // It suffices to assume that the most recent deferproc is the one that
- // returns; everything live at earlier deferprocs is still live at that one.
+ // In the latter case, use a deferreturn call site as the continuation pc.
 frame.continpc = frame.pc
 if waspanic {
 // We match up defers with frames using the SP.
@@ -324,7 +323,10 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
 // can't push a defer, the defer can't belong
 // to that frame.
 if _defer != nil && _defer.sp == frame.sp && frame.sp != frame.fp {
- frame.continpc = _defer.pc
+ frame.continpc = frame.fn.entry + uintptr(frame.fn.deferreturn) + 1
+ // Note: the +1 is to offset the -1 that
+ // stack.go:getStackMap does to back up a return
+ // address to make sure the pc is in the CALL instruction.
 } else {
 frame.continpc = 0
 }
@@ -571,8 +573,9 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
 // reflectMethodValue is a partial duplicate of reflect.makeFuncImpl
 // and reflect.methodValue.
 type reflectMethodValue struct {
- fn uintptr
- stack *bitvector // args bitmap
+ fn uintptr
+ stack *bitvector // ptrmap for both args and results
+ argLen uintptr // just args
 }
 
 // getArgInfoFast returns the argument frame information for a call to f.
@@ -601,6 +604,7 @@ func getArgInfo(frame *stkframe, f funcInfo, needArgMap bool, ctxt *funcval) (ar
 // These take a *reflect.methodValue as their
 // context register.
 var mv *reflectMethodValue
+ var retValid bool
 if ctxt != nil {
 // This is not an actual call, but a
 // deferred call. The function value
 // 0(SP).
 arg0 := frame.sp + sys.MinFrameSize
 mv = *(**reflectMethodValue)(unsafe.Pointer(arg0))
+ // Figure out whether the return values are valid.
+ // Reflect will update this value after it copies
+ // in the return values.
+ retValid = *(*bool)(unsafe.Pointer(arg0 + 3*sys.PtrSize))
 }
 if mv.fn != f.entry {
 print("runtime: confused by ", funcname(f), "\n")
 }
 bv := mv.stack
 arglen = uintptr(bv.n * sys.PtrSize)
+ if !retValid {
+ arglen = uintptr(mv.argLen) &^ (sys.PtrSize - 1)
+ }
 argmap = bv
 }
 }
@@ -936,7 +947,7 @@ func tracebackothers(me *g) {
 lock(&allglock)
 for _, gp := range allgs {
- if gp == me || gp == g.m.curg || readgstatus(gp) == _Gdead || isSystemGoroutine(gp) && level < 2 {
+ if gp == me || gp == g.m.curg || readgstatus(gp) == _Gdead || isSystemGoroutine(gp, false) && level < 2 {
 continue
 }
 print("\n")
@@ -1022,7 +1033,11 @@ func topofstack(f funcInfo, g0 bool) bool {
 // in stack dumps and deadlock detector. This is any goroutine that
 // starts at a runtime.* entry point, except for runtime.main and
 // sometimes runtime.runfinq.
-func isSystemGoroutine(gp *g) bool {
+//
+// If fixed is true, any goroutine that can vary between user and
+// system (that is, the finalizer goroutine) is considered a user
+// goroutine.
+func isSystemGoroutine(gp *g, fixed bool) bool {
 // Keep this in sync with cmd/trace/trace.go:isSystemGoroutine.
 f := findfunc(gp.startpc)
 if !f.valid() {
@@ -1034,6 +1049,11 @@ func isSystemGoroutine(gp *g) bool {
 if f.funcID == funcID_runfinq {
 // We include the finalizer goroutine if it's calling
 // back into user code.
+ if fixed {
+ // This goroutine can vary. In fixed mode,
+ // always consider it a user goroutine.
+ return false
+ }
 return !fingRunning
 }
 return hasPrefix(funcname(f), "runtime.")
diff --git a/src/runtime/vlop_arm.s b/src/runtime/vlop_arm.s
index 729653488f..41d285874d 100644
--- a/src/runtime/vlop_arm.s
+++ b/src/runtime/vlop_arm.s
@@ -30,7 +30,7 @@
 
 // func runtime·udiv(n, d uint32) (q, r uint32)
 // compiler knows the register usage of this function
-// Reference:
+// Reference:
 // Sloss, Andrew et al.; ARM System Developer's Guide: Designing and Optimizing System Software
 // Morgan Kaufmann; 1 edition (April 8, 2004), ISBN 978-1558608740
 #define Rq R0 // input d, output q
diff --git a/src/runtime/wincallback.go b/src/runtime/wincallback.go
index 9f003aed05..c022916422 100644
--- a/src/runtime/wincallback.go
+++ b/src/runtime/wincallback.go
@@ -17,11 +17,12 @@ import (
 
 const maxCallback = 2000
 
-func genasm() {
+func genasm386Amd64() {
 var buf bytes.Buffer
 
- buf.WriteString(`// generated by wincallback.go; run go generate
+ buf.WriteString(`// Code generated by wincallback.go using 'go generate'. DO NOT EDIT.
+// +build 386 amd64
 // runtime·callbackasm is called by external code to
 // execute Go implemented callback function. It is not
 // called from the start, instead runtime·compilecallback
@@ -29,13 +30,43 @@ func genasm() {
 // appropriately so different callbacks start with different
 // CALL instruction in runtime·callbackasm. This determines
 // which Go callback function is executed later on.
+
 TEXT runtime·callbackasm(SB),7,$0
 `)
 for i := 0; i < maxCallback; i++ {
 buf.WriteString("\tCALL\truntime·callbackasm1(SB)\n")
 }
- err := ioutil.WriteFile("zcallback_windows.s", buf.Bytes(), 0666)
+ filename := "zcallback_windows.s"
+ err := ioutil.WriteFile(filename, buf.Bytes(), 0666)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "wincallback: %s\n", err)
+ os.Exit(2)
+ }
+}
+
+func genasmArm() {
+ var buf bytes.Buffer
+
+ buf.WriteString(`// Code generated by wincallback.go using 'go generate'. DO NOT EDIT.
+
+// External code calls into callbackasm at an offset corresponding
+// to the callback index. Callbackasm is a table of MOV and B instructions.
+// The MOV instruction loads R12 with the callback index, and the
+// B instruction branches to callbackasm1.
+// callbackasm1 takes the callback index from R12 and
+// indexes into an array that stores information about each callback.
+// It then calls the Go implementation for that callback.
+#include "textflag.h" + +TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0 +`) + for i := 0; i < maxCallback; i++ { + buf.WriteString(fmt.Sprintf("\tMOVW\t$%d, R12\n", i)) + buf.WriteString("\tB\truntime·callbackasm1(SB)\n") + } + + err := ioutil.WriteFile("zcallback_windows_arm.s", buf.Bytes(), 0666) if err != nil { fmt.Fprintf(os.Stderr, "wincallback: %s\n", err) os.Exit(2) @@ -45,7 +76,7 @@ TEXT runtime·callbackasm(SB),7,$0 func gengo() { var buf bytes.Buffer - buf.WriteString(fmt.Sprintf(`// generated by wincallback.go; run go generate + buf.WriteString(fmt.Sprintf(`// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. package runtime @@ -59,6 +90,7 @@ const cb_max = %d // maximum number of windows callbacks allowed } func main() { - genasm() + genasm386Amd64() + genasmArm() gengo() } diff --git a/src/runtime/zcallback_windows.go b/src/runtime/zcallback_windows.go index 9908d4ec23..2c3cb28518 100644 --- a/src/runtime/zcallback_windows.go +++ b/src/runtime/zcallback_windows.go @@ -1,4 +1,4 @@ -// generated by wincallback.go; run go generate +// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. package runtime diff --git a/src/runtime/zcallback_windows.s b/src/runtime/zcallback_windows.s index b9a3a30811..7772eef329 100644 --- a/src/runtime/zcallback_windows.s +++ b/src/runtime/zcallback_windows.s @@ -1,5 +1,6 @@ -// generated by wincallback.go; run go generate +// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. +// +build 386 amd64 // runtime·callbackasm is called by external code to // execute Go implemented callback function. It is not // called from the start, instead runtime·compilecallback @@ -7,6 +8,7 @@ // appropriately so different callbacks start with different // CALL instruction in runtime·callbackasm. This determines // which Go callback function is executed later on. + TEXT runtime·callbackasm(SB),7,$0 CALL runtime·callbackasm1(SB) CALL runtime·callbackasm1(SB) diff --git a/src/runtime/zcallback_windows_arm.s b/src/runtime/zcallback_windows_arm.s new file mode 100644 index 0000000000..f943d84cbf --- /dev/null +++ b/src/runtime/zcallback_windows_arm.s @@ -0,0 +1,4012 @@ +// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. + +// External code calls into callbackasm at an offset corresponding +// to the callback index. Callbackasm is a table of MOV and B instructions. +// The MOV instruction loads R12 with the callback index, and the +// B instruction branches to callbackasm1. +// callbackasm1 takes the callback index from R12 and +// indexes into an array that stores information about each callback. +// It then calls the Go implementation for that callback. 
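Each entry in the generated table that follows is exactly two 4-byte instructions, a MOVW that loads the callback index into R12 and a B to callbackasm1, so callbackasmAddr in syscall_windows.go can map an index to its trampoline with plain multiplication: 8 bytes per entry on arm, versus one 5-byte CALL per entry on 386/amd64. A sketch of that arithmetic, assuming only the entry sizes stated in the diff:

    package main

    import "fmt"

    // callbackEntryAddr maps a callback index to its trampoline,
    // given the address of callbackasm and a per-arch entry size.
    func callbackEntryAddr(base uintptr, i int, goarch string) uintptr {
        entrySize := 5 // 386/amd64: one CALL rel32 per entry
        if goarch == "arm" {
            entrySize = 8 // MOVW $i, R12 plus B callbackasm1
        }
        return base + uintptr(i*entrySize)
    }

    func main() {
        fmt.Printf("%#x\n", callbackEntryAddr(0x10000, 3, "arm")) // 0x10018
    }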
+#include "textflag.h" + +TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0 + MOVW $0, R12 + B runtime·callbackasm1(SB) + MOVW $1, R12 + B runtime·callbackasm1(SB) + MOVW $2, R12 + B runtime·callbackasm1(SB) + MOVW $3, R12 + B runtime·callbackasm1(SB) + MOVW $4, R12 + B runtime·callbackasm1(SB) + MOVW $5, R12 + B runtime·callbackasm1(SB) + MOVW $6, R12 + B runtime·callbackasm1(SB) + MOVW $7, R12 + B runtime·callbackasm1(SB) + MOVW $8, R12 + B runtime·callbackasm1(SB) + MOVW $9, R12 + B runtime·callbackasm1(SB) + MOVW $10, R12 + B runtime·callbackasm1(SB) + MOVW $11, R12 + B runtime·callbackasm1(SB) + MOVW $12, R12 + B runtime·callbackasm1(SB) + MOVW $13, R12 + B runtime·callbackasm1(SB) + MOVW $14, R12 + B runtime·callbackasm1(SB) + MOVW $15, R12 + B runtime·callbackasm1(SB) + MOVW $16, R12 + B runtime·callbackasm1(SB) + MOVW $17, R12 + B runtime·callbackasm1(SB) + MOVW $18, R12 + B runtime·callbackasm1(SB) + MOVW $19, R12 + B runtime·callbackasm1(SB) + MOVW $20, R12 + B runtime·callbackasm1(SB) + MOVW $21, R12 + B runtime·callbackasm1(SB) + MOVW $22, R12 + B runtime·callbackasm1(SB) + MOVW $23, R12 + B runtime·callbackasm1(SB) + MOVW $24, R12 + B runtime·callbackasm1(SB) + MOVW $25, R12 + B runtime·callbackasm1(SB) + MOVW $26, R12 + B runtime·callbackasm1(SB) + MOVW $27, R12 + B runtime·callbackasm1(SB) + MOVW $28, R12 + B runtime·callbackasm1(SB) + MOVW $29, R12 + B runtime·callbackasm1(SB) + MOVW $30, R12 + B runtime·callbackasm1(SB) + MOVW $31, R12 + B runtime·callbackasm1(SB) + MOVW $32, R12 + B runtime·callbackasm1(SB) + MOVW $33, R12 + B runtime·callbackasm1(SB) + MOVW $34, R12 + B runtime·callbackasm1(SB) + MOVW $35, R12 + B runtime·callbackasm1(SB) + MOVW $36, R12 + B runtime·callbackasm1(SB) + MOVW $37, R12 + B runtime·callbackasm1(SB) + MOVW $38, R12 + B runtime·callbackasm1(SB) + MOVW $39, R12 + B runtime·callbackasm1(SB) + MOVW $40, R12 + B runtime·callbackasm1(SB) + MOVW $41, R12 + B runtime·callbackasm1(SB) + MOVW $42, R12 + B runtime·callbackasm1(SB) + MOVW $43, R12 + B runtime·callbackasm1(SB) + MOVW $44, R12 + B runtime·callbackasm1(SB) + MOVW $45, R12 + B runtime·callbackasm1(SB) + MOVW $46, R12 + B runtime·callbackasm1(SB) + MOVW $47, R12 + B runtime·callbackasm1(SB) + MOVW $48, R12 + B runtime·callbackasm1(SB) + MOVW $49, R12 + B runtime·callbackasm1(SB) + MOVW $50, R12 + B runtime·callbackasm1(SB) + MOVW $51, R12 + B runtime·callbackasm1(SB) + MOVW $52, R12 + B runtime·callbackasm1(SB) + MOVW $53, R12 + B runtime·callbackasm1(SB) + MOVW $54, R12 + B runtime·callbackasm1(SB) + MOVW $55, R12 + B runtime·callbackasm1(SB) + MOVW $56, R12 + B runtime·callbackasm1(SB) + MOVW $57, R12 + B runtime·callbackasm1(SB) + MOVW $58, R12 + B runtime·callbackasm1(SB) + MOVW $59, R12 + B runtime·callbackasm1(SB) + MOVW $60, R12 + B runtime·callbackasm1(SB) + MOVW $61, R12 + B runtime·callbackasm1(SB) + MOVW $62, R12 + B runtime·callbackasm1(SB) + MOVW $63, R12 + B runtime·callbackasm1(SB) + MOVW $64, R12 + B runtime·callbackasm1(SB) + MOVW $65, R12 + B runtime·callbackasm1(SB) + MOVW $66, R12 + B runtime·callbackasm1(SB) + MOVW $67, R12 + B runtime·callbackasm1(SB) + MOVW $68, R12 + B runtime·callbackasm1(SB) + MOVW $69, R12 + B runtime·callbackasm1(SB) + MOVW $70, R12 + B runtime·callbackasm1(SB) + MOVW $71, R12 + B runtime·callbackasm1(SB) + MOVW $72, R12 + B runtime·callbackasm1(SB) + MOVW $73, R12 + B runtime·callbackasm1(SB) + MOVW $74, R12 + B runtime·callbackasm1(SB) + MOVW $75, R12 + B runtime·callbackasm1(SB) + MOVW $76, R12 + B runtime·callbackasm1(SB) + MOVW $77, R12 + B 
runtime·callbackasm1(SB) + MOVW $78, R12 + B runtime·callbackasm1(SB) + MOVW $79, R12 + B runtime·callbackasm1(SB) + MOVW $80, R12 + B runtime·callbackasm1(SB) + MOVW $81, R12 + B runtime·callbackasm1(SB) + MOVW $82, R12 + B runtime·callbackasm1(SB) + MOVW $83, R12 + B runtime·callbackasm1(SB) + MOVW $84, R12 + B runtime·callbackasm1(SB) + MOVW $85, R12 + B runtime·callbackasm1(SB) + MOVW $86, R12 + B runtime·callbackasm1(SB) + MOVW $87, R12 + B runtime·callbackasm1(SB) + MOVW $88, R12 + B runtime·callbackasm1(SB) + MOVW $89, R12 + B runtime·callbackasm1(SB) + MOVW $90, R12 + B runtime·callbackasm1(SB) + MOVW $91, R12 + B runtime·callbackasm1(SB) + MOVW $92, R12 + B runtime·callbackasm1(SB) + MOVW $93, R12 + B runtime·callbackasm1(SB) + MOVW $94, R12 + B runtime·callbackasm1(SB) + MOVW $95, R12 + B runtime·callbackasm1(SB) + MOVW $96, R12 + B runtime·callbackasm1(SB) + MOVW $97, R12 + B runtime·callbackasm1(SB) + MOVW $98, R12 + B runtime·callbackasm1(SB) + MOVW $99, R12 + B runtime·callbackasm1(SB) + MOVW $100, R12 + B runtime·callbackasm1(SB) + MOVW $101, R12 + B runtime·callbackasm1(SB) + MOVW $102, R12 + B runtime·callbackasm1(SB) + MOVW $103, R12 + B runtime·callbackasm1(SB) + MOVW $104, R12 + B runtime·callbackasm1(SB) + MOVW $105, R12 + B runtime·callbackasm1(SB) + MOVW $106, R12 + B runtime·callbackasm1(SB) + MOVW $107, R12 + B runtime·callbackasm1(SB) + MOVW $108, R12 + B runtime·callbackasm1(SB) + MOVW $109, R12 + B runtime·callbackasm1(SB) + MOVW $110, R12 + B runtime·callbackasm1(SB) + MOVW $111, R12 + B runtime·callbackasm1(SB) + MOVW $112, R12 + B runtime·callbackasm1(SB) + MOVW $113, R12 + B runtime·callbackasm1(SB) + MOVW $114, R12 + B runtime·callbackasm1(SB) + MOVW $115, R12 + B runtime·callbackasm1(SB) + MOVW $116, R12 + B runtime·callbackasm1(SB) + MOVW $117, R12 + B runtime·callbackasm1(SB) + MOVW $118, R12 + B runtime·callbackasm1(SB) + MOVW $119, R12 + B runtime·callbackasm1(SB) + MOVW $120, R12 + B runtime·callbackasm1(SB) + MOVW $121, R12 + B runtime·callbackasm1(SB) + MOVW $122, R12 + B runtime·callbackasm1(SB) + MOVW $123, R12 + B runtime·callbackasm1(SB) + MOVW $124, R12 + B runtime·callbackasm1(SB) + MOVW $125, R12 + B runtime·callbackasm1(SB) + MOVW $126, R12 + B runtime·callbackasm1(SB) + MOVW $127, R12 + B runtime·callbackasm1(SB) + MOVW $128, R12 + B runtime·callbackasm1(SB) + MOVW $129, R12 + B runtime·callbackasm1(SB) + MOVW $130, R12 + B runtime·callbackasm1(SB) + MOVW $131, R12 + B runtime·callbackasm1(SB) + MOVW $132, R12 + B runtime·callbackasm1(SB) + MOVW $133, R12 + B runtime·callbackasm1(SB) + MOVW $134, R12 + B runtime·callbackasm1(SB) + MOVW $135, R12 + B runtime·callbackasm1(SB) + MOVW $136, R12 + B runtime·callbackasm1(SB) + MOVW $137, R12 + B runtime·callbackasm1(SB) + MOVW $138, R12 + B runtime·callbackasm1(SB) + MOVW $139, R12 + B runtime·callbackasm1(SB) + MOVW $140, R12 + B runtime·callbackasm1(SB) + MOVW $141, R12 + B runtime·callbackasm1(SB) + MOVW $142, R12 + B runtime·callbackasm1(SB) + MOVW $143, R12 + B runtime·callbackasm1(SB) + MOVW $144, R12 + B runtime·callbackasm1(SB) + MOVW $145, R12 + B runtime·callbackasm1(SB) + MOVW $146, R12 + B runtime·callbackasm1(SB) + MOVW $147, R12 + B runtime·callbackasm1(SB) + MOVW $148, R12 + B runtime·callbackasm1(SB) + MOVW $149, R12 + B runtime·callbackasm1(SB) + MOVW $150, R12 + B runtime·callbackasm1(SB) + MOVW $151, R12 + B runtime·callbackasm1(SB) + MOVW $152, R12 + B runtime·callbackasm1(SB) + MOVW $153, R12 + B runtime·callbackasm1(SB) + MOVW $154, R12 + B runtime·callbackasm1(SB) + MOVW 
$155, R12
+	B	runtime·callbackasm1(SB)
+	MOVW	$156, R12
+	B	runtime·callbackasm1(SB)
+	MOVW	$157, R12
+	B	runtime·callbackasm1(SB)
+	MOVW	$158, R12
+	B	runtime·callbackasm1(SB)
	[... generated pairs "MOVW $N, R12 / B runtime·callbackasm1(SB)" continue for N = 159 through 1678 ...]
+	MOVW	$1679, R12
+	B	runtime·callbackasm1(SB)
+	MOVW	$1680, R12
+	B	runtime·callbackasm1(SB)
+	MOVW	$1681, R12
+	B	runtime·callbackasm1(SB)
MOVW $1682, R12 + B runtime·callbackasm1(SB) + MOVW $1683, R12 + B runtime·callbackasm1(SB) + MOVW $1684, R12 + B runtime·callbackasm1(SB) + MOVW $1685, R12 + B runtime·callbackasm1(SB) + MOVW $1686, R12 + B runtime·callbackasm1(SB) + MOVW $1687, R12 + B runtime·callbackasm1(SB) + MOVW $1688, R12 + B runtime·callbackasm1(SB) + MOVW $1689, R12 + B runtime·callbackasm1(SB) + MOVW $1690, R12 + B runtime·callbackasm1(SB) + MOVW $1691, R12 + B runtime·callbackasm1(SB) + MOVW $1692, R12 + B runtime·callbackasm1(SB) + MOVW $1693, R12 + B runtime·callbackasm1(SB) + MOVW $1694, R12 + B runtime·callbackasm1(SB) + MOVW $1695, R12 + B runtime·callbackasm1(SB) + MOVW $1696, R12 + B runtime·callbackasm1(SB) + MOVW $1697, R12 + B runtime·callbackasm1(SB) + MOVW $1698, R12 + B runtime·callbackasm1(SB) + MOVW $1699, R12 + B runtime·callbackasm1(SB) + MOVW $1700, R12 + B runtime·callbackasm1(SB) + MOVW $1701, R12 + B runtime·callbackasm1(SB) + MOVW $1702, R12 + B runtime·callbackasm1(SB) + MOVW $1703, R12 + B runtime·callbackasm1(SB) + MOVW $1704, R12 + B runtime·callbackasm1(SB) + MOVW $1705, R12 + B runtime·callbackasm1(SB) + MOVW $1706, R12 + B runtime·callbackasm1(SB) + MOVW $1707, R12 + B runtime·callbackasm1(SB) + MOVW $1708, R12 + B runtime·callbackasm1(SB) + MOVW $1709, R12 + B runtime·callbackasm1(SB) + MOVW $1710, R12 + B runtime·callbackasm1(SB) + MOVW $1711, R12 + B runtime·callbackasm1(SB) + MOVW $1712, R12 + B runtime·callbackasm1(SB) + MOVW $1713, R12 + B runtime·callbackasm1(SB) + MOVW $1714, R12 + B runtime·callbackasm1(SB) + MOVW $1715, R12 + B runtime·callbackasm1(SB) + MOVW $1716, R12 + B runtime·callbackasm1(SB) + MOVW $1717, R12 + B runtime·callbackasm1(SB) + MOVW $1718, R12 + B runtime·callbackasm1(SB) + MOVW $1719, R12 + B runtime·callbackasm1(SB) + MOVW $1720, R12 + B runtime·callbackasm1(SB) + MOVW $1721, R12 + B runtime·callbackasm1(SB) + MOVW $1722, R12 + B runtime·callbackasm1(SB) + MOVW $1723, R12 + B runtime·callbackasm1(SB) + MOVW $1724, R12 + B runtime·callbackasm1(SB) + MOVW $1725, R12 + B runtime·callbackasm1(SB) + MOVW $1726, R12 + B runtime·callbackasm1(SB) + MOVW $1727, R12 + B runtime·callbackasm1(SB) + MOVW $1728, R12 + B runtime·callbackasm1(SB) + MOVW $1729, R12 + B runtime·callbackasm1(SB) + MOVW $1730, R12 + B runtime·callbackasm1(SB) + MOVW $1731, R12 + B runtime·callbackasm1(SB) + MOVW $1732, R12 + B runtime·callbackasm1(SB) + MOVW $1733, R12 + B runtime·callbackasm1(SB) + MOVW $1734, R12 + B runtime·callbackasm1(SB) + MOVW $1735, R12 + B runtime·callbackasm1(SB) + MOVW $1736, R12 + B runtime·callbackasm1(SB) + MOVW $1737, R12 + B runtime·callbackasm1(SB) + MOVW $1738, R12 + B runtime·callbackasm1(SB) + MOVW $1739, R12 + B runtime·callbackasm1(SB) + MOVW $1740, R12 + B runtime·callbackasm1(SB) + MOVW $1741, R12 + B runtime·callbackasm1(SB) + MOVW $1742, R12 + B runtime·callbackasm1(SB) + MOVW $1743, R12 + B runtime·callbackasm1(SB) + MOVW $1744, R12 + B runtime·callbackasm1(SB) + MOVW $1745, R12 + B runtime·callbackasm1(SB) + MOVW $1746, R12 + B runtime·callbackasm1(SB) + MOVW $1747, R12 + B runtime·callbackasm1(SB) + MOVW $1748, R12 + B runtime·callbackasm1(SB) + MOVW $1749, R12 + B runtime·callbackasm1(SB) + MOVW $1750, R12 + B runtime·callbackasm1(SB) + MOVW $1751, R12 + B runtime·callbackasm1(SB) + MOVW $1752, R12 + B runtime·callbackasm1(SB) + MOVW $1753, R12 + B runtime·callbackasm1(SB) + MOVW $1754, R12 + B runtime·callbackasm1(SB) + MOVW $1755, R12 + B runtime·callbackasm1(SB) + MOVW $1756, R12 + B runtime·callbackasm1(SB) + MOVW $1757, R12 + B 
runtime·callbackasm1(SB) + MOVW $1758, R12 + B runtime·callbackasm1(SB) + MOVW $1759, R12 + B runtime·callbackasm1(SB) + MOVW $1760, R12 + B runtime·callbackasm1(SB) + MOVW $1761, R12 + B runtime·callbackasm1(SB) + MOVW $1762, R12 + B runtime·callbackasm1(SB) + MOVW $1763, R12 + B runtime·callbackasm1(SB) + MOVW $1764, R12 + B runtime·callbackasm1(SB) + MOVW $1765, R12 + B runtime·callbackasm1(SB) + MOVW $1766, R12 + B runtime·callbackasm1(SB) + MOVW $1767, R12 + B runtime·callbackasm1(SB) + MOVW $1768, R12 + B runtime·callbackasm1(SB) + MOVW $1769, R12 + B runtime·callbackasm1(SB) + MOVW $1770, R12 + B runtime·callbackasm1(SB) + MOVW $1771, R12 + B runtime·callbackasm1(SB) + MOVW $1772, R12 + B runtime·callbackasm1(SB) + MOVW $1773, R12 + B runtime·callbackasm1(SB) + MOVW $1774, R12 + B runtime·callbackasm1(SB) + MOVW $1775, R12 + B runtime·callbackasm1(SB) + MOVW $1776, R12 + B runtime·callbackasm1(SB) + MOVW $1777, R12 + B runtime·callbackasm1(SB) + MOVW $1778, R12 + B runtime·callbackasm1(SB) + MOVW $1779, R12 + B runtime·callbackasm1(SB) + MOVW $1780, R12 + B runtime·callbackasm1(SB) + MOVW $1781, R12 + B runtime·callbackasm1(SB) + MOVW $1782, R12 + B runtime·callbackasm1(SB) + MOVW $1783, R12 + B runtime·callbackasm1(SB) + MOVW $1784, R12 + B runtime·callbackasm1(SB) + MOVW $1785, R12 + B runtime·callbackasm1(SB) + MOVW $1786, R12 + B runtime·callbackasm1(SB) + MOVW $1787, R12 + B runtime·callbackasm1(SB) + MOVW $1788, R12 + B runtime·callbackasm1(SB) + MOVW $1789, R12 + B runtime·callbackasm1(SB) + MOVW $1790, R12 + B runtime·callbackasm1(SB) + MOVW $1791, R12 + B runtime·callbackasm1(SB) + MOVW $1792, R12 + B runtime·callbackasm1(SB) + MOVW $1793, R12 + B runtime·callbackasm1(SB) + MOVW $1794, R12 + B runtime·callbackasm1(SB) + MOVW $1795, R12 + B runtime·callbackasm1(SB) + MOVW $1796, R12 + B runtime·callbackasm1(SB) + MOVW $1797, R12 + B runtime·callbackasm1(SB) + MOVW $1798, R12 + B runtime·callbackasm1(SB) + MOVW $1799, R12 + B runtime·callbackasm1(SB) + MOVW $1800, R12 + B runtime·callbackasm1(SB) + MOVW $1801, R12 + B runtime·callbackasm1(SB) + MOVW $1802, R12 + B runtime·callbackasm1(SB) + MOVW $1803, R12 + B runtime·callbackasm1(SB) + MOVW $1804, R12 + B runtime·callbackasm1(SB) + MOVW $1805, R12 + B runtime·callbackasm1(SB) + MOVW $1806, R12 + B runtime·callbackasm1(SB) + MOVW $1807, R12 + B runtime·callbackasm1(SB) + MOVW $1808, R12 + B runtime·callbackasm1(SB) + MOVW $1809, R12 + B runtime·callbackasm1(SB) + MOVW $1810, R12 + B runtime·callbackasm1(SB) + MOVW $1811, R12 + B runtime·callbackasm1(SB) + MOVW $1812, R12 + B runtime·callbackasm1(SB) + MOVW $1813, R12 + B runtime·callbackasm1(SB) + MOVW $1814, R12 + B runtime·callbackasm1(SB) + MOVW $1815, R12 + B runtime·callbackasm1(SB) + MOVW $1816, R12 + B runtime·callbackasm1(SB) + MOVW $1817, R12 + B runtime·callbackasm1(SB) + MOVW $1818, R12 + B runtime·callbackasm1(SB) + MOVW $1819, R12 + B runtime·callbackasm1(SB) + MOVW $1820, R12 + B runtime·callbackasm1(SB) + MOVW $1821, R12 + B runtime·callbackasm1(SB) + MOVW $1822, R12 + B runtime·callbackasm1(SB) + MOVW $1823, R12 + B runtime·callbackasm1(SB) + MOVW $1824, R12 + B runtime·callbackasm1(SB) + MOVW $1825, R12 + B runtime·callbackasm1(SB) + MOVW $1826, R12 + B runtime·callbackasm1(SB) + MOVW $1827, R12 + B runtime·callbackasm1(SB) + MOVW $1828, R12 + B runtime·callbackasm1(SB) + MOVW $1829, R12 + B runtime·callbackasm1(SB) + MOVW $1830, R12 + B runtime·callbackasm1(SB) + MOVW $1831, R12 + B runtime·callbackasm1(SB) + MOVW $1832, R12 + B runtime·callbackasm1(SB) + 
MOVW $1833, R12 + B runtime·callbackasm1(SB) + MOVW $1834, R12 + B runtime·callbackasm1(SB) + MOVW $1835, R12 + B runtime·callbackasm1(SB) + MOVW $1836, R12 + B runtime·callbackasm1(SB) + MOVW $1837, R12 + B runtime·callbackasm1(SB) + MOVW $1838, R12 + B runtime·callbackasm1(SB) + MOVW $1839, R12 + B runtime·callbackasm1(SB) + MOVW $1840, R12 + B runtime·callbackasm1(SB) + MOVW $1841, R12 + B runtime·callbackasm1(SB) + MOVW $1842, R12 + B runtime·callbackasm1(SB) + MOVW $1843, R12 + B runtime·callbackasm1(SB) + MOVW $1844, R12 + B runtime·callbackasm1(SB) + MOVW $1845, R12 + B runtime·callbackasm1(SB) + MOVW $1846, R12 + B runtime·callbackasm1(SB) + MOVW $1847, R12 + B runtime·callbackasm1(SB) + MOVW $1848, R12 + B runtime·callbackasm1(SB) + MOVW $1849, R12 + B runtime·callbackasm1(SB) + MOVW $1850, R12 + B runtime·callbackasm1(SB) + MOVW $1851, R12 + B runtime·callbackasm1(SB) + MOVW $1852, R12 + B runtime·callbackasm1(SB) + MOVW $1853, R12 + B runtime·callbackasm1(SB) + MOVW $1854, R12 + B runtime·callbackasm1(SB) + MOVW $1855, R12 + B runtime·callbackasm1(SB) + MOVW $1856, R12 + B runtime·callbackasm1(SB) + MOVW $1857, R12 + B runtime·callbackasm1(SB) + MOVW $1858, R12 + B runtime·callbackasm1(SB) + MOVW $1859, R12 + B runtime·callbackasm1(SB) + MOVW $1860, R12 + B runtime·callbackasm1(SB) + MOVW $1861, R12 + B runtime·callbackasm1(SB) + MOVW $1862, R12 + B runtime·callbackasm1(SB) + MOVW $1863, R12 + B runtime·callbackasm1(SB) + MOVW $1864, R12 + B runtime·callbackasm1(SB) + MOVW $1865, R12 + B runtime·callbackasm1(SB) + MOVW $1866, R12 + B runtime·callbackasm1(SB) + MOVW $1867, R12 + B runtime·callbackasm1(SB) + MOVW $1868, R12 + B runtime·callbackasm1(SB) + MOVW $1869, R12 + B runtime·callbackasm1(SB) + MOVW $1870, R12 + B runtime·callbackasm1(SB) + MOVW $1871, R12 + B runtime·callbackasm1(SB) + MOVW $1872, R12 + B runtime·callbackasm1(SB) + MOVW $1873, R12 + B runtime·callbackasm1(SB) + MOVW $1874, R12 + B runtime·callbackasm1(SB) + MOVW $1875, R12 + B runtime·callbackasm1(SB) + MOVW $1876, R12 + B runtime·callbackasm1(SB) + MOVW $1877, R12 + B runtime·callbackasm1(SB) + MOVW $1878, R12 + B runtime·callbackasm1(SB) + MOVW $1879, R12 + B runtime·callbackasm1(SB) + MOVW $1880, R12 + B runtime·callbackasm1(SB) + MOVW $1881, R12 + B runtime·callbackasm1(SB) + MOVW $1882, R12 + B runtime·callbackasm1(SB) + MOVW $1883, R12 + B runtime·callbackasm1(SB) + MOVW $1884, R12 + B runtime·callbackasm1(SB) + MOVW $1885, R12 + B runtime·callbackasm1(SB) + MOVW $1886, R12 + B runtime·callbackasm1(SB) + MOVW $1887, R12 + B runtime·callbackasm1(SB) + MOVW $1888, R12 + B runtime·callbackasm1(SB) + MOVW $1889, R12 + B runtime·callbackasm1(SB) + MOVW $1890, R12 + B runtime·callbackasm1(SB) + MOVW $1891, R12 + B runtime·callbackasm1(SB) + MOVW $1892, R12 + B runtime·callbackasm1(SB) + MOVW $1893, R12 + B runtime·callbackasm1(SB) + MOVW $1894, R12 + B runtime·callbackasm1(SB) + MOVW $1895, R12 + B runtime·callbackasm1(SB) + MOVW $1896, R12 + B runtime·callbackasm1(SB) + MOVW $1897, R12 + B runtime·callbackasm1(SB) + MOVW $1898, R12 + B runtime·callbackasm1(SB) + MOVW $1899, R12 + B runtime·callbackasm1(SB) + MOVW $1900, R12 + B runtime·callbackasm1(SB) + MOVW $1901, R12 + B runtime·callbackasm1(SB) + MOVW $1902, R12 + B runtime·callbackasm1(SB) + MOVW $1903, R12 + B runtime·callbackasm1(SB) + MOVW $1904, R12 + B runtime·callbackasm1(SB) + MOVW $1905, R12 + B runtime·callbackasm1(SB) + MOVW $1906, R12 + B runtime·callbackasm1(SB) + MOVW $1907, R12 + B runtime·callbackasm1(SB) + MOVW $1908, R12 + B 
runtime·callbackasm1(SB) + MOVW $1909, R12 + B runtime·callbackasm1(SB) + MOVW $1910, R12 + B runtime·callbackasm1(SB) + MOVW $1911, R12 + B runtime·callbackasm1(SB) + MOVW $1912, R12 + B runtime·callbackasm1(SB) + MOVW $1913, R12 + B runtime·callbackasm1(SB) + MOVW $1914, R12 + B runtime·callbackasm1(SB) + MOVW $1915, R12 + B runtime·callbackasm1(SB) + MOVW $1916, R12 + B runtime·callbackasm1(SB) + MOVW $1917, R12 + B runtime·callbackasm1(SB) + MOVW $1918, R12 + B runtime·callbackasm1(SB) + MOVW $1919, R12 + B runtime·callbackasm1(SB) + MOVW $1920, R12 + B runtime·callbackasm1(SB) + MOVW $1921, R12 + B runtime·callbackasm1(SB) + MOVW $1922, R12 + B runtime·callbackasm1(SB) + MOVW $1923, R12 + B runtime·callbackasm1(SB) + MOVW $1924, R12 + B runtime·callbackasm1(SB) + MOVW $1925, R12 + B runtime·callbackasm1(SB) + MOVW $1926, R12 + B runtime·callbackasm1(SB) + MOVW $1927, R12 + B runtime·callbackasm1(SB) + MOVW $1928, R12 + B runtime·callbackasm1(SB) + MOVW $1929, R12 + B runtime·callbackasm1(SB) + MOVW $1930, R12 + B runtime·callbackasm1(SB) + MOVW $1931, R12 + B runtime·callbackasm1(SB) + MOVW $1932, R12 + B runtime·callbackasm1(SB) + MOVW $1933, R12 + B runtime·callbackasm1(SB) + MOVW $1934, R12 + B runtime·callbackasm1(SB) + MOVW $1935, R12 + B runtime·callbackasm1(SB) + MOVW $1936, R12 + B runtime·callbackasm1(SB) + MOVW $1937, R12 + B runtime·callbackasm1(SB) + MOVW $1938, R12 + B runtime·callbackasm1(SB) + MOVW $1939, R12 + B runtime·callbackasm1(SB) + MOVW $1940, R12 + B runtime·callbackasm1(SB) + MOVW $1941, R12 + B runtime·callbackasm1(SB) + MOVW $1942, R12 + B runtime·callbackasm1(SB) + MOVW $1943, R12 + B runtime·callbackasm1(SB) + MOVW $1944, R12 + B runtime·callbackasm1(SB) + MOVW $1945, R12 + B runtime·callbackasm1(SB) + MOVW $1946, R12 + B runtime·callbackasm1(SB) + MOVW $1947, R12 + B runtime·callbackasm1(SB) + MOVW $1948, R12 + B runtime·callbackasm1(SB) + MOVW $1949, R12 + B runtime·callbackasm1(SB) + MOVW $1950, R12 + B runtime·callbackasm1(SB) + MOVW $1951, R12 + B runtime·callbackasm1(SB) + MOVW $1952, R12 + B runtime·callbackasm1(SB) + MOVW $1953, R12 + B runtime·callbackasm1(SB) + MOVW $1954, R12 + B runtime·callbackasm1(SB) + MOVW $1955, R12 + B runtime·callbackasm1(SB) + MOVW $1956, R12 + B runtime·callbackasm1(SB) + MOVW $1957, R12 + B runtime·callbackasm1(SB) + MOVW $1958, R12 + B runtime·callbackasm1(SB) + MOVW $1959, R12 + B runtime·callbackasm1(SB) + MOVW $1960, R12 + B runtime·callbackasm1(SB) + MOVW $1961, R12 + B runtime·callbackasm1(SB) + MOVW $1962, R12 + B runtime·callbackasm1(SB) + MOVW $1963, R12 + B runtime·callbackasm1(SB) + MOVW $1964, R12 + B runtime·callbackasm1(SB) + MOVW $1965, R12 + B runtime·callbackasm1(SB) + MOVW $1966, R12 + B runtime·callbackasm1(SB) + MOVW $1967, R12 + B runtime·callbackasm1(SB) + MOVW $1968, R12 + B runtime·callbackasm1(SB) + MOVW $1969, R12 + B runtime·callbackasm1(SB) + MOVW $1970, R12 + B runtime·callbackasm1(SB) + MOVW $1971, R12 + B runtime·callbackasm1(SB) + MOVW $1972, R12 + B runtime·callbackasm1(SB) + MOVW $1973, R12 + B runtime·callbackasm1(SB) + MOVW $1974, R12 + B runtime·callbackasm1(SB) + MOVW $1975, R12 + B runtime·callbackasm1(SB) + MOVW $1976, R12 + B runtime·callbackasm1(SB) + MOVW $1977, R12 + B runtime·callbackasm1(SB) + MOVW $1978, R12 + B runtime·callbackasm1(SB) + MOVW $1979, R12 + B runtime·callbackasm1(SB) + MOVW $1980, R12 + B runtime·callbackasm1(SB) + MOVW $1981, R12 + B runtime·callbackasm1(SB) + MOVW $1982, R12 + B runtime·callbackasm1(SB) + MOVW $1983, R12 + B runtime·callbackasm1(SB) + 
MOVW $1984, R12 + B runtime·callbackasm1(SB) + MOVW $1985, R12 + B runtime·callbackasm1(SB) + MOVW $1986, R12 + B runtime·callbackasm1(SB) + MOVW $1987, R12 + B runtime·callbackasm1(SB) + MOVW $1988, R12 + B runtime·callbackasm1(SB) + MOVW $1989, R12 + B runtime·callbackasm1(SB) + MOVW $1990, R12 + B runtime·callbackasm1(SB) + MOVW $1991, R12 + B runtime·callbackasm1(SB) + MOVW $1992, R12 + B runtime·callbackasm1(SB) + MOVW $1993, R12 + B runtime·callbackasm1(SB) + MOVW $1994, R12 + B runtime·callbackasm1(SB) + MOVW $1995, R12 + B runtime·callbackasm1(SB) + MOVW $1996, R12 + B runtime·callbackasm1(SB) + MOVW $1997, R12 + B runtime·callbackasm1(SB) + MOVW $1998, R12 + B runtime·callbackasm1(SB) + MOVW $1999, R12 + B runtime·callbackasm1(SB) |
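Each trampoline above records its callback index in R12 and branches to the shared runtime·callbackasm1(SB) handler, which uses that index to dispatch to the registered Go function. Blocks like this are mechanically generated rather than written by hand. The sketch below is a minimal, hypothetical generator (a standalone program, not the runtime's actual tooling; the output file name, the assembly preamble, and the 2000-slot count matching the $0..$1999 indices seen here are all assumptions) showing how such a file could be produced:

	// gen_zcallback_arm.go — a minimal sketch, not the runtime's real generator.
	// It emits one two-instruction trampoline per callback slot, matching the
	// pattern in the diff above (MOVW $index, R12; B runtime·callbackasm1).
	package main

	import (
		"bytes"
		"fmt"
		"os"
	)

	const maxCallbacks = 2000 // assumption: slots 0 through 1999, as seen above

	func main() {
		var buf bytes.Buffer
		// Assumed preamble; the real generated file's header may differ.
		buf.WriteString("// Code generated; DO NOT EDIT.\n\n")
		buf.WriteString("TEXT runtime·callbackasm(SB),NOSPLIT,$0\n")
		for i := 0; i < maxCallbacks; i++ {
			// Each trampoline stashes its own index in R12 and jumps to the
			// common handler, which dispatches to the registered Go callback.
			fmt.Fprintf(&buf, "\tMOVW\t$%d, R12\n\tB\truntime·callbackasm1(SB)\n", i)
		}
		if err := os.WriteFile("zcallback_windows_arm.s", buf.Bytes(), 0666); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	}

Generating the trampolines keeps the per-slot code identical and lets the slot count be changed in one place; only the index loaded into R12 varies between entries.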
