diff options
| author | Brad Fitzpatrick <bradfitz@golang.org> | 2016-03-01 23:21:55 +0000 |
|---|---|---|
| committer | Brad Fitzpatrick <bradfitz@golang.org> | 2016-03-02 00:13:47 +0000 |
| commit | 5fea2ccc77eb50a9704fa04b7c61755fe34e1d95 (patch) | |
| tree | 00137f90183ae2a01ca42249e04e9e4dabdf6249 /src/runtime | |
| parent | 8b4deb448e587802f67930b765c9598fc8cd36e5 (diff) | |
| download | go-5fea2ccc77eb50a9704fa04b7c61755fe34e1d95.tar.xz | |
all: single space after period.
The tree's pretty inconsistent about single space vs double space
after a period in documentation. Make it consistently a single space,
per earlier decisions. This means contributors won't be confused by
misleading precedent.
This CL doesn't use go/doc to parse. It only addresses // comments.
It was generated with:
$ perl -i -npe 's,^(\s*// .+[a-z]\.) +([A-Z]),$1 $2,' $(git grep -l -E '^\s*//(.+\.) +([A-Z])')
$ go test go/doc -update
Change-Id: Iccdb99c37c797ef1f804a94b22ba5ee4b500c4f7
Reviewed-on: https://go-review.googlesource.com/20022
Reviewed-by: Rob Pike <r@golang.org>
Reviewed-by: Dave Day <djd@golang.org>
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Diffstat (limited to 'src/runtime')
112 files changed, 358 insertions, 358 deletions
diff --git a/src/runtime/alg.go b/src/runtime/alg.go index e507e71715..7aacc8cf9b 100644 --- a/src/runtime/alg.go +++ b/src/runtime/alg.go @@ -64,7 +64,7 @@ func memhash128(p unsafe.Pointer, h uintptr) uintptr { } // memhash_varlen is defined in assembly because it needs access -// to the closure. It appears here to provide an argument +// to the closure. It appears here to provide an argument // signature for the assembly routine. func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s index 9237d57f24..2d16f4940a 100644 --- a/src/runtime/asm_386.s +++ b/src/runtime/asm_386.s @@ -226,7 +226,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $0-4 // func mcall(fn func(*g)) // Switch to m->g0's stack, call fn(g). -// Fn must never return. It should gogo(&g->sched) +// Fn must never return. It should gogo(&g->sched) // to keep running g. TEXT runtime·mcall(SB), NOSPLIT, $0-4 MOVL fn+0(FP), DI @@ -259,7 +259,7 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4 RET // systemstack_switch is a dummy routine that systemstack leaves at the bottom -// of the G stack. We need to distinguish the routine that +// of the G stack. We need to distinguish the routine that // lives at the bottom of the G stack from the one that lives // at the top of the system stack because the one at the top of // the system stack terminates the stack walk (see topofstack()). @@ -291,7 +291,7 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-4 CALL AX switch: - // save our state in g->sched. Pretend to + // save our state in g->sched. Pretend to // be systemstack_switch if the G stack is scanned. MOVL $runtime·systemstack_switch(SB), (g_sched+gobuf_pc)(AX) MOVL SP, (g_sched+gobuf_sp)(AX) @@ -900,7 +900,7 @@ final1: RET endofpage: - // address ends in 1111xxxx. Might be up against + // address ends in 1111xxxx. Might be up against // a page boundary, so load ending at last byte. // Then shift bytes down using pshufb. 
MOVOU -32(AX)(BX*1), X1 @@ -1145,7 +1145,7 @@ DATA masks<>+0xfc(SB)/4, $0x00ffffff GLOBL masks<>(SB),RODATA,$256 -// these are arguments to pshufb. They move data down from +// these are arguments to pshufb. They move data down from // the high bytes of the register to the low bytes of the register. // index is how many bytes to move. DATA shifts<>+0x00(SB)/4, $0x00000000 @@ -1370,7 +1370,7 @@ small: MOVL (SI), SI JMP si_finish si_high: - // address ends in 111111xx. Load up to bytes we want, move to correct position. + // address ends in 111111xx. Load up to bytes we want, move to correct position. MOVL -4(SI)(BX*1), SI SHRL CX, SI si_finish: @@ -1606,7 +1606,7 @@ TEXT runtime·prefetcht2(SB),NOSPLIT,$0-4 TEXT runtime·prefetchnta(SB),NOSPLIT,$0-4 RET -// Add a module's moduledata to the linked list of moduledata objects. This +// Add a module's moduledata to the linked list of moduledata objects. This // is called from .init_array by a function generated in the linker and so // follows the platform ABI wrt register preservation -- it only touches AX, // CX (implicitly) and DX, but it does not follow the ABI wrt arguments: diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s index ac4630c833..b4df1d80d7 100644 --- a/src/runtime/asm_amd64.s +++ b/src/runtime/asm_amd64.s @@ -203,7 +203,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $0-8 // func mcall(fn func(*g)) // Switch to m->g0's stack, call fn(g). -// Fn must never return. It should gogo(&g->sched) +// Fn must never return. It should gogo(&g->sched) // to keep running g. TEXT runtime·mcall(SB), NOSPLIT, $0-8 MOVQ fn+0(FP), DI @@ -237,7 +237,7 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-8 RET // systemstack_switch is a dummy routine that systemstack leaves at the bottom -// of the G stack. We need to distinguish the routine that +// of the G stack. 
We need to distinguish the routine that // lives at the bottom of the G stack from the one that lives // at the top of the system stack because the one at the top of // the system stack terminates the stack walk (see topofstack()). @@ -268,7 +268,7 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-8 CALL AX switch: - // save our state in g->sched. Pretend to + // save our state in g->sched. Pretend to // be systemstack_switch if the G stack is scanned. MOVQ $runtime·systemstack_switch(SB), SI MOVQ SI, (g_sched+gobuf_pc)(AX) @@ -716,7 +716,7 @@ havem: CALL runtime·cgocallbackg(SB) MOVQ 0(SP), R8 - // Compute the size of the frame again. FP and SP have + // Compute the size of the frame again. FP and SP have // completely different values here than they did above, // but only their difference matters. LEAQ fv+0(FP), AX @@ -909,7 +909,7 @@ final1: RET endofpage: - // address ends in 1111xxxx. Might be up against + // address ends in 1111xxxx. Might be up against // a page boundary, so load ending at last byte. // Then shift bytes down using pshufb. MOVOU -32(AX)(CX*1), X1 @@ -1232,7 +1232,7 @@ TEXT ·checkASM(SB),NOSPLIT,$0-1 SETEQ ret+0(FP) RET -// these are arguments to pshufb. They move data down from +// these are arguments to pshufb. They move data down from // the high bytes of the register to the low bytes of the register. // index is how many bytes to move. DATA shifts<>+0x00(SB)/8, $0x0000000000000000 @@ -1412,7 +1412,7 @@ small: MOVQ (SI), SI JMP si_finish si_high: - // address ends in 11111xxx. Load up to bytes we want, move to correct position. + // address ends in 11111xxx. Load up to bytes we want, move to correct position. MOVQ -8(SI)(BX*1), SI SHRQ CX, SI si_finish: @@ -1872,7 +1872,7 @@ sseloopentry: CMPQ DI, AX JB sseloop - // Search the last 16-byte chunk. This chunk may overlap with the + // Search the last 16-byte chunk. This chunk may overlap with the // chunks we've already searched, but that's ok. 
MOVQ AX, DI MOVOU (AX), X1 diff --git a/src/runtime/asm_amd64p32.s b/src/runtime/asm_amd64p32.s index ae7a53821b..452ce04143 100644 --- a/src/runtime/asm_amd64p32.s +++ b/src/runtime/asm_amd64p32.s @@ -133,7 +133,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $0-4 // func mcall(fn func(*g)) // Switch to m->g0's stack, call fn(g). -// Fn must never return. It should gogo(&g->sched) +// Fn must never return. It should gogo(&g->sched) // to keep running g. TEXT runtime·mcall(SB), NOSPLIT, $0-4 MOVL fn+0(FP), DI @@ -166,7 +166,7 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4 RET // systemstack_switch is a dummy routine that systemstack leaves at the bottom -// of the G stack. We need to distinguish the routine that +// of the G stack. We need to distinguish the routine that // lives at the bottom of the G stack from the one that lives // at the top of the system stack because the one at the top of // the system stack terminates the stack walk (see topofstack()). @@ -198,7 +198,7 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-4 CALL AX switch: - // save our state in g->sched. Pretend to + // save our state in g->sched. Pretend to // be systemstack_switch if the G stack is scanned. MOVL $runtime·systemstack_switch(SB), SI MOVL SI, (g_sched+gobuf_pc)(AX) @@ -491,7 +491,7 @@ TEXT runtime·memclr(SB),NOSPLIT,$0-8 REP STOSB // Note: we zero only 4 bytes at a time so that the tail is at most - // 3 bytes. That guarantees that we aren't zeroing pointers with STOSB. + // 3 bytes. That guarantees that we aren't zeroing pointers with STOSB. // See issue 13160. RET @@ -692,7 +692,7 @@ small: MOVQ (SI), SI JMP si_finish si_high: - // address ends in 11111xxx. Load up to bytes we want, move to correct position. + // address ends in 11111xxx. Load up to bytes we want, move to correct position. 
MOVQ BX, DX ADDQ SI, DX MOVQ -8(DX), SI diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s index 2fdfbea0e1..46f8474f54 100644 --- a/src/runtime/asm_arm.s +++ b/src/runtime/asm_arm.s @@ -153,7 +153,7 @@ TEXT runtime·gogo(SB),NOSPLIT,$-4-4 // func mcall(fn func(*g)) // Switch to m->g0's stack, call fn(g). -// Fn must never return. It should gogo(&g->sched) +// Fn must never return. It should gogo(&g->sched) // to keep running g. TEXT runtime·mcall(SB),NOSPLIT,$-4-4 // Save caller state in g->sched. @@ -185,7 +185,7 @@ TEXT runtime·mcall(SB),NOSPLIT,$-4-4 RET // systemstack_switch is a dummy routine that systemstack leaves at the bottom -// of the G stack. We need to distinguish the routine that +// of the G stack. We need to distinguish the routine that // lives at the bottom of the G stack from the one that lives // at the top of the system stack because the one at the top of // the system stack terminates the stack walk (see topofstack()). @@ -217,7 +217,7 @@ TEXT runtime·systemstack(SB),NOSPLIT,$0-4 BL (R0) switch: - // save our state in g->sched. Pretend to + // save our state in g->sched. Pretend to // be systemstack_switch if the G stack is scanned. MOVW $runtime·systemstack_switch(SB), R3 #ifdef GOOS_nacl @@ -973,7 +973,7 @@ yieldloop: // Called from cgo wrappers, this function returns g->m->curg.stack.hi. // Must obey the gcc calling convention. TEXT _cgo_topofstack(SB),NOSPLIT,$8 - // R11 and g register are clobbered by load_g. They are + // R11 and g register are clobbered by load_g. They are // callee-save in the gcc calling convention, so save them here. MOVW R11, saveR11-4(SP) MOVW g, saveG-8(SP) diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s index 5a5c64c270..e06aa11a5d 100644 --- a/src/runtime/asm_arm64.s +++ b/src/runtime/asm_arm64.s @@ -149,7 +149,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $-8-8 // void mcall(fn func(*g)) // Switch to m->g0's stack, call fn(g). -// Fn must never return. 
It should gogo(&g->sched) +// Fn must never return. It should gogo(&g->sched) // to keep running g. TEXT runtime·mcall(SB), NOSPLIT, $-8-8 // Save caller state in g->sched @@ -178,7 +178,7 @@ TEXT runtime·mcall(SB), NOSPLIT, $-8-8 B runtime·badmcall2(SB) // systemstack_switch is a dummy routine that systemstack leaves at the bottom -// of the G stack. We need to distinguish the routine that +// of the G stack. We need to distinguish the routine that // lives at the bottom of the G stack from the one that lives // at the top of the system stack because the one at the top of // the system stack terminates the stack walk (see topofstack()). @@ -211,7 +211,7 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-8 BL (R3) switch: - // save our state in g->sched. Pretend to + // save our state in g->sched. Pretend to // be systemstack_switch if the G stack is scanned. MOVD $runtime·systemstack_switch(SB), R6 ADD $8, R6 // get past prologue @@ -542,7 +542,7 @@ g0: BL (R1) MOVD R0, R9 - // Restore g, stack pointer. R0 is errno, so don't touch it + // Restore g, stack pointer. R0 is errno, so don't touch it MOVD 0(RSP), g BL runtime·save_g(SB) MOVD (g_stack+stack_hi)(g), R5 diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s index 80cea8587a..1ffa3ae348 100644 --- a/src/runtime/asm_mips64x.s +++ b/src/runtime/asm_mips64x.s @@ -130,7 +130,7 @@ TEXT runtime·gogo(SB), NOSPLIT, $-8-8 // void mcall(fn func(*g)) // Switch to m->g0's stack, call fn(g). -// Fn must never return. It should gogo(&g->sched) +// Fn must never return. It should gogo(&g->sched) // to keep running g. TEXT runtime·mcall(SB), NOSPLIT, $-8-8 // Save caller state in g->sched @@ -156,7 +156,7 @@ TEXT runtime·mcall(SB), NOSPLIT, $-8-8 JMP runtime·badmcall2(SB) // systemstack_switch is a dummy routine that systemstack leaves at the bottom -// of the G stack. We need to distinguish the routine that +// of the G stack. 
We need to distinguish the routine that // lives at the bottom of the G stack from the one that lives // at the top of the system stack because the one at the top of // the system stack terminates the stack walk (see topofstack()). @@ -186,7 +186,7 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-8 JAL (R4) switch: - // save our state in g->sched. Pretend to + // save our state in g->sched. Pretend to // be systemstack_switch if the G stack is scanned. MOVV $runtime·systemstack_switch(SB), R4 ADDV $8, R4 // get past prologue diff --git a/src/runtime/asm_ppc64x.h b/src/runtime/asm_ppc64x.h index d6ae67fab9..5e55055fdb 100644 --- a/src/runtime/asm_ppc64x.h +++ b/src/runtime/asm_ppc64x.h @@ -14,7 +14,7 @@ // +---------------------+ <- R1 // // So a function that sets up a stack frame at all uses as least FIXED_FRAME -// bytes of stack. This mostly affects assembly that calls other functions +// bytes of stack. This mostly affects assembly that calls other functions // with arguments (the arguments should be stored at FIXED_FRAME+0(R1), // FIXED_FRAME+8(R1) etc) and some other low-level places. // diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s index f067b4a9b9..59bc8a22dd 100644 --- a/src/runtime/asm_ppc64x.s +++ b/src/runtime/asm_ppc64x.s @@ -155,7 +155,7 @@ TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8 // void mcall(fn func(*g)) // Switch to m->g0's stack, call fn(g). -// Fn must never return. It should gogo(&g->sched) +// Fn must never return. It should gogo(&g->sched) // to keep running g. TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 // Save caller state in g->sched @@ -187,7 +187,7 @@ TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 BR runtime·badmcall2(SB) // systemstack_switch is a dummy routine that systemstack leaves at the bottom -// of the G stack. We need to distinguish the routine that +// of the G stack. 
We need to distinguish the routine that // lives at the bottom of the G stack from the one that lives // at the top of the system stack because the one at the top of // the system stack terminates the stack walk (see topofstack()). @@ -226,7 +226,7 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-8 BL (CTR) switch: - // save our state in g->sched. Pretend to + // save our state in g->sched. Pretend to // be systemstack_switch if the G stack is scanned. MOVD $runtime·systemstack_switch(SB), R6 ADD $16, R6 // get past prologue (including r2-setting instructions when they're there) diff --git a/src/runtime/cgo/callbacks.go b/src/runtime/cgo/callbacks.go index eea0371c87..47bd2b0edc 100644 --- a/src/runtime/cgo/callbacks.go +++ b/src/runtime/cgo/callbacks.go @@ -22,7 +22,7 @@ func _runtime_cgocallback(unsafe.Pointer, unsafe.Pointer, uintptr) //go:cgo_export_static crosscall2 //go:cgo_export_dynamic crosscall2 -// Panic. The argument is converted into a Go string. +// Panic. The argument is converted into a Go string. // Call like this in code compiled with gcc: // struct { const char *p; } a; @@ -69,7 +69,7 @@ var _cgo_thread_start = &x_cgo_thread_start // Creates a new system thread without updating any Go state. // // This method is invoked during shared library loading to create a new OS -// thread to perform the runtime initialization. This method is similar to +// thread to perform the runtime initialization. This method is similar to // _cgo_sys_thread_start except that it doesn't update any Go state. //go:cgo_import_static x_cgo_sys_thread_create @@ -82,7 +82,7 @@ var _cgo_sys_thread_create = &x_cgo_sys_thread_create // // We currently block at every CGO entry point (via _cgo_wait_runtime_init_done) // to ensure that the runtime has been initialized before the CGO call is -// executed. This is necessary for shared libraries where we kickoff runtime +// executed. 
This is necessary for shared libraries where we kickoff runtime // initialization in a separate thread and return without waiting for this // thread to complete the init. diff --git a/src/runtime/cgo/gcc_dragonfly_amd64.c b/src/runtime/cgo/gcc_dragonfly_amd64.c index 5f26136916..85c53ca707 100644 --- a/src/runtime/cgo/gcc_dragonfly_amd64.c +++ b/src/runtime/cgo/gcc_dragonfly_amd64.c @@ -69,11 +69,11 @@ threadentry(void *v) setg_gcc((void*)ts.g); // On DragonFly, a new thread inherits the signal stack of the - // creating thread. That confuses minit, so we remove that - // signal stack here before calling the regular mstart. It's + // creating thread. That confuses minit, so we remove that + // signal stack here before calling the regular mstart. It's // a bit baroque to remove a signal stack here only to add one // in minit, but it's a simple change that keeps DragonFly - // working like other OS's. At this point all signals are + // working like other OS's. At this point all signals are // blocked, so there is no race. memset(&ss, 0, sizeof ss); ss.ss_flags = SS_DISABLE; diff --git a/src/runtime/cgo/gcc_freebsd_arm.c b/src/runtime/cgo/gcc_freebsd_arm.c index 8c294aa6d7..73f32792c5 100644 --- a/src/runtime/cgo/gcc_freebsd_arm.c +++ b/src/runtime/cgo/gcc_freebsd_arm.c @@ -53,7 +53,7 @@ _cgo_sys_thread_start(ThreadStart *ts) // Not sure why the memset is necessary here, // but without it, we get a bogus stack size - // out of pthread_attr_getstacksize. C'est la Linux. + // out of pthread_attr_getstacksize. C'est la Linux. memset(&attr, 0, sizeof attr); pthread_attr_init(&attr); size = 0; diff --git a/src/runtime/cgo/gcc_linux_386.c b/src/runtime/cgo/gcc_linux_386.c index 380441c0d4..15e0a8a302 100644 --- a/src/runtime/cgo/gcc_linux_386.c +++ b/src/runtime/cgo/gcc_linux_386.c @@ -48,7 +48,7 @@ _cgo_sys_thread_start(ThreadStart *ts) // Not sure why the memset is necessary here, // but without it, we get a bogus stack size - // out of pthread_attr_getstacksize. 
C'est la Linux. + // out of pthread_attr_getstacksize. C'est la Linux. memset(&attr, 0, sizeof attr); pthread_attr_init(&attr); size = 0; diff --git a/src/runtime/cgo/gcc_linux_arm.c b/src/runtime/cgo/gcc_linux_arm.c index fdcc343d19..f552421957 100644 --- a/src/runtime/cgo/gcc_linux_arm.c +++ b/src/runtime/cgo/gcc_linux_arm.c @@ -28,7 +28,7 @@ _cgo_sys_thread_start(ThreadStart *ts) // Not sure why the memset is necessary here, // but without it, we get a bogus stack size - // out of pthread_attr_getstacksize. C'est la Linux. + // out of pthread_attr_getstacksize. C'est la Linux. memset(&attr, 0, sizeof attr); pthread_attr_init(&attr); size = 0; diff --git a/src/runtime/cgo/gcc_linux_arm64.c b/src/runtime/cgo/gcc_linux_arm64.c index a7b5ddf5b6..84bd7c0b49 100644 --- a/src/runtime/cgo/gcc_linux_arm64.c +++ b/src/runtime/cgo/gcc_linux_arm64.c @@ -28,7 +28,7 @@ _cgo_sys_thread_start(ThreadStart *ts) // Not sure why the memset is necessary here, // but without it, we get a bogus stack size - // out of pthread_attr_getstacksize. C'est la Linux. + // out of pthread_attr_getstacksize. C'est la Linux. memset(&attr, 0, sizeof attr); pthread_attr_init(&attr); size = 0; diff --git a/src/runtime/cgo/gcc_netbsd_386.c b/src/runtime/cgo/gcc_netbsd_386.c index ba4fa6e425..32f2e15678 100644 --- a/src/runtime/cgo/gcc_netbsd_386.c +++ b/src/runtime/cgo/gcc_netbsd_386.c @@ -68,11 +68,11 @@ threadentry(void *v) setg_gcc((void*)ts.g); // On NetBSD, a new thread inherits the signal stack of the - // creating thread. That confuses minit, so we remove that - // signal stack here before calling the regular mstart. It's + // creating thread. That confuses minit, so we remove that + // signal stack here before calling the regular mstart. It's // a bit baroque to remove a signal stack here only to add one // in minit, but it's a simple change that keeps NetBSD - // working like other OS's. At this point all signals are + // working like other OS's. 
At this point all signals are // blocked, so there is no race. memset(&ss, 0, sizeof ss); ss.ss_flags = SS_DISABLE; diff --git a/src/runtime/cgo/gcc_netbsd_amd64.c b/src/runtime/cgo/gcc_netbsd_amd64.c index 035c4ff482..aa357459c7 100644 --- a/src/runtime/cgo/gcc_netbsd_amd64.c +++ b/src/runtime/cgo/gcc_netbsd_amd64.c @@ -69,11 +69,11 @@ threadentry(void *v) setg_gcc((void*)ts.g); // On NetBSD, a new thread inherits the signal stack of the - // creating thread. That confuses minit, so we remove that - // signal stack here before calling the regular mstart. It's + // creating thread. That confuses minit, so we remove that + // signal stack here before calling the regular mstart. It's // a bit baroque to remove a signal stack here only to add one // in minit, but it's a simple change that keeps NetBSD - // working like other OS's. At this point all signals are + // working like other OS's. At this point all signals are // blocked, so there is no race. memset(&ss, 0, sizeof ss); ss.ss_flags = SS_DISABLE; diff --git a/src/runtime/cgo/gcc_netbsd_arm.c b/src/runtime/cgo/gcc_netbsd_arm.c index 5b8553e830..9589780ac8 100644 --- a/src/runtime/cgo/gcc_netbsd_arm.c +++ b/src/runtime/cgo/gcc_netbsd_arm.c @@ -65,11 +65,11 @@ threadentry(void *v) free(v); // On NetBSD, a new thread inherits the signal stack of the - // creating thread. That confuses minit, so we remove that - // signal stack here before calling the regular mstart. It's + // creating thread. That confuses minit, so we remove that + // signal stack here before calling the regular mstart. It's // a bit baroque to remove a signal stack here only to add one // in minit, but it's a simple change that keeps NetBSD - // working like other OS's. At this point all signals are + // working like other OS's. At this point all signals are // blocked, so there is no race. 
memset(&ss, 0, sizeof ss); ss.ss_flags = SS_DISABLE; diff --git a/src/runtime/cgo/iscgo.go b/src/runtime/cgo/iscgo.go index 7604132170..e12d0f4b95 100644 --- a/src/runtime/cgo/iscgo.go +++ b/src/runtime/cgo/iscgo.go @@ -3,10 +3,10 @@ // license that can be found in the LICENSE file. // The runtime package contains an uninitialized definition -// for runtime·iscgo. Override it to tell the runtime we're here. +// for runtime·iscgo. Override it to tell the runtime we're here. // There are various function pointers that should be set too, // but those depend on dynamic linker magic to get initialized -// correctly, and sometimes they break. This variable is a +// correctly, and sometimes they break. This variable is a // backup: it depends only on old C style static linking rules. package cgo diff --git a/src/runtime/cgo/mmap.go b/src/runtime/cgo/mmap.go index 6a4dc1562d..ff983599be 100644 --- a/src/runtime/cgo/mmap.go +++ b/src/runtime/cgo/mmap.go @@ -10,8 +10,8 @@ package cgo import _ "unsafe" // When using cgo, call the C library for mmap, so that we call into -// any sanitizer interceptors. This supports using the memory -// sanitizer with Go programs. The memory sanitizer only applies to +// any sanitizer interceptors. This supports using the memory +// sanitizer with Go programs. The memory sanitizer only applies to // C/C++ code; this permits that code to see the Go code as normal // program addresses that have been initialized. diff --git a/src/runtime/cgo_mmap.go b/src/runtime/cgo_mmap.go index dca1e22ad3..a23cc79b7e 100644 --- a/src/runtime/cgo_mmap.go +++ b/src/runtime/cgo_mmap.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Support for memory sanitizer. See runtime/cgo/mmap.go. +// Support for memory sanitizer. See runtime/cgo/mmap.go. 
// +build linux,amd64 @@ -32,10 +32,10 @@ func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) uns return sysMmap(addr, n, prot, flags, fd, off) } -// sysMmap calls the mmap system call. It is implemented in assembly. +// sysMmap calls the mmap system call. It is implemented in assembly. func sysMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer // cgoMmap calls the mmap function in the runtime/cgo package on the // callCgoMmap calls the mmap function in the runtime/cgo package -// using the GCC calling convention. It is implemented in assembly. +// using the GCC calling convention. It is implemented in assembly. func callCgoMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) uintptr diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go index 5c9a73734c..9514c0ba9a 100644 --- a/src/runtime/cgocall.go +++ b/src/runtime/cgocall.go @@ -28,7 +28,7 @@ // and then unlocks g from m. // // The above description skipped over the possibility of the gcc-compiled -// function f calling back into Go. If that happens, we continue down +// function f calling back into Go. If that happens, we continue down // the rabbit hole during the execution of f. // // To make it possible for gcc-compiled C code to call a Go function p.GoF, @@ -38,9 +38,9 @@ // GoF calls crosscall2(_cgoexp_GoF, frame, framesize). Crosscall2 // (in cgo/gcc_$GOARCH.S, a gcc-compiled assembly file) is a two-argument // adapter from the gcc function call ABI to the 6c function call ABI. -// It is called from gcc to call 6c functions. In this case it calls +// It is called from gcc to call 6c functions. In this case it calls // _cgoexp_GoF(frame, framesize), still running on m->g0's stack -// and outside the $GOMAXPROCS limit. Thus, this code cannot yet +// and outside the $GOMAXPROCS limit. Thus, this code cannot yet // call arbitrary Go code directly and must be careful not to allocate // memory or use up m->g0's stack. 
// @@ -317,8 +317,8 @@ var racecgosync uint64 // represents possible synchronization in C code // We want to detect all cases where a program that does not use // unsafe makes a cgo call passing a Go pointer to memory that -// contains a Go pointer. Here a Go pointer is defined as a pointer -// to memory allocated by the Go runtime. Programs that use unsafe +// contains a Go pointer. Here a Go pointer is defined as a pointer +// to memory allocated by the Go runtime. Programs that use unsafe // can evade this restriction easily, so we don't try to catch them. // The cgo program will rewrite all possibly bad pointer arguments to // call cgoCheckPointer, where we can catch cases of a Go pointer @@ -326,14 +326,14 @@ var racecgosync uint64 // represents possible synchronization in C code // Complicating matters, taking the address of a slice or array // element permits the C program to access all elements of the slice -// or array. In that case we will see a pointer to a single element, +// or array. In that case we will see a pointer to a single element, // but we need to check the entire data structure. // The cgoCheckPointer call takes additional arguments indicating that -// it was called on an address expression. An additional argument of -// true means that it only needs to check a single element. An +// it was called on an address expression. An additional argument of +// true means that it only needs to check a single element. An // additional argument of a slice or array means that it needs to -// check the entire slice/array, but nothing else. Otherwise, the +// check the entire slice/array, but nothing else. Otherwise, the // pointer could be anything, and we check the entire heap object, // which is conservative but safe. @@ -344,7 +344,7 @@ var racecgosync uint64 // represents possible synchronization in C code // pointers.) // cgoCheckPointer checks if the argument contains a Go pointer that -// points to a Go pointer, and panics if it does. 
It returns the pointer. +// points to a Go pointer, and panics if it does. It returns the pointer. func cgoCheckPointer(ptr interface{}, args ...interface{}) interface{} { if debug.cgocheck == 0 { return ptr @@ -395,9 +395,9 @@ func cgoCheckPointer(ptr interface{}, args ...interface{}) interface{} { const cgoCheckPointerFail = "cgo argument has Go pointer to Go pointer" const cgoResultFail = "cgo result has Go pointer" -// cgoCheckArg is the real work of cgoCheckPointer. The argument p +// cgoCheckArg is the real work of cgoCheckPointer. The argument p // is either a pointer to the value (of type t), or the value itself, -// depending on indir. The top parameter is whether we are at the top +// depending on indir. The top parameter is whether we are at the top // level, where Go pointers are allowed. func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { if t.kind&kindNoPointers != 0 { @@ -423,7 +423,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { } case kindChan, kindMap: // These types contain internal pointers that will - // always be allocated in the Go heap. It's never OK + // always be allocated in the Go heap. It's never OK // to pass them to C. panic(errorString(msg)) case kindFunc: @@ -440,7 +440,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { return } // A type known at compile time is OK since it's - // constant. A type not known at compile time will be + // constant. A type not known at compile time will be // in the heap and will not be OK. if inheap(uintptr(unsafe.Pointer(it))) { panic(errorString(msg)) @@ -507,8 +507,8 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { } // cgoCheckUnknownPointer is called for an arbitrary pointer into Go -// memory. It checks whether that Go memory contains any other -// pointer into Go memory. If it does, we panic. +// memory. 
It checks whether that Go memory contains any other +// pointer into Go memory. If it does, we panic. // The return values are unused but useful to see in panic tracebacks. func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) { if cgoInRange(p, mheap_.arena_start, mheap_.arena_used) { @@ -559,7 +559,7 @@ func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) { } // cgoIsGoPointer returns whether the pointer is a Go pointer--a -// pointer to Go memory. We only care about Go memory that might +// pointer to Go memory. We only care about Go memory that might // contain pointers. //go:nosplit //go:nowritebarrierrec @@ -589,7 +589,7 @@ func cgoInRange(p unsafe.Pointer, start, end uintptr) bool { } // cgoCheckResult is called to check the result parameter of an -// exported Go function. It panics if the result is or contains a Go +// exported Go function. It panics if the result is or contains a Go // pointer. func cgoCheckResult(val interface{}) { if debug.cgocheck == 0 { diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go index 0d46dde59d..d85d5fe5a8 100644 --- a/src/runtime/cgocheck.go +++ b/src/runtime/cgocheck.go @@ -89,7 +89,7 @@ func cgoCheckSliceCopy(typ *_type, dst, src slice, n int) { } // cgoCheckTypedBlock checks the block of memory at src, for up to size bytes, -// and throws if it finds a Go pointer. The type of the memory is typ, +// and throws if it finds a Go pointer. The type of the memory is typ, // and src is off bytes into that type. //go:nosplit //go:nowritebarrier @@ -99,7 +99,7 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) { return } - // The type has a GC program. Try to find GC bits somewhere else. + // The type has a GC program. Try to find GC bits somewhere else. 
for datap := &firstmoduledata; datap != nil; datap = datap.next { if cgoInRange(src, datap.data, datap.edata) { doff := uintptr(src) - datap.data @@ -148,8 +148,8 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) { } // cgoCheckBits checks the block of memory at src, for up to size -// bytes, and throws if it finds a Go pointer. The gcbits mark each -// pointer value. The src pointer is off bytes into the gcbits. +// bytes, and throws if it finds a Go pointer. The gcbits mark each +// pointer value. The src pointer is off bytes into the gcbits. //go:nosplit //go:nowritebarrier func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) { @@ -186,7 +186,7 @@ func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) { // fall back to look for pointers in src using the type information. // We only this when looking at a value on the stack when the type // uses a GC program, because otherwise it's more efficient to use the -// GC bits. This is called on the system stack. +// GC bits. This is called on the system stack. //go:nowritebarrier //go:systemstack func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) { diff --git a/src/runtime/chan.go b/src/runtime/chan.go index f6f3ce4d90..2fc0839600 100644 --- a/src/runtime/chan.go +++ b/src/runtime/chan.go @@ -175,7 +175,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin } if c.qcount < c.dataqsiz { - // Space is available in the channel buffer. Enqueue the element to send. + // Space is available in the channel buffer. Enqueue the element to send. qp := chanbuf(c, c.sendx) if raceenabled { raceacquire(qp) @@ -196,7 +196,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin return false } - // Block on the channel. Some receiver will complete our operation for us. + // Block on the channel. Some receiver will complete our operation for us. 
gp := getg() mysg := acquireSudog() mysg.releasetime = 0 @@ -245,7 +245,7 @@ func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) { racesync(c, sg) } else { // Pretend we go through the buffer, even though - // we copy directly. Note that we need to increment + // we copy directly. Note that we need to increment // the head/tail locations only when raceenabled. qp := chanbuf(c, c.recvx) raceacquire(qp) @@ -422,8 +422,8 @@ func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, r } if sg := c.sendq.dequeue(); sg != nil { - // Found a waiting sender. If buffer is size 0, receive value - // directly from sender. Otherwise, receive from head of queue + // Found a waiting sender. If buffer is size 0, receive value + // directly from sender. Otherwise, receive from head of queue // and add sender's value to the tail of the queue (both map to // the same buffer slot because the queue is full). recv(c, sg, ep, func() { unlock(&c.lock) }) @@ -513,9 +513,9 @@ func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) { typedmemmove(c.elemtype, ep, sg.elem) } } else { - // Queue is full. Take the item at the - // head of the queue. Make the sender enqueue - // its item at the tail of the queue. Since the + // Queue is full. Take the item at the + // head of the queue. Make the sender enqueue + // its item at the tail of the queue. Since the // queue is full, those are both the same slot. qp := chanbuf(c, c.recvx) if raceenabled { diff --git a/src/runtime/compiler.go b/src/runtime/compiler.go index 47e80b15ff..1ebc62dea1 100644 --- a/src/runtime/compiler.go +++ b/src/runtime/compiler.go @@ -5,7 +5,7 @@ package runtime // Compiler is the name of the compiler toolchain that built the -// running binary. Known toolchains are: +// running binary. Known toolchains are: // // gc Also known as cmd/compile. // gccgo The gccgo front end, part of the GCC compiler suite. 
diff --git a/src/runtime/cpuprof.go b/src/runtime/cpuprof.go index 6334104003..4b9dfafb90 100644 --- a/src/runtime/cpuprof.go +++ b/src/runtime/cpuprof.go @@ -12,30 +12,30 @@ // writes to an operating system file. // // The signal handler for the profiling clock tick adds a new stack trace -// to a hash table tracking counts for recent traces. Most clock ticks -// hit in the cache. In the event of a cache miss, an entry must be +// to a hash table tracking counts for recent traces. Most clock ticks +// hit in the cache. In the event of a cache miss, an entry must be // evicted from the hash table, copied to a log that will eventually be -// written as profile data. The google-perftools code flushed the -// log itself during the signal handler. This code cannot do that, because +// written as profile data. The google-perftools code flushed the +// log itself during the signal handler. This code cannot do that, because // the io.Writer might block or need system calls or locks that are not -// safe to use from within the signal handler. Instead, we split the log +// safe to use from within the signal handler. Instead, we split the log // into two halves and let the signal handler fill one half while a goroutine -// is writing out the other half. When the signal handler fills its half, it -// offers to swap with the goroutine. If the writer is not done with its half, +// is writing out the other half. When the signal handler fills its half, it +// offers to swap with the goroutine. If the writer is not done with its half, // we lose the stack trace for this clock tick (and record that loss). // The goroutine interacts with the signal handler by calling getprofile() to // get the next log piece to write, implicitly handing back the last log // piece it obtained. // // The state of this dance between the signal handler and the goroutine -// is encoded in the Profile.handoff field. If handoff == 0, then the goroutine +// is encoded in the Profile.handoff field. 
If handoff == 0, then the goroutine // is not using either log half and is waiting (or will soon be waiting) for // a new piece by calling notesleep(&p.wait). If the signal handler // changes handoff from 0 to non-zero, it must call notewakeup(&p.wait) -// to wake the goroutine. The value indicates the number of entries in the -// log half being handed off. The goroutine leaves the non-zero value in +// to wake the goroutine. The value indicates the number of entries in the +// log half being handed off. The goroutine leaves the non-zero value in // place until it has finished processing the log half and then flips the number -// back to zero. Setting the high bit in handoff means that the profiling is over, +// back to zero. Setting the high bit in handoff means that the profiling is over, // and the goroutine is now in charge of flushing the data left in the hash table // to the log and returning that data. // @@ -44,7 +44,7 @@ // then the signal handler owns it and can change it to non-zero. // If handoff != 0 then the goroutine owns it and can change it to zero. // If that were the end of the story then we would not need to manipulate -// handoff using atomic operations. The operations are needed, however, +// handoff using atomic operations. The operations are needed, however, // in order to let the log closer set the high bit to indicate "EOF" safely // in the situation when normally the goroutine "owns" handoff. @@ -192,7 +192,7 @@ func SetCPUProfileRate(hz int) { // It is called from signal handlers and other limited environments // and cannot allocate memory or acquire locks that might be // held at the time of the signal, nor can it use substantial amounts -// of stack. It is allowed to call evict. +// of stack. It is allowed to call evict. func (p *cpuProfile) add(pc []uintptr) { if len(pc) > maxCPUProfStack { pc = pc[:maxCPUProfStack] @@ -232,7 +232,7 @@ Assoc: } if e.count > 0 { if !p.evict(e) { - // Could not evict entry. Record lost stack. 
+ // Could not evict entry. Record lost stack. p.lost++ return } @@ -248,7 +248,7 @@ Assoc: // evict copies the given entry's data into the log, so that // the entry can be reused. evict is called from add, which // is called from the profiling signal handler, so it must not -// allocate memory or block. It is safe to call flushlog. +// allocate memory or block. It is safe to call flushlog. // evict returns true if the entry was copied to the log, // false if there was no room available. func (p *cpuProfile) evict(e *cpuprofEntry) bool { @@ -276,7 +276,7 @@ func (p *cpuProfile) evict(e *cpuprofEntry) bool { // flushlog tries to flush the current log and switch to the other one. // flushlog is called from evict, called from add, called from the signal handler, -// so it cannot allocate memory or block. It can try to swap logs with +// so it cannot allocate memory or block. It can try to swap logs with // the writing goroutine, as explained in the comment at the top of this file. func (p *cpuProfile) flushlog() bool { if !atomic.Cas(&p.handoff, 0, uint32(p.nlog)) { @@ -300,7 +300,7 @@ func (p *cpuProfile) flushlog() bool { } // getprofile blocks until the next block of profiling data is available -// and returns it as a []byte. It is called from the writing goroutine. +// and returns it as a []byte. It is called from the writing goroutine. func (p *cpuProfile) getprofile() []byte { if p == nil { return nil @@ -358,7 +358,7 @@ func (p *cpuProfile) getprofile() []byte { } // In flush mode. - // Add is no longer being called. We own the log. + // Add is no longer being called. We own the log. // Also, p.handoff is non-zero, so flushlog will return false. // Evict the hash table into the log and return it. Flush: @@ -367,7 +367,7 @@ Flush: for j := range b.entry { e := &b.entry[j] if e.count > 0 && !p.evict(e) { - // Filled the log. Stop the loop and return what we've got. + // Filled the log. Stop the loop and return what we've got. 
break Flush } } @@ -390,7 +390,7 @@ Flush: return uintptrBytes(eod[:]) } - // Finally done. Clean up and return nil. + // Finally done. Clean up and return nil. p.flushing = false if !atomic.Cas(&p.handoff, p.handoff, 0) { print("runtime: profile flush racing with something\n") @@ -410,7 +410,7 @@ func uintptrBytes(p []uintptr) (ret []byte) { } // CPUProfile returns the next chunk of binary CPU profiling stack trace data, -// blocking until data is available. If profiling is turned off and all the profile +// blocking until data is available. If profiling is turned off and all the profile // data accumulated while it was on has been returned, CPUProfile returns nil. // The caller must save the returned data before calling CPUProfile again. // diff --git a/src/runtime/cputicks.go b/src/runtime/cputicks.go index 3c036df2bd..91627460c3 100644 --- a/src/runtime/cputicks.go +++ b/src/runtime/cputicks.go @@ -10,5 +10,5 @@ package runtime // careful: cputicks is not guaranteed to be monotonic! In particular, we have -// noticed drift between cpus on certain os/arch combinations. See issue 8976. +// noticed drift between cpus on certain os/arch combinations. See issue 8976. func cputicks() int64 diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go index de45e832f8..85fcc69fed 100644 --- a/src/runtime/crash_test.go +++ b/src/runtime/crash_test.go @@ -294,9 +294,9 @@ func TestRecoverBeforePanicAfterGoexit(t *testing.T) { // 1. defer a function that recovers // 2. defer a function that panics // 3. call goexit - // Goexit should run the #2 defer. Its panic + // Goexit should run the #2 defer. Its panic // should be caught by the #1 defer, and execution - // should resume in the caller. Like the Goexit + // should resume in the caller. Like the Goexit // never happened! 
defer func() { r := recover() diff --git a/src/runtime/debug.go b/src/runtime/debug.go index 0f5936566e..0e798fc6f5 100644 --- a/src/runtime/debug.go +++ b/src/runtime/debug.go @@ -10,7 +10,7 @@ import ( ) // GOMAXPROCS sets the maximum number of CPUs that can be executing -// simultaneously and returns the previous setting. If n < 1, it does not +// simultaneously and returns the previous setting. If n < 1, it does not // change the current setting. // The number of logical CPUs on the local machine can be queried with NumCPU. // This call will go away when the scheduler improves. diff --git a/src/runtime/defs2_linux.go b/src/runtime/defs2_linux.go index 980df9ec31..9dea6a1f3a 100644 --- a/src/runtime/defs2_linux.go +++ b/src/runtime/defs2_linux.go @@ -32,7 +32,7 @@ package runtime #include <linux/eventpoll.h> // This is the sigaction structure from the Linux 2.1.68 kernel which -// is used with the rt_sigaction system call. For 386 this is not +// is used with the rt_sigaction system call. For 386 this is not // defined in any public header file. struct kernel_sigaction { diff --git a/src/runtime/extern.go b/src/runtime/extern.go index 9c1f9f5a03..984b0ca817 100644 --- a/src/runtime/extern.go +++ b/src/runtime/extern.go @@ -147,11 +147,11 @@ package runtime import "runtime/internal/sys" // Caller reports file and line number information about function invocations on -// the calling goroutine's stack. The argument skip is the number of stack frames +// the calling goroutine's stack. The argument skip is the number of stack frames // to ascend, with 0 identifying the caller of Caller. (For historical reasons the // meaning of skip differs between Caller and Callers.) The return values report the // program counter, file name, and line number within the file of the corresponding -// call. The boolean ok is false if it was not possible to recover the information. +// call. The boolean ok is false if it was not possible to recover the information. 
func Caller(skip int) (pc uintptr, file string, line int, ok bool) { // Ask for two PCs: the one we were asked for // and what it called, so that we can see if it @@ -184,7 +184,7 @@ func Caller(skip int) (pc uintptr, file string, line int, ok bool) { } // Callers fills the slice pc with the return program counters of function invocations -// on the calling goroutine's stack. The argument skip is the number of stack frames +// on the calling goroutine's stack. The argument skip is the number of stack frames // to skip before recording in pc, with 0 identifying the frame for Callers itself and // 1 identifying the caller of Callers. // It returns the number of entries written to pc. @@ -196,7 +196,7 @@ func Caller(skip int) (pc uintptr, file string, line int, ok bool) { // To easily look up file/line information for the call sequence, use Frames. func Callers(skip int, pc []uintptr) int { // runtime.callers uses pc.array==nil as a signal - // to print a stack trace. Pick off 0-length pc here + // to print a stack trace. Pick off 0-length pc here // so that we don't let a nil pc slice get to it. if len(pc) == 0 { return 0 diff --git a/src/runtime/hash_test.go b/src/runtime/hash_test.go index a787d8d367..7cceab86cc 100644 --- a/src/runtime/hash_test.go +++ b/src/runtime/hash_test.go @@ -17,7 +17,7 @@ import ( // https://code.google.com/p/smhasher/ // This code is a port of some of the Smhasher tests to Go. // -// The current AES hash function passes Smhasher. Our fallback +// The current AES hash function passes Smhasher. Our fallback // hash functions don't, so we only enable the difficult tests when // we know the AES implementation is available. @@ -349,7 +349,7 @@ func (k *EfaceKey) random(r *rand.Rand) { k.i = uint64(r.Int63()) } func (k *EfaceKey) bits() int { - // use 64 bits. This tests inlined interfaces + // use 64 bits. This tests inlined interfaces // on 64-bit targets and indirect interfaces on // 32-bit targets. 
return 64 @@ -381,7 +381,7 @@ func (k *IfaceKey) random(r *rand.Rand) { k.i = fInter(r.Int63()) } func (k *IfaceKey) bits() int { - // use 64 bits. This tests inlined interfaces + // use 64 bits. This tests inlined interfaces // on 64-bit targets and indirect interfaces on // 32-bit targets. return 64 @@ -443,7 +443,7 @@ func avalancheTest1(t *testing.T, k Key) { // Each entry in the grid should be about REP/2. // More precisely, we did N = k.bits() * hashSize experiments where - // each is the sum of REP coin flips. We want to find bounds on the + // each is the sum of REP coin flips. We want to find bounds on the // sum of coin flips such that a truly random experiment would have // all sums inside those bounds with 99% probability. N := n * hashSize @@ -563,19 +563,19 @@ func BenchmarkHash1024(b *testing.B) { benchmarkHash(b, 1024) } func BenchmarkHash65536(b *testing.B) { benchmarkHash(b, 65536) } func TestArrayHash(t *testing.T) { - // Make sure that "" in arrays hash correctly. The hash + // Make sure that "" in arrays hash correctly. The hash // should at least scramble the input seed so that, e.g., // {"","foo"} and {"foo",""} have different hashes. // If the hash is bad, then all (8 choose 4) = 70 keys // have the same hash. If so, we allocate 70/8 = 8 - // overflow buckets. If the hash is good we don't + // overflow buckets. If the hash is good we don't // normally allocate any overflow buckets, and the // probability of even one or two overflows goes down rapidly. - // (There is always 1 allocation of the bucket array. The map + // (There is always 1 allocation of the bucket array. The map // header is allocated on the stack.) f := func() { - // Make the key type at most 128 bytes. Otherwise, + // Make the key type at most 128 bytes. Otherwise, // we get an allocation per key. 
type key [8]string m := make(map[key]bool, 70) diff --git a/src/runtime/hashmap.go b/src/runtime/hashmap.go index 892a79a914..6f7451e02c 100644 --- a/src/runtime/hashmap.go +++ b/src/runtime/hashmap.go @@ -6,10 +6,10 @@ package runtime // This file contains the implementation of Go's map type. // -// A map is just a hash table. The data is arranged -// into an array of buckets. Each bucket contains up to -// 8 key/value pairs. The low-order bits of the hash are -// used to select a bucket. Each bucket contains a few +// A map is just a hash table. The data is arranged +// into an array of buckets. Each bucket contains up to +// 8 key/value pairs. The low-order bits of the hash are +// used to select a bucket. Each bucket contains a few // high-order bits of each hash to distinguish the entries // within a single bucket. // @@ -17,7 +17,7 @@ package runtime // extra buckets. // // When the hashtable grows, we allocate a new array -// of buckets twice as big. Buckets are incrementally +// of buckets twice as big. Buckets are incrementally // copied from the old bucket array to the new bucket array. // // Map iterators walk through the array of buckets and @@ -31,7 +31,7 @@ package runtime // to the new table. // Picking loadFactor: too large and we have lots of overflow -// buckets, too small and we waste a lot of space. I wrote +// buckets, too small and we waste a lot of space. I wrote // a simple program to check some stats for different loads: // (64-bit, 8 byte keys and values) // loadFactor %overflow bytes/entry hitprobe missprobe @@ -51,7 +51,7 @@ package runtime // missprobe = # of entries to check when looking up an absent key // // Keep in mind this data is for maximally loaded tables, i.e. just -// before the table grows. Typical tables will be somewhat less loaded. +// before the table grows. Typical tables will be somewhat less loaded. 
import ( "runtime/internal/atomic" @@ -75,14 +75,14 @@ const ( maxValueSize = 128 // data offset should be the size of the bmap struct, but needs to be - // aligned correctly. For amd64p32 this means 64-bit alignment + // aligned correctly. For amd64p32 this means 64-bit alignment // even though pointers are 32 bit. dataOffset = unsafe.Offsetof(struct { b bmap v int64 }{}.v) - // Possible tophash values. We reserve a few possibilities for special marks. + // Possible tophash values. We reserve a few possibilities for special marks. // Each bucket (including its overflow buckets, if any) will have either all or none of its // entries in the evacuated* states (except during the evacuate() method, which only happens // during map writes and thus no one else can observe the map during that time). @@ -104,7 +104,7 @@ const ( // A header for a Go map. type hmap struct { // Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and - // ../reflect/type.go. Don't change this structure without also changing that code! + // ../reflect/type.go. Don't change this structure without also changing that code! count int // # live cells == size of map. Must be first (used by len() builtin) flags uint8 B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items) @@ -212,7 +212,7 @@ func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap { throw("value size wrong") } - // invariants we depend on. We should probably check these at compile time + // invariants we depend on. We should probably check these at compile time // somewhere, but for now we'll do it here. if t.key.align > bucketCnt { throw("key align too big") @@ -380,7 +380,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) } } -// returns both key and value. Used by map iterator +// returns both key and value. 
Used by map iterator func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) { if h == nil || h.count == 0 { return nil, nil @@ -485,7 +485,7 @@ again: if !alg.equal(key, k2) { continue } - // already have a mapping for key. Update it. + // already have a mapping for key. Update it. if t.needkeyupdate { typedmemmove(t.key, k2, key) } @@ -504,7 +504,7 @@ again: b = ovf } - // did not find mapping for key. Allocate new cell & add entry. + // did not find mapping for key. Allocate new cell & add entry. if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt { hashGrow(t, h) goto again // Growing the table invalidates everything, so try again @@ -718,9 +718,9 @@ next: if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty { if checkBucket != noCheck { // Special case: iterator was started during a grow and the - // grow is not done yet. We're working on a bucket whose - // oldbucket has not been evacuated yet. Or at least, it wasn't - // evacuated when we started the bucket. So we're iterating + // grow is not done yet. We're working on a bucket whose + // oldbucket has not been evacuated yet. Or at least, it wasn't + // evacuated when we started the bucket. So we're iterating // through the oldbucket, skipping any keys that will go // to the other new bucket (each oldbucket expands to two // buckets during a grow). @@ -738,7 +738,7 @@ next: } else { // Hash isn't repeatable if k != k (NaNs). We need a // repeatable and randomish choice of which direction - // to send NaNs during evacuation. We'll use the low + // to send NaNs during evacuation. We'll use the low // bit of tophash to decide which way NaNs go. // NOTE: this case is why we need two evacuate tophash // values, evacuatedX and evacuatedY, that differ in @@ -779,7 +779,7 @@ next: it.value = rv } else { // if key!=key then the entry can't be deleted or - // updated, so we can just return it. 
That's lucky for + // updated, so we can just return it. That's lucky for // us because when key!=key we can't look it up // successfully in the current table. it.key = k2 @@ -882,12 +882,12 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { if h.flags&iterator != 0 { if !t.reflexivekey && !alg.equal(k2, k2) { // If key != key (NaNs), then the hash could be (and probably - // will be) entirely different from the old hash. Moreover, - // it isn't reproducible. Reproducibility is required in the + // will be) entirely different from the old hash. Moreover, + // it isn't reproducible. Reproducibility is required in the // presence of iterators, as our evacuation decision must // match whatever decision the iterator made. // Fortunately, we have the freedom to send these keys either - // way. Also, tophash is meaningless for these kinds of keys. + // way. Also, tophash is meaningless for these kinds of keys. // We let the low bit of tophash drive the evacuation decision. // We recompute a new random tophash for the next level so // these keys will get evenly distributed across all buckets @@ -965,7 +965,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) { if oldbucket == h.nevacuate { h.nevacuate = oldbucket + 1 if oldbucket+1 == newbit { // newbit == # of oldbuckets - // Growing is all done. Free old main bucket array. + // Growing is all done. Free old main bucket array. h.oldbuckets = nil // Can discard old overflow buckets as well. // If they are still referenced by an iterator, @@ -981,7 +981,7 @@ func ismapkey(t *_type) bool { return t.alg.hash != nil } -// Reflect stubs. Called from ../reflect/asm_*.s +// Reflect stubs. 
Called from ../reflect/asm_*.s //go:linkname reflect_makemap reflect.makemap func reflect_makemap(t *maptype) *hmap { diff --git a/src/runtime/hashmap_fast.go b/src/runtime/hashmap_fast.go index f95ea3e1b7..6a5484edee 100644 --- a/src/runtime/hashmap_fast.go +++ b/src/runtime/hashmap_fast.go @@ -23,7 +23,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { } var b *bmap if h.B == 0 { - // One-bucket table. No need to hash. + // One-bucket table. No need to hash. b = (*bmap)(h.buckets) } else { hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) @@ -68,7 +68,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { } var b *bmap if h.B == 0 { - // One-bucket table. No need to hash. + // One-bucket table. No need to hash. b = (*bmap)(h.buckets) } else { hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) @@ -113,7 +113,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { } var b *bmap if h.B == 0 { - // One-bucket table. No need to hash. + // One-bucket table. No need to hash. b = (*bmap)(h.buckets) } else { hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) @@ -158,7 +158,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) { } var b *bmap if h.B == 0 { - // One-bucket table. No need to hash. + // One-bucket table. No need to hash. b = (*bmap)(h.buckets) } else { hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) @@ -247,7 +247,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { continue } if keymaybe != bucketCnt { - // Two keys are potential matches. Use hash to distinguish them. + // Two keys are potential matches. Use hash to distinguish them. goto dohash } keymaybe = i @@ -350,7 +350,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { continue } if keymaybe != bucketCnt { - // Two keys are potential matches. 
Use hash to distinguish them. + // Two keys are potential matches. Use hash to distinguish them. goto dohash } keymaybe = i diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go index 3bff36bd78..e4ec302a19 100644 --- a/src/runtime/heapdump.go +++ b/src/runtime/heapdump.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Implementation of runtime/debug.WriteHeapDump. Writes all +// Implementation of runtime/debug.WriteHeapDump. Writes all // objects in the heap plus additional info (roots, threads, // finalizers, etc.) to a file. @@ -97,7 +97,7 @@ func flush() { // Inside a bucket, we keep a list of types that // have been serialized so far, most recently used first. // Note: when a bucket overflows we may end up -// serializing a type more than once. That's ok. +// serializing a type more than once. That's ok. const ( typeCacheBuckets = 256 typeCacheAssoc = 4 @@ -172,7 +172,7 @@ func dumptype(t *_type) { } } - // Might not have been dumped yet. Dump it and + // Might not have been dumped yet. Dump it and // remember we did so. for j := typeCacheAssoc - 1; j > 0; j-- { b.t[j] = b.t[j-1] @@ -254,7 +254,7 @@ func dumpframe(s *stkframe, arg unsafe.Pointer) bool { pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, pc, nil) if pcdata == -1 { // We do not have a valid pcdata value but there might be a - // stackmap for this function. It is likely that we are looking + // stackmap for this function. It is likely that we are looking // at the function prologue, assume so and hope for the best. pcdata = 0 } @@ -679,8 +679,8 @@ func dumpfields(bv bitvector) { } // The heap dump reader needs to be able to disambiguate -// Eface entries. So it needs to know every type that might -// appear in such an entry. The following routine accomplishes that. +// Eface entries. So it needs to know every type that might +// appear in such an entry. The following routine accomplishes that. 
// TODO(rsc, khr): Delete - no longer possible. // Dump all the types that appear in the type field of diff --git a/src/runtime/iface.go b/src/runtime/iface.go index d980367866..bad0156e61 100644 --- a/src/runtime/iface.go +++ b/src/runtime/iface.go @@ -140,7 +140,7 @@ func convT2E(t *_type, elem unsafe.Pointer, x unsafe.Pointer) (e eface) { x = newobject(t) } // TODO: We allocate a zeroed object only to overwrite it with - // actual data. Figure out how to avoid zeroing. Also below in convT2I. + // actual data. Figure out how to avoid zeroing. Also below in convT2I. typedmemmove(t, x, elem) e._type = t e.data = x diff --git a/src/runtime/internal/atomic/asm_mips64x.s b/src/runtime/internal/atomic/asm_mips64x.s index 4cab4342f9..a454f284ab 100644 --- a/src/runtime/internal/atomic/asm_mips64x.s +++ b/src/runtime/internal/atomic/asm_mips64x.s @@ -189,7 +189,7 @@ TEXT ·Or8(SB), NOSPLIT, $0-9 // R4 = ((ptr & 3) * 8) AND $3, R1, R4 SLLV $3, R4 - // Shift val for aligned ptr. R2 = val << R4 + // Shift val for aligned ptr. R2 = val << R4 SLLV R4, R2 SYNC @@ -215,7 +215,7 @@ TEXT ·And8(SB), NOSPLIT, $0-9 // R4 = ((ptr & 3) * 8) AND $3, R1, R4 SLLV $3, R4 - // Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4) + // Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4) MOVV $0xFF, R5 SLLV R4, R2 SLLV R4, R5 diff --git a/src/runtime/internal/atomic/asm_ppc64x.s b/src/runtime/internal/atomic/asm_ppc64x.s index 87f7f5d892..45a48b6203 100644 --- a/src/runtime/internal/atomic/asm_ppc64x.s +++ b/src/runtime/internal/atomic/asm_ppc64x.s @@ -181,7 +181,7 @@ TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-9 #endif // R6 = ((ptr & 3) * 8) = (ptr << 3) & (3*8) RLDC $3, R3, $(3*8), R6 - // Shift val for aligned ptr. R4 = val << R6 + // Shift val for aligned ptr. 
R4 = val << R6 SLD R6, R4, R4 again: @@ -208,7 +208,7 @@ TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-9 #endif // R6 = ((ptr & 3) * 8) = (ptr << 3) & (3*8) RLDC $3, R3, $(3*8), R6 - // Shift val for aligned ptr. R4 = val << R6 | ^(0xFF << R6) + // Shift val for aligned ptr. R4 = val << R6 | ^(0xFF << R6) MOVD $0xFF, R7 SLD R6, R4 SLD R6, R7 diff --git a/src/runtime/internal/atomic/atomic_test.go b/src/runtime/internal/atomic/atomic_test.go index e8ec788d6a..d5dc552b95 100644 --- a/src/runtime/internal/atomic/atomic_test.go +++ b/src/runtime/internal/atomic/atomic_test.go @@ -48,7 +48,7 @@ func TestXadduintptr(t *testing.T) { } } -// Tests that xadduintptr correctly updates 64-bit values. The place where +// Tests that xadduintptr correctly updates 64-bit values. The place where // we actually do so is mstats.go, functions mSysStat{Inc,Dec}. func TestXadduintptrOnUint64(t *testing.T) { /* if runtime.BigEndian != 0 { diff --git a/src/runtime/lock_futex.go b/src/runtime/lock_futex.go index fc480290cf..d28fd92720 100644 --- a/src/runtime/lock_futex.go +++ b/src/runtime/lock_futex.go @@ -58,7 +58,7 @@ func lock(l *mutex) { // wait is either MUTEX_LOCKED or MUTEX_SLEEPING // depending on whether there is a thread sleeping - // on this mutex. If we ever change l->key from + // on this mutex. If we ever change l->key from // MUTEX_SLEEPING to some other value, we must be // careful to change it back to MUTEX_SLEEPING before // returning, to ensure that the sleeping thread gets diff --git a/src/runtime/lock_sema.go b/src/runtime/lock_sema.go index 1220674ab9..0fa0481733 100644 --- a/src/runtime/lock_sema.go +++ b/src/runtime/lock_sema.go @@ -81,7 +81,7 @@ Loop: } } if v&locked != 0 { - // Queued. Wait. + // Queued. Wait. semasleep(-1) i = 0 } @@ -143,7 +143,7 @@ func notewakeup(n *note) { // Two notewakeups! Not allowed. throw("notewakeup - double wakeup") default: - // Must be the waiting m. Wake it up. + // Must be the waiting m. Wake it up. 
semawakeup((*m)(unsafe.Pointer(v))) } } @@ -161,7 +161,7 @@ func notesleep(n *note) { } return } - // Queued. Sleep. + // Queued. Sleep. gp.m.blocked = true semasleep(-1) gp.m.blocked = false @@ -184,7 +184,7 @@ func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool { return true } if ns < 0 { - // Queued. Sleep. + // Queued. Sleep. gp.m.blocked = true semasleep(-1) gp.m.blocked = false @@ -193,7 +193,7 @@ func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool { deadline = nanotime() + ns for { - // Registered. Sleep. + // Registered. Sleep. gp.m.blocked = true if semasleep(ns) >= 0 { gp.m.blocked = false @@ -202,15 +202,15 @@ func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool { return true } gp.m.blocked = false - // Interrupted or timed out. Still registered. Semaphore not acquired. + // Interrupted or timed out. Still registered. Semaphore not acquired. ns = deadline - nanotime() if ns <= 0 { break } - // Deadline hasn't arrived. Keep sleeping. + // Deadline hasn't arrived. Keep sleeping. } - // Deadline arrived. Still registered. Semaphore not acquired. + // Deadline arrived. Still registered. Semaphore not acquired. // Want to give up and return, but have to unregister first, // so that any notewakeup racing with the return does not // try to grant us the semaphore when we don't expect it. diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index b520c68df0..e5a5fe61d9 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -65,8 +65,8 @@ // directly, bypassing the MCache and MCentral free lists. // // The small objects on the MCache and MCentral free lists -// may or may not be zeroed. They are zeroed if and only if -// the second word of the object is zero. A span in the +// may or may not be zeroed. They are zeroed if and only if +// the second word of the object is zero. A span in the // page heap is zeroed unless s->needzero is set. 
When a span // is allocated to break into small objects, it is zeroed if needed // and s->needzero is set. There are two main benefits to delaying the @@ -113,9 +113,9 @@ const ( // _64bit = 1 on 64-bit systems, 0 on 32-bit systems _64bit = 1 << (^uintptr(0) >> 63) / 2 - // Computed constant. The definition of MaxSmallSize and the + // Computed constant. The definition of MaxSmallSize and the // algorithm in msize.go produces some number of different allocation - // size classes. NumSizeClasses is that number. It's needed here + // size classes. NumSizeClasses is that number. It's needed here // because there are static arrays of this length; when msize runs its // size choosing algorithm it double-checks that NumSizeClasses agrees. _NumSizeClasses = 67 @@ -134,9 +134,9 @@ const ( // Per-P, per order stack segment cache size. _StackCacheSize = 32 * 1024 - // Number of orders that get caching. Order 0 is FixedStack + // Number of orders that get caching. Order 0 is FixedStack // and each successive order is twice as large. - // We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks + // We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks // will be allocated directly. // Since FixedStack is different on different systems, we // must vary NumStackOrders to keep the same maximum cached size. @@ -165,7 +165,7 @@ const ( // Max number of threads to run garbage collection. // 2, 3, and 4 are all plausible maximums depending - // on the hardware details of the machine. The garbage + // on the hardware details of the machine. The garbage // collector scales well to 32 cpus. _MaxGcproc = 32 ) @@ -192,14 +192,14 @@ const _MaxArena32 = 2 << 30 // // SysFree returns it unconditionally; this is only used if // an out-of-memory error has been detected midway through -// an allocation. It is okay if SysFree is a no-op. +// an allocation. It is okay if SysFree is a no-op. // // SysReserve reserves address space without allocating memory. 
// If the pointer passed to it is non-nil, the caller wants the // reservation there, but SysReserve can still choose another -// location if that one is unavailable. On some systems and in some +// location if that one is unavailable. On some systems and in some // cases SysReserve will simply check that the address space is -// available and not actually reserve it. If SysReserve returns +// available and not actually reserve it. If SysReserve returns // non-nil, it sets *reserved to true if the address space is // reserved, false if it has merely been checked. // NOTE: SysReserve returns OS-aligned memory, but the heap allocator @@ -211,7 +211,7 @@ const _MaxArena32 = 2 << 30 // reserved, not merely checked. // // SysFault marks a (already sysAlloc'd) region to fault -// if accessed. Used only for debugging the runtime. +// if accessed. Used only for debugging the runtime. func mallocinit() { initSizes() @@ -229,7 +229,7 @@ func mallocinit() { limit = 0 // Set up the allocation arena, a contiguous area of memory where - // allocated data will be found. The arena begins with a bitmap large + // allocated data will be found. The arena begins with a bitmap large // enough to hold 4 bits per allocated word. if sys.PtrSize == 8 && (limit == 0 || limit > 1<<30) { // On a 64-bit machine, allocate from a single contiguous reservation. @@ -239,12 +239,12 @@ func mallocinit() { // SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f). // Allocating a 512 GB region takes away 39 bits, and the amd64 // doesn't let us choose the top 17 bits, so that leaves the 9 bits - // in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means + // in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means // that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df. // In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid // UTF-8 sequences, and they are otherwise as far away from - // ff (likely a common byte) as possible. 
If that fails, we try other 0xXXc0 - // addresses. An earlier attempt to use 0x11f8 caused out of memory errors + // ff (likely a common byte) as possible. If that fails, we try other 0xXXc0 + // addresses. An earlier attempt to use 0x11f8 caused out of memory errors // on OS X during thread allocations. 0x00c0 causes conflicts with // AddressSanitizer which reserves all memory up to 0x0100. // These choices are both for debuggability and to reduce the @@ -321,10 +321,10 @@ func mallocinit() { spansSize = round(spansSize, _PageSize) // SysReserve treats the address we ask for, end, as a hint, - // not as an absolute requirement. If we ask for the end + // not as an absolute requirement. If we ask for the end // of the data segment but the operating system requires // a little more space before we can start allocating, it will - // give out a slightly higher pointer. Except QEMU, which + // give out a slightly higher pointer. Except QEMU, which // is buggy, as usual: it won't adjust the pointer upward. // So adjust it upward a little bit ourselves: 1/4 MB to get // away from the running binary image and then round up @@ -803,7 +803,7 @@ func reflect_unsafe_NewArray(typ *_type, n uintptr) unsafe.Pointer { return newarray(typ, n) } -// rawmem returns a chunk of pointerless memory. It is +// rawmem returns a chunk of pointerless memory. It is // not zeroed. func rawmem(size uintptr) unsafe.Pointer { return mallocgc(size, nil, flagNoScan|flagNoZero) diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go index b06d354eb6..2230c5c200 100644 --- a/src/runtime/mcache.go +++ b/src/runtime/mcache.go @@ -101,7 +101,7 @@ func freemcache(c *mcache) { } // Gets a span that has a free object in it and assigns it -// to be the cached span for the given sizeclass. Returns this span. +// to be the cached span for the given sizeclass. Returns this span. 
func (c *mcache) refill(sizeclass int32) *mspan { _g_ := getg() diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go index 8a2fbe98c9..4f0b86c228 100644 --- a/src/runtime/mcentral.go +++ b/src/runtime/mcentral.go @@ -146,7 +146,7 @@ func (c *mcentral) uncacheSpan(s *mspan) { // Free n objects from a span s back into the central free list c. // Called during sweep. -// Returns true if the span was returned to heap. Sets sweepgen to +// Returns true if the span was returned to heap. Sets sweepgen to // the latest generation. // If preserve=true, don't return the span to heap nor relink in MCentral lists; // caller takes care of it. @@ -179,7 +179,7 @@ func (c *mcentral) freeSpan(s *mspan, n int32, start gclinkptr, end gclinkptr, p c.nonempty.insert(s) } - // delay updating sweepgen until here. This is the signal that + // delay updating sweepgen until here. This is the signal that // the span may be used in an MCache, so it must come after the // linked list operations above (actually, just after the // lock of c above.) diff --git a/src/runtime/mem_bsd.go b/src/runtime/mem_bsd.go index 6c24137f3c..bf4f24426c 100644 --- a/src/runtime/mem_bsd.go +++ b/src/runtime/mem_bsd.go @@ -44,7 +44,7 @@ func sysFault(v unsafe.Pointer, n uintptr) { func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer { // On 64-bit, people with ulimit -v set complain if we reserve too - // much address space. Instead, assume that the reservation is okay + // much address space. Instead, assume that the reservation is okay // and check the assumption in SysMap. 
if sys.PtrSize == 8 && uint64(n) > 1<<32 || sys.GoosNacl != 0 { *reserved = false diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go index 85a7f80efc..1ee13bd7e6 100644 --- a/src/runtime/mem_linux.go +++ b/src/runtime/mem_linux.go @@ -172,7 +172,7 @@ func sysFault(v unsafe.Pointer, n uintptr) { func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer { // On 64-bit, people with ulimit -v set complain if we reserve too - // much address space. Instead, assume that the reservation is okay + // much address space. Instead, assume that the reservation is okay // if we can reserve at least 64K and check the assumption in SysMap. // Only user-mode Linux (UML) rejects these requests. if sys.PtrSize == 8 && uint64(n) > 1<<32 { diff --git a/src/runtime/memclr_arm.s b/src/runtime/memclr_arm.s index 8b5fe31c51..c9b8586eae 100644 --- a/src/runtime/memclr_arm.s +++ b/src/runtime/memclr_arm.s @@ -1,7 +1,7 @@ // Inferno's libkern/memset-arm.s // http://code.google.com/p/inferno-os/source/browse/libkern/memset-arm.s // -// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. // Portions Copyright 2009 The Go Authors. All rights reserved. // diff --git a/src/runtime/memmove_386.s b/src/runtime/memmove_386.s index f72a73ae4f..9a21e84136 100644 --- a/src/runtime/memmove_386.s +++ b/src/runtime/memmove_386.s @@ -1,7 +1,7 @@ // Inferno's libkern/memmove-386.s // http://code.google.com/p/inferno-os/source/browse/libkern/memmove-386.s // -// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. // Portions Copyright 2009 The Go Authors. All rights reserved. 
// @@ -33,8 +33,8 @@ TEXT runtime·memmove(SB), NOSPLIT, $0-12 MOVL n+8(FP), BX // REP instructions have a high startup cost, so we handle small sizes - // with some straightline code. The REP MOVSL instruction is really fast - // for large sizes. The cutover is approximately 1K. We implement up to + // with some straightline code. The REP MOVSL instruction is really fast + // for large sizes. The cutover is approximately 1K. We implement up to // 128 because that is the maximum SSE register load (loading all data // into registers lets us ignore copy direction). tail: diff --git a/src/runtime/memmove_amd64.s b/src/runtime/memmove_amd64.s index e14614d631..ae95b155be 100644 --- a/src/runtime/memmove_amd64.s +++ b/src/runtime/memmove_amd64.s @@ -1,7 +1,7 @@ // Derived from Inferno's libkern/memmove-386.s (adapted for amd64) // http://code.google.com/p/inferno-os/source/browse/libkern/memmove-386.s // -// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. // Portions Copyright 2009 The Go Authors. All rights reserved. // @@ -35,8 +35,8 @@ TEXT runtime·memmove(SB), NOSPLIT, $0-24 MOVQ n+16(FP), BX // REP instructions have a high startup cost, so we handle small sizes - // with some straightline code. The REP MOVSQ instruction is really fast - // for large sizes. The cutover is approximately 2K. + // with some straightline code. The REP MOVSQ instruction is really fast + // for large sizes. The cutover is approximately 2K. 
tail: // move_129through256 or smaller work whether or not the source and the // destination memory regions overlap because they load all data into diff --git a/src/runtime/memmove_arm.s b/src/runtime/memmove_arm.s index 35f04a84bc..6b880d5e6d 100644 --- a/src/runtime/memmove_arm.s +++ b/src/runtime/memmove_arm.s @@ -1,7 +1,7 @@ // Inferno's libkern/memmove-arm.s // http://code.google.com/p/inferno-os/source/browse/libkern/memmove-arm.s // -// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. // Portions Copyright 2009 The Go Authors. All rights reserved. // diff --git a/src/runtime/memmove_nacl_amd64p32.s b/src/runtime/memmove_nacl_amd64p32.s index 64b732e777..13907a90b2 100644 --- a/src/runtime/memmove_nacl_amd64p32.s +++ b/src/runtime/memmove_nacl_amd64p32.s @@ -47,6 +47,6 @@ back: CLD // Note: we copy only 4 bytes at a time so that the tail is at most - // 3 bytes. That guarantees that we aren't copying pointers with MOVSB. + // 3 bytes. That guarantees that we aren't copying pointers with MOVSB. // See issue 13160. RET diff --git a/src/runtime/memmove_plan9_386.s b/src/runtime/memmove_plan9_386.s index 3b492eb6cd..c4d62ec946 100644 --- a/src/runtime/memmove_plan9_386.s +++ b/src/runtime/memmove_plan9_386.s @@ -1,7 +1,7 @@ // Inferno's libkern/memmove-386.s // http://code.google.com/p/inferno-os/source/browse/libkern/memmove-386.s // -// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. // Portions Copyright 2009 The Go Authors. All rights reserved. 
// @@ -31,8 +31,8 @@ TEXT runtime·memmove(SB), NOSPLIT, $0-12 MOVL n+8(FP), BX // REP instructions have a high startup cost, so we handle small sizes - // with some straightline code. The REP MOVSL instruction is really fast - // for large sizes. The cutover is approximately 1K. + // with some straightline code. The REP MOVSL instruction is really fast + // for large sizes. The cutover is approximately 1K. tail: TESTL BX, BX JEQ move_0 diff --git a/src/runtime/memmove_plan9_amd64.s b/src/runtime/memmove_plan9_amd64.s index a1cc25567b..9bef31d43e 100644 --- a/src/runtime/memmove_plan9_amd64.s +++ b/src/runtime/memmove_plan9_amd64.s @@ -1,7 +1,7 @@ // Derived from Inferno's libkern/memmove-386.s (adapted for amd64) // http://code.google.com/p/inferno-os/source/browse/libkern/memmove-386.s // -// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. // Portions Copyright 2009 The Go Authors. All rights reserved. // @@ -33,8 +33,8 @@ TEXT runtime·memmove(SB), NOSPLIT, $0-24 MOVQ n+16(FP), BX // REP instructions have a high startup cost, so we handle small sizes - // with some straightline code. The REP MOVSQ instruction is really fast - // for large sizes. The cutover is approximately 1K. + // with some straightline code. The REP MOVSQ instruction is really fast + // for large sizes. The cutover is approximately 1K. tail: TESTQ BX, BX JEQ move_0 diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go index 9418e147bc..6142c2d532 100644 --- a/src/runtime/mfinal.go +++ b/src/runtime/mfinal.go @@ -219,8 +219,8 @@ func runfinq() { // SetFinalizer sets the finalizer associated with x to f. // When the garbage collector finds an unreachable block // with an associated finalizer, it clears the association and runs -// f(x) in a separate goroutine. 
This makes x reachable again, but -// now without an associated finalizer. Assuming that SetFinalizer +// f(x) in a separate goroutine. This makes x reachable again, but +// now without an associated finalizer. Assuming that SetFinalizer // is not called again, the next time the garbage collector sees // that x is unreachable, it will free x. // @@ -374,8 +374,8 @@ okarg: }) } -// Look up pointer v in heap. Return the span containing the object, -// the start of the object, and the size of the object. If the object +// Look up pointer v in heap. Return the span containing the object, +// the start of the object, and the size of the object. If the object // does not exist, return nil, nil, 0. func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) { c := gomcache() diff --git a/src/runtime/mfixalloc.go b/src/runtime/mfixalloc.go index 569a304cf4..c4ab6487a8 100644 --- a/src/runtime/mfixalloc.go +++ b/src/runtime/mfixalloc.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Fixed-size object allocator. Returned memory is not zeroed. +// Fixed-size object allocator. Returned memory is not zeroed. // // See malloc.go for overview. diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index 138a623ca5..52936cec2c 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -1165,8 +1165,8 @@ func gcMarkTermination() { casgstatus(gp, _Grunning, _Gwaiting) gp.waitreason = "garbage collection" - // Run gc on the g0 stack. We do this so that the g stack - // we're currently running on will no longer change. Cuts + // Run gc on the g0 stack. We do this so that the g stack + // we're currently running on will no longer change. Cuts // the root set down a bit (g0 stacks are not scanned, and // we don't need to scan gc's internal state). We also // need to switch to g0 so we can shrink the stack. 
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index 0be908bfe8..241fbc8169 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -704,7 +704,7 @@ func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) { pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache) if pcdata == -1 { // We do not have a valid pcdata value but there might be a - // stackmap for this function. It is likely that we are looking + // stackmap for this function. It is likely that we are looking // at the function prologue, assume so and hope for the best. pcdata = 0 } diff --git a/src/runtime/mgcwork.go b/src/runtime/mgcwork.go index 7bc4278195..63a3ade3a6 100644 --- a/src/runtime/mgcwork.go +++ b/src/runtime/mgcwork.go @@ -17,11 +17,11 @@ const ( // Garbage collector work pool abstraction. // // This implements a producer/consumer model for pointers to grey -// objects. A grey object is one that is marked and on a work -// queue. A black object is marked and not on a work queue. +// objects. A grey object is one that is marked and on a work +// queue. A black object is marked and not on a work queue. // // Write barriers, root discovery, stack scanning, and object scanning -// produce pointers to grey objects. Scanning consumes pointers to +// produce pointers to grey objects. Scanning consumes pointers to // grey objects, thus blackening them, and then scans them, // potentially producing new pointers to grey objects. diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index e8189547f8..06a7d88143 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -161,7 +161,7 @@ var h_allspans []*mspan // TODO: make this h.allspans once mheap can be defined // h_spans is a lookup table to map virtual address page IDs to *mspan. // For allocated spans, their pages map to the span itself. -// For free spans, only the lowest and highest pages map to the span itself. 
Internal +// For free spans, only the lowest and highest pages map to the span itself. Internal // pages map to an arbitrary span. // For pages that have never been allocated, h_spans entries are nil. var h_spans []*mspan // TODO: make this h.spans once mheap can be defined in Go @@ -689,7 +689,7 @@ func (h *mheap) lookup(v unsafe.Pointer) *mspan { // Address is *not* guaranteed to be in map // and may be anywhere in the span. // Map entries for the middle of a span are only -// valid for allocated spans. Free spans may have +// valid for allocated spans. Free spans may have // other garbage in their middles, so we have to // check for that. func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan { @@ -971,7 +971,7 @@ type special struct { } // Adds the special record s to the list of special records for -// the object p. All fields of s should be filled in except for +// the object p. All fields of s should be filled in except for // offset & next, which this routine will fill in. // Returns true if the special was successfully added, false otherwise. // (The add will fail only if a record with the same p and s->kind @@ -1069,7 +1069,7 @@ type specialfinalizer struct { ot *ptrtype } -// Adds a finalizer to the object p. Returns true if it succeeded. +// Adds a finalizer to the object p. Returns true if it succeeded. func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool { lock(&mheap_.speciallock) s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc()) @@ -1138,7 +1138,7 @@ func setprofilebucket(p unsafe.Pointer, b *bucket) { } } -// Do whatever cleanup needs to be done to deallocate s. It has +// Do whatever cleanup needs to be done to deallocate s. It has // already been unlinked from the MSpan specials list. 
func freespecial(s *special, p unsafe.Pointer, size uintptr) { switch s.kind { diff --git a/src/runtime/mmap.go b/src/runtime/mmap.go index bcad065ea9..6363a90242 100644 --- a/src/runtime/mmap.go +++ b/src/runtime/mmap.go @@ -12,5 +12,5 @@ package runtime import "unsafe" -// mmap calls the mmap system call. It is implemented in assembly. +// mmap calls the mmap system call. It is implemented in assembly. func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index d498a9328a..7be3ee9bf9 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -262,7 +262,7 @@ func mProf_Free(b *bucket, size uintptr) { var blockprofilerate uint64 // in CPU ticks // SetBlockProfileRate controls the fraction of goroutine blocking events -// that are reported in the blocking profile. The profiler aims to sample +// that are reported in the blocking profile. The profiler aims to sample // an average of one blocking event per rate nanoseconds spent blocked. // // To include every blocking event in the profile, pass rate = 1. @@ -335,7 +335,7 @@ func (r *StackRecord) Stack() []uintptr { // // The tools that process the memory profiles assume that the // profile rate is constant across the lifetime of the program -// and equal to the current value. Programs that change the +// and equal to the current value. Programs that change the // memory profiling rate should do so just once, as early as // possible in the execution of the program (for example, // at the beginning of main). diff --git a/src/runtime/msan.go b/src/runtime/msan.go index 7eeeb30e6a..7177c8e611 100644 --- a/src/runtime/msan.go +++ b/src/runtime/msan.go @@ -24,10 +24,10 @@ func MSanWrite(addr unsafe.Pointer, len int) { const msanenabled = true // If we are running on the system stack, the C program may have -// marked part of that stack as uninitialized. We don't instrument +// marked part of that stack as uninitialized. 
We don't instrument // the runtime, but operations like a slice copy can call msanread -// anyhow for values on the stack. Just ignore msanread when running -// on the system stack. The other msan functions are fine. +// anyhow for values on the stack. Just ignore msanread when running +// on the system stack. The other msan functions are fine. func msanread(addr unsafe.Pointer, sz uintptr) { g := getg() if g == g.m.g0 || g == g.m.gsignal { diff --git a/src/runtime/msize.go b/src/runtime/msize.go index bc735beb42..21fe2f4c61 100644 --- a/src/runtime/msize.go +++ b/src/runtime/msize.go @@ -13,7 +13,7 @@ // and chopped up when new objects of the size class are needed. // That page count is chosen so that chopping up the run of // pages into objects of the given size wastes at most 12.5% (1.125x) -// of the memory. It is not necessary that the cutoff here be +// of the memory. It is not necessary that the cutoff here be // the same as above. // // The two sources of waste multiply, so the worst possible case @@ -27,7 +27,7 @@ package runtime -// Size classes. Computed and initialized by InitSizes. +// Size classes. Computed and initialized by InitSizes. // // SizeToClass(0 <= n <= MaxSmallSize) returns the size class, // 1 <= sizeclass < NumSizeClasses, for n. diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go index 0f821b7e10..1d9b41ed20 100644 --- a/src/runtime/mstats.go +++ b/src/runtime/mstats.go @@ -371,7 +371,7 @@ func purgecachedstats(c *mcache) { } } -// Atomically increases a given *system* memory stat. We are counting on this +// Atomically increases a given *system* memory stat. We are counting on this // stat never overflowing a uintptr, so this function must only be used for // system memory stats. // @@ -395,7 +395,7 @@ func mSysStatInc(sysStat *uint64, n uintptr) { } } -// Atomically decreases a given *system* memory stat. Same comments as +// Atomically decreases a given *system* memory stat. Same comments as // mSysStatInc apply. 
//go:nosplit func mSysStatDec(sysStat *uint64, n uintptr) { diff --git a/src/runtime/netpoll_kqueue.go b/src/runtime/netpoll_kqueue.go index 36956bae71..337377a95b 100644 --- a/src/runtime/netpoll_kqueue.go +++ b/src/runtime/netpoll_kqueue.go @@ -31,7 +31,7 @@ func netpollinit() { func netpollopen(fd uintptr, pd *pollDesc) int32 { // Arm both EVFILT_READ and EVFILT_WRITE in edge-triggered mode (EV_CLEAR) - // for the whole fd lifetime. The notifications are automatically unregistered + // for the whole fd lifetime. The notifications are automatically unregistered // when fd is closed. var ev [2]keventt *(*uintptr)(unsafe.Pointer(&ev[0].ident)) = fd diff --git a/src/runtime/os1_darwin.go b/src/runtime/os1_darwin.go index 19bb0f16e0..01dc90f97c 100644 --- a/src/runtime/os1_darwin.go +++ b/src/runtime/os1_darwin.go @@ -66,7 +66,7 @@ func goenvs() { goenvs_unix() // Register our thread-creation callback (see sys_darwin_{amd64,386}.s) - // but only if we're not using cgo. If we are using cgo we need + // but only if we're not using cgo. If we are using cgo we need // to let the C pthread library install its own thread-creation callback. if !iscgo { if bsdthread_register() != 0 { @@ -290,7 +290,7 @@ func machcall(h *machheader, maxsize int32, rxsize int32) int32 { // Look for a response giving the return value. // Any call can send this back with an error, // and some calls only have return values so they - // send it back on success too. I don't quite see how + // send it back on success too. I don't quite see how // you know it's one of these and not the full response // format, so just look if the message is right. c := (*codemsg)(unsafe.Pointer(h)) diff --git a/src/runtime/os1_dragonfly.go b/src/runtime/os1_dragonfly.go index 7e4f84e6a3..d7044ae4b0 100644 --- a/src/runtime/os1_dragonfly.go +++ b/src/runtime/os1_dragonfly.go @@ -143,9 +143,9 @@ func minit() { // Initialize signal handling. 
// On DragonFly a thread created by pthread_create inherits - // the signal stack of the creating thread. We always create + // the signal stack of the creating thread. We always create // a new signal stack here, to avoid having two Go threads - // using the same signal stack. This breaks the case of a + // using the same signal stack. This breaks the case of a // thread created in C that calls sigaltstack and then calls a // Go function, because we will lose track of the C code's // sigaltstack, but it's the best we can do. @@ -191,7 +191,7 @@ func memlimit() uintptr { return 0; // If there's not at least 16 MB left, we're probably - // not going to be able to do much. Treat as no limit. + // not going to be able to do much. Treat as no limit. rl.rlim_cur -= used; if(rl.rlim_cur < (16<<20)) return 0; diff --git a/src/runtime/os1_freebsd.go b/src/runtime/os1_freebsd.go index f00fdf4389..0dafe02325 100644 --- a/src/runtime/os1_freebsd.go +++ b/src/runtime/os1_freebsd.go @@ -201,7 +201,7 @@ func memlimit() uintptr { return 0; // If there's not at least 16 MB left, we're probably - // not going to be able to do much. Treat as no limit. + // not going to be able to do much. Treat as no limit. rl.rlim_cur -= used; if(rl.rlim_cur < (16<<20)) return 0; diff --git a/src/runtime/os1_linux.go b/src/runtime/os1_linux.go index 2d53b934f5..1c1ead8790 100644 --- a/src/runtime/os1_linux.go +++ b/src/runtime/os1_linux.go @@ -33,7 +33,7 @@ func futexsleep(addr *uint32, val uint32, ns int64) { // Some Linux kernels have a bug where futex of // FUTEX_WAIT returns an internal error code - // as an errno. Libpthread ignores the return value + // as an errno. Libpthread ignores the return value // here, and so can we: as it says a few lines up, // spurious wakeups are allowed. if ns < 0 { @@ -138,7 +138,7 @@ func newosproc(mp *m, stk unsafe.Pointer) { } // Disable signals during clone, so that the new thread starts - // with signals disabled. It will enable them in minit. 
+ // with signals disabled. It will enable them in minit. var oset sigset rtsigprocmask(_SIG_SETMASK, &sigset_all, &oset, int32(unsafe.Sizeof(oset))) ret := clone(cloneFlags, stk, unsafe.Pointer(mp), unsafe.Pointer(mp.g0), unsafe.Pointer(funcPC(mstart))) @@ -288,7 +288,7 @@ func memlimit() uintptr { return 0; // If there's not at least 16 MB left, we're probably - // not going to be able to do much. Treat as no limit. + // not going to be able to do much. Treat as no limit. rl.rlim_cur -= used; if(rl.rlim_cur < (16<<20)) return 0; diff --git a/src/runtime/os1_nacl.go b/src/runtime/os1_nacl.go index 4cafaf2ebc..6fc2819cdd 100644 --- a/src/runtime/os1_nacl.go +++ b/src/runtime/os1_nacl.go @@ -172,7 +172,7 @@ func memlimit() uintptr { return 0 } -// This runs on a foreign stack, without an m or a g. No stack split. +// This runs on a foreign stack, without an m or a g. No stack split. //go:nosplit func badsignal2() { write(2, unsafe.Pointer(&badsignal1[0]), int32(len(badsignal1))) diff --git a/src/runtime/os1_netbsd.go b/src/runtime/os1_netbsd.go index e32df9585c..3c3b64186d 100644 --- a/src/runtime/os1_netbsd.go +++ b/src/runtime/os1_netbsd.go @@ -114,9 +114,9 @@ func newosproc(mp *m, stk unsafe.Pointer) { } // netbsdMStart is the function call that starts executing a newly -// created thread. On NetBSD, a new thread inherits the signal stack -// of the creating thread. That confuses minit, so we remove that -// signal stack here before calling the regular mstart. It's a bit +// created thread. On NetBSD, a new thread inherits the signal stack +// of the creating thread. That confuses minit, so we remove that +// signal stack here before calling the regular mstart. It's a bit // baroque to remove a signal stack here only to add one in minit, but // it's a simple change that keeps NetBSD working like other OS's. // At this point all signals are blocked, so there is no race. @@ -175,9 +175,9 @@ func minit() { // Initialize signal handling. 
// On NetBSD a thread created by pthread_create inherits the - // signal stack of the creating thread. We always create a + // signal stack of the creating thread. We always create a // new signal stack here, to avoid having two Go threads using - // the same signal stack. This breaks the case of a thread + // the same signal stack. This breaks the case of a thread // created in C that calls sigaltstack and then calls a Go // function, because we will lose track of the C code's // sigaltstack, but it's the best we can do. diff --git a/src/runtime/os1_openbsd.go b/src/runtime/os1_openbsd.go index a6cefa2039..447dff8193 100644 --- a/src/runtime/os1_openbsd.go +++ b/src/runtime/os1_openbsd.go @@ -80,7 +80,7 @@ func semasleep(ns int64) int32 { // // From OpenBSD's __thrsleep(2) manual: // "The abort argument, if not NULL, points to an int that will - // be examined [...] immediately before blocking. If that int + // be examined [...] immediately before blocking. If that int // is non-zero then __thrsleep() will immediately return EINTR // without blocking." ret := thrsleep(uintptr(unsafe.Pointer(&_g_.m.waitsemacount)), _CLOCK_MONOTONIC, tsp, 0, &_g_.m.waitsemacount) diff --git a/src/runtime/os1_plan9.go b/src/runtime/os1_plan9.go index f35a39a418..c114b1db62 100644 --- a/src/runtime/os1_plan9.go +++ b/src/runtime/os1_plan9.go @@ -257,7 +257,7 @@ func memlimit() uint64 { var _badsignal = []byte("runtime: signal received on thread not created by Go.\n") -// This runs on a foreign stack, without an m or a g. No stack split. +// This runs on a foreign stack, without an m or a g. No stack split. 
//go:nosplit func badsignal2() { pwrite(2, unsafe.Pointer(&_badsignal[0]), int32(len(_badsignal)), -1) diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go index fdc817d3f7..349f4abbcf 100644 --- a/src/runtime/os3_solaris.go +++ b/src/runtime/os3_solaris.go @@ -159,7 +159,7 @@ func newosproc(mp *m, _ unsafe.Pointer) { } // Disable signals during create, so that the new thread starts - // with signals disabled. It will enable them in minit. + // with signals disabled. It will enable them in minit. sigprocmask(_SIG_SETMASK, &sigset_all, &oset) ret = pthread_create(&tid, &attr, funcPC(tstart_sysvicall), unsafe.Pointer(mp)) sigprocmask(_SIG_SETMASK, &oset, nil) @@ -266,7 +266,7 @@ func memlimit() uintptr { return 0; // If there's not at least 16 MB left, we're probably - // not going to be able to do much. Treat as no limit. + // not going to be able to do much. Treat as no limit. rl.rlim_cur -= used; if(rl.rlim_cur < (16<<20)) return 0; @@ -357,8 +357,8 @@ func semacreate(mp *m) { var sem *semt _g_ := getg() - // Call libc's malloc rather than malloc. This will - // allocate space on the C heap. We can't call malloc + // Call libc's malloc rather than malloc. This will + // allocate space on the C heap. We can't call malloc // here because it could cause a deadlock. _g_.m.libcall.fn = uintptr(unsafe.Pointer(&libc_malloc)) _g_.m.libcall.n = 1 diff --git a/src/runtime/os_nacl.go b/src/runtime/os_nacl.go index 402dfe9059..6f126b4770 100644 --- a/src/runtime/os_nacl.go +++ b/src/runtime/os_nacl.go @@ -63,7 +63,7 @@ func sigpanic() { func raiseproc(sig int32) { } -// Stubs so tests can link correctly. These should never be called. +// Stubs so tests can link correctly. These should never be called. 
func open(name *byte, mode, perm int32) int32 func closefd(fd int32) int32 func read(fd int32, p unsafe.Pointer, n int32) int32 diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go index cab405e804..8bdf5a271f 100644 --- a/src/runtime/os_windows.go +++ b/src/runtime/os_windows.go @@ -17,7 +17,7 @@ func os_sigpipe() { throw("too many writes on closed pipe") } -// Stubs so tests can link correctly. These should never be called. +// Stubs so tests can link correctly. These should never be called. func open(name *byte, mode, perm int32) int32 { throw("unimplemented") return -1 diff --git a/src/runtime/panic.go b/src/runtime/panic.go index 349e997395..10065c1803 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -62,10 +62,10 @@ func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn throw("defer on system stack") } - // the arguments of fn are in a perilous state. The stack map - // for deferproc does not describe them. So we can't let garbage + // the arguments of fn are in a perilous state. The stack map + // for deferproc does not describe them. So we can't let garbage // collection or stack copying trigger until we've copied them out - // to somewhere safe. The memmove below does that. + // to somewhere safe. The memmove below does that. // Until the copy completes, we can only call nosplit routines. sp := getcallersp(unsafe.Pointer(&siz)) argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn) @@ -255,7 +255,7 @@ func freedeferfn() { // If there is a deferred function, this will call runtime·jmpdefer, // which will jump to the deferred function such that it appears // to have been called by the caller of deferreturn at the point -// just before deferreturn was called. The effect is that deferreturn +// just before deferreturn was called. The effect is that deferreturn // is called again and again until there are no more deferred functions. 
// Cannot split the stack because we reuse the caller's frame to // call the deferred function. @@ -291,8 +291,8 @@ func deferreturn(arg0 uintptr) { jmpdefer(fn, uintptr(unsafe.Pointer(&arg0))) } -// Goexit terminates the goroutine that calls it. No other goroutine is affected. -// Goexit runs all deferred calls before terminating the goroutine. Because Goexit +// Goexit terminates the goroutine that calls it. No other goroutine is affected. +// Goexit runs all deferred calls before terminating the goroutine. Because Goexit // is not panic, however, any recover calls in those deferred functions will return nil. // // Calling Goexit from the main goroutine terminates that goroutine @@ -348,7 +348,7 @@ func preprintpanics(p *_panic) { } } -// Print all currently active panics. Used when crashing. +// Print all currently active panics. Used when crashing. func printpanics(p *_panic) { if p.link != nil { printpanics(p.link) @@ -449,7 +449,7 @@ func gopanic(e interface{}) { d.fn = nil gp._defer = d.link - // trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic + // trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic //GC() pc := d.pc @@ -554,7 +554,7 @@ func throw(s string) { var paniclk mutex // Unwind the stack after a deferred function calls recover -// after a panic. Then arrange to continue running as though +// after a panic. Then arrange to continue running as though // the caller of the deferred function returned normally. func recovery(gp *g) { // Info about defer passed in G struct. diff --git a/src/runtime/pprof/pprof.go b/src/runtime/pprof/pprof.go index f4a4a90c84..b702fd345d 100644 --- a/src/runtime/pprof/pprof.go +++ b/src/runtime/pprof/pprof.go @@ -31,7 +31,7 @@ import ( // // A Profile's methods can be called from multiple goroutines simultaneously. // -// Each Profile has a unique name. A few profiles are predefined: +// Each Profile has a unique name. 
A few profiles are predefined: // // goroutine - stack traces of all current goroutines // heap - a sampling of all heap allocations @@ -48,7 +48,7 @@ import ( // all known allocations. This exception helps mainly in programs running // without garbage collection enabled, usually for debugging purposes. // -// The CPU profile is not available as a Profile. It has a special API, +// The CPU profile is not available as a Profile. It has a special API, // the StartCPUProfile and StopCPUProfile functions, because it streams // output to a writer during profiling. // @@ -173,11 +173,11 @@ func (p *Profile) Count() int { // Add adds the current execution stack to the profile, associated with value. // Add stores value in an internal map, so value must be suitable for use as // a map key and will not be garbage collected until the corresponding -// call to Remove. Add panics if the profile already contains a stack for value. +// call to Remove. Add panics if the profile already contains a stack for value. // // The skip parameter has the same meaning as runtime.Caller's skip -// and controls where the stack trace begins. Passing skip=0 begins the -// trace in the function calling Add. For example, given this +// and controls where the stack trace begins. Passing skip=0 begins the +// trace in the function calling Add. For example, given this // execution stack: // // Add @@ -266,7 +266,7 @@ func (x stackProfile) Less(i, j int) bool { } // A countProfile is a set of stack traces to be printed as counts -// grouped by stack trace. There are multiple implementations: +// grouped by stack trace. There are multiple implementations: // all that matters is that we can find out how many traces there are // and obtain each trace in turn. type countProfile interface { @@ -500,7 +500,7 @@ func writeGoroutine(w io.Writer, debug int) error { func writeGoroutineStacks(w io.Writer) error { // We don't know how big the buffer needs to be to collect - // all the goroutines. 
Start with 1 MB and try a few times, doubling each time. + // all the goroutines. Start with 1 MB and try a few times, doubling each time. // Give up and use a truncated trace if 64 MB is not enough. buf := make([]byte, 1<<20) for i := 0; ; i++ { @@ -563,7 +563,7 @@ var cpu struct { // Go code built with -buildmode=c-archive or -buildmode=c-shared. // StartCPUProfile relies on the SIGPROF signal, but that signal will // be delivered to the main program's SIGPROF signal handler (if any) -// not to the one used by Go. To make it work, call os/signal.Notify +// not to the one used by Go. To make it work, call os/signal.Notify // for syscall.SIGPROF, but note that doing so may break any profiling // being done by the main program. func StartCPUProfile(w io.Writer) error { @@ -574,7 +574,7 @@ func StartCPUProfile(w io.Writer) error { // 100 Hz is a reasonable choice: it is frequent enough to // produce useful data, rare enough not to bog down the // system, and a nice round number to make it easy to - // convert sample counts to seconds. Instead of requiring + // convert sample counts to seconds. Instead of requiring // each client to specify the frequency, we hard code it. const hz = 100 diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 16237e98ec..4aa6b3600e 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -126,7 +126,7 @@ func main() { }) // Lock the main goroutine onto this, the main OS thread, - // during initialization. Most programs won't care, but a few + // during initialization. Most programs won't care, but a few // do require certain calls to be made by the main thread. // Those can arrange for main.main to run in the main thread // by calling runtime.LockOSThread during initialization @@ -237,7 +237,7 @@ func forcegchelper() { //go:nosplit -// Gosched yields the processor, allowing other goroutines to run. It does not +// Gosched yields the processor, allowing other goroutines to run. 
It does not // suspend the current goroutine, so execution resumes automatically. func Gosched() { mcall(gosched_m) @@ -424,7 +424,7 @@ func schedinit() { sched.maxmcount = 10000 - // Cache the framepointer experiment. This affects stack unwinding. + // Cache the framepointer experiment. This affects stack unwinding. framepointer_enabled = haveexperiment("framepointer") tracebackinit() @@ -454,7 +454,7 @@ func schedinit() { } if buildVersion == "" { - // Condition should never trigger. This code just serves + // Condition should never trigger. This code just serves // to ensure runtime·buildVersion is kept in the resulting binary. buildVersion = "unknown" } @@ -1014,7 +1014,7 @@ func startTheWorldWithSema() { // in the hope that it will be available next time. // It would have been even better to start it before the collection, // but doing so requires allocating memory, so it's tricky to - // coordinate. This lazy approach works out in practice: + // coordinate. This lazy approach works out in practice: // we don't mind if the first couple gc rounds don't have quite // the maximum number of procs. newm(mhelpgc, nil) @@ -1470,7 +1470,7 @@ func unlockextra(mp *m) { atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp))) } -// Create a new m. It will start off with a call to fn, or else the scheduler. +// Create a new m. It will start off with a call to fn, or else the scheduler. // fn needs to be static and not a heap allocated closure. // May run with m.p==nil, so write barriers are not allowed. //go:nowritebarrier @@ -2641,7 +2641,7 @@ func newproc(siz int32, fn *funcval) { // Create a new g running fn with narg bytes of arguments starting // at argp and returning nret bytes of results. callerpc is the -// address of the go statement that created this. The new g is put +// address of the go statement that created this. The new g is put // on the queue of g's waiting to run. 
func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g { _g_ := getg() @@ -2792,7 +2792,7 @@ retry: _p_.gfree = gp.schedlink.ptr() _p_.gfreecnt-- if gp.stack.lo == 0 { - // Stack was deallocated in gfput. Allocate a new one. + // Stack was deallocated in gfput. Allocate a new one. systemstack(func() { gp.stack, gp.stkbar = stackalloc(_FixedStack) }) @@ -3128,7 +3128,7 @@ func setcpuprofilerate_m(hz int32) { _g_.m.locks-- } -// Change number of processors. The world is stopped, sched is locked. +// Change number of processors. The world is stopped, sched is locked. // gcworkbufs are not being modified by either the GC or // the write barrier code. // Returns list of Ps with local work, they need to be scheduled by the caller. @@ -3334,7 +3334,7 @@ func incidlelocked(v int32) { // The check is based on number of running M's, if 0 -> deadlock. func checkdead() { // For -buildmode=c-shared or -buildmode=c-archive it's OK if - // there are no running goroutines. The calling program is + // there are no running goroutines. The calling program is // assumed to be running. if islibrary || isarchive { return @@ -3579,7 +3579,7 @@ func retake(now int64) uint32 { } // Tell all goroutines that they have been preempted and they should stop. -// This function is purely best-effort. It can fail to inform a goroutine if a +// This function is purely best-effort. It can fail to inform a goroutine if a // processor just started running it. // No locks need to be held. // Returns true if preemption request was issued to at least one goroutine. @@ -3598,8 +3598,8 @@ func preemptall() bool { } // Tell the goroutine running on processor P to stop. -// This function is purely best-effort. It can incorrectly fail to inform the -// goroutine. It can send inform the wrong goroutine. Even if it informs the +// This function is purely best-effort. It can incorrectly fail to inform the +// goroutine. It can send inform the wrong goroutine. 
Even if it informs the // correct goroutine, that goroutine might ignore the request if it is // simultaneously executing newstack. // No lock needs to be held. diff --git a/src/runtime/rt0_linux_arm.s b/src/runtime/rt0_linux_arm.s index d28c15a43a..a4419b898e 100644 --- a/src/runtime/rt0_linux_arm.s +++ b/src/runtime/rt0_linux_arm.s @@ -13,7 +13,7 @@ TEXT _rt0_arm_linux(SB),NOSPLIT,$-4 // When building with -buildmode=c-shared, this symbol is called when the shared // library is loaded. TEXT _rt0_arm_linux_lib(SB),NOSPLIT,$32 - // Preserve callee-save registers. Raspberry Pi's dlopen(), for example, + // Preserve callee-save registers. Raspberry Pi's dlopen(), for example, // actually cares that R11 is preserved. MOVW R4, 12(R13) MOVW R5, 16(R13) diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go index 400ea296a9..0d539c829c 100644 --- a/src/runtime/runtime1.go +++ b/src/runtime/runtime1.go @@ -77,7 +77,7 @@ func goargs() { func goenvs_unix() { // TODO(austin): ppc64 in dynamic linking mode doesn't - // guarantee env[] will immediately follow argv. Might cause + // guarantee env[] will immediately follow argv. Might cause // problems. n := int32(0) for argv_index(argv, argc+1+n) != nil { diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 5f22afd863..6230b69e80 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -539,7 +539,7 @@ const ( _Structrnd = sys.RegSize ) -// startup_random_data holds random bytes initialized at startup. These come from +// startup_random_data holds random bytes initialized at startup. These come from // the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go). 
var startupRandomData []byte diff --git a/src/runtime/runtime_test.go b/src/runtime/runtime_test.go index f9d8f9dc9f..cd078c7eac 100644 --- a/src/runtime/runtime_test.go +++ b/src/runtime/runtime_test.go @@ -247,8 +247,8 @@ func TestBadOpen(t *testing.T) { if GOOS == "windows" || GOOS == "nacl" { t.Skip("skipping OS that doesn't have open/read/write/close") } - // make sure we get the correct error code if open fails. Same for - // read/write/close on the resulting -1 fd. See issue 10052. + // make sure we get the correct error code if open fails. Same for + // read/write/close on the resulting -1 fd. See issue 10052. nonfile := []byte("/notreallyafile") fd := Open(&nonfile[0], 0, 0) if fd != -1 { diff --git a/src/runtime/select.go b/src/runtime/select.go index b315dde6c6..4e87dba3e7 100644 --- a/src/runtime/select.go +++ b/src/runtime/select.go @@ -239,7 +239,7 @@ func selectgoImpl(sel *hselect) (uintptr, uint16) { // only 0 or 1 cases plus default into simpler constructs. // The only way we can end up with such small sel.ncase // values here is for a larger select in which most channels - // have been nilled out. The general code handles those + // have been nilled out. The general code handles those // cases correctly, and they are rare enough not to bother // optimizing (and needing to test). @@ -649,8 +649,8 @@ func (q *waitq) dequeueSudoG(sgp *sudog) { return } - // x==y==nil. Either sgp is the only element in the queue, - // or it has already been removed. Use q.first to disambiguate. + // x==y==nil. Either sgp is the only element in the queue, + // or it has already been removed. Use q.first to disambiguate. if q.first == sgp { q.first = nil q.last = nil diff --git a/src/runtime/signal1_unix.go b/src/runtime/signal1_unix.go index c848b129ce..8e4d425fde 100644 --- a/src/runtime/signal1_unix.go +++ b/src/runtime/signal1_unix.go @@ -212,11 +212,11 @@ func raisebadsignal(sig int32) { // Reset the signal handler and raise the signal. 
// We are currently running inside a signal handler, so the - // signal is blocked. We need to unblock it before raising the + // signal is blocked. We need to unblock it before raising the // signal, or the signal we raise will be ignored until we return - // from the signal handler. We know that the signal was unblocked + // from the signal handler. We know that the signal was unblocked // before entering the handler, or else we would not have received - // it. That means that we don't have to worry about blocking it + // it. That means that we don't have to worry about blocking it // again. unblocksig(sig) setsig(sig, handler, false) @@ -294,14 +294,14 @@ func ensureSigM() { // This is called when we receive a signal when there is no signal stack. // This can only happen if non-Go code calls sigaltstack to disable the -// signal stack. This is called via cgocallback to establish a stack. +// signal stack. This is called via cgocallback to establish a stack. func noSignalStack(sig uint32) { println("signal", sig, "received on thread with no signal stack") throw("non-Go code disabled sigaltstack") } // This is called if we receive a signal when there is a signal stack -// but we are not on it. This can only happen if non-Go code called +// but we are not on it. This can only happen if non-Go code called // sigaction without setting the SS_ONSTACK flag. func sigNotOnStack(sig uint32) { println("signal", sig, "received but handler not on signal stack") diff --git a/src/runtime/signal2_unix.go b/src/runtime/signal2_unix.go index e8c57b38ee..b137169940 100644 --- a/src/runtime/signal2_unix.go +++ b/src/runtime/signal2_unix.go @@ -12,7 +12,7 @@ import "unsafe" func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer) // Determines if the signal should be handled by Go and if not, forwards the -// signal to the handler that was installed before Go's. Returns whether the +// signal to the handler that was installed before Go's. 
Returns whether the // signal was forwarded. // This is called by the signal handler, and the world may be stopped. //go:nosplit @@ -54,7 +54,7 @@ func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool { if c.sigcode() == _SI_USER || flags&_SigPanic == 0 { return false } - // Determine if the signal occurred inside Go code. We test that: + // Determine if the signal occurred inside Go code. We test that: // (1) we were in a goroutine (i.e., m.curg != nil), and // (2) we weren't in CGO (i.e., m.curg.syscallsp == 0). g := getg() diff --git a/src/runtime/signal_386.go b/src/runtime/signal_386.go index 967f2b5299..f27cf9d8e2 100644 --- a/src/runtime/signal_386.go +++ b/src/runtime/signal_386.go @@ -84,7 +84,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) { // Only push runtime.sigpanic if pc != 0. // If pc == 0, probably panicked because of a - // call to a nil func. Not pushing that onto sp will + // call to a nil func. Not pushing that onto sp will // make the trace look like a call to runtime.sigpanic instead. // (Otherwise the trace will end at runtime.sigpanic and we // won't get to see who faulted.) diff --git a/src/runtime/signal_amd64x.go b/src/runtime/signal_amd64x.go index e5d4389d89..7b51fcc481 100644 --- a/src/runtime/signal_amd64x.go +++ b/src/runtime/signal_amd64x.go @@ -116,7 +116,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) { // Only push runtime.sigpanic if pc != 0. // If pc == 0, probably panicked because of a - // call to a nil func. Not pushing that onto sp will + // call to a nil func. Not pushing that onto sp will // make the trace look like a call to runtime.sigpanic instead. // (Otherwise the trace will end at runtime.sigpanic and we // won't get to see who faulted.) 
diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go index ab7183fe6b..6a53cf6452 100644 --- a/src/runtime/signal_windows.go +++ b/src/runtime/signal_windows.go @@ -82,7 +82,7 @@ func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32 { // Only push runtime·sigpanic if r.ip() != 0. // If r.ip() == 0, probably panicked because of a - // call to a nil func. Not pushing that onto sp will + // call to a nil func. Not pushing that onto sp will // make the trace look like a call to runtime·sigpanic instead. // (Otherwise the trace will end at runtime·sigpanic and we // won't get to see who faulted.) diff --git a/src/runtime/sigqueue.go b/src/runtime/sigqueue.go index 546d3abb24..e86e6a5636 100644 --- a/src/runtime/sigqueue.go +++ b/src/runtime/sigqueue.go @@ -12,7 +12,7 @@ // sigsend is called by the signal handler to queue a new signal. // signal_recv is called by the Go program to receive a newly queued signal. // Synchronization between sigsend and signal_recv is based on the sig.state -// variable. It can be in 3 states: sigIdle, sigReceiving and sigSending. +// variable. It can be in 3 states: sigIdle, sigReceiving and sigSending. // sigReceiving means that signal_recv is blocked on sig.Note and there are no // new pending signals. // sigSending means that sig.mask *may* contain new pending signals, @@ -136,7 +136,7 @@ func signal_recv() uint32 { func signal_enable(s uint32) { if !sig.inuse { // The first call to signal_enable is for us - // to use for initialization. It does not pass + // to use for initialization. It does not pass // signal information in m. sig.inuse = true // enable reception of signals; cannot disable noteclear(&sig.note) @@ -177,7 +177,7 @@ func signal_ignored(s uint32) bool { return sig.ignored[s/32]&(1<<(s&31)) != 0 } -// This runs on a foreign stack, without an m or a g. No stack split. +// This runs on a foreign stack, without an m or a g. No stack split. 
//go:nosplit //go:norace //go:nowritebarrierrec diff --git a/src/runtime/sigqueue_plan9.go b/src/runtime/sigqueue_plan9.go index 89f96be2e9..575d26afb4 100644 --- a/src/runtime/sigqueue_plan9.go +++ b/src/runtime/sigqueue_plan9.go @@ -115,7 +115,7 @@ func signal_recv() string { func signal_enable(s uint32) { if !sig.inuse { // The first call to signal_enable is for us - // to use for initialization. It does not pass + // to use for initialization. It does not pass // signal information in m. sig.inuse = true // enable reception of signals; cannot disable noteclear(&sig.note) diff --git a/src/runtime/sqrt.go b/src/runtime/sqrt.go index 1b130e3b01..d71a498c0a 100644 --- a/src/runtime/sqrt.go +++ b/src/runtime/sqrt.go @@ -11,7 +11,7 @@ package runtime // The original C code and the long comment below are // from FreeBSD's /usr/src/lib/msun/src/e_sqrt.c and -// came with this notice. The go code is a simplified +// came with this notice. The go code is a simplified // version of the original C. // // ==================================================== diff --git a/src/runtime/stack.go b/src/runtime/stack.go index d2466de653..f7865144d7 100644 --- a/src/runtime/stack.go +++ b/src/runtime/stack.go @@ -93,7 +93,7 @@ const ( _StackGuard = 720*sys.StackGuardMultiplier + _StackSystem // After a stack split check the SP is allowed to be this - // many bytes below the stack guard. This saves an instruction + // many bytes below the stack guard. This saves an instruction // in the checking sequence for tiny frames. _StackSmall = 128 @@ -180,13 +180,13 @@ func stacklog2(n uintptr) int { return log2 } -// Allocates a stack from the free pool. Must be called with +// Allocates a stack from the free pool. Must be called with // stackpoolmu held. func stackpoolalloc(order uint8) gclinkptr { list := &stackpool[order] s := list.first if s == nil { - // no free stacks. Allocate another span worth. + // no free stacks. Allocate another span worth. 
s = mheap_.allocStack(_StackCacheSize >> _PageShift) if s == nil { throw("out of memory") @@ -217,7 +217,7 @@ func stackpoolalloc(order uint8) gclinkptr { return x } -// Adds stack x to the free pool. Must be called with stackpoolmu held. +// Adds stack x to the free pool. Must be called with stackpoolmu held. func stackpoolfree(x gclinkptr, order uint8) { s := mheap_.lookup(unsafe.Pointer(x)) if s.state != _MSpanStack { diff --git a/src/runtime/stack_test.go b/src/runtime/stack_test.go index 9702e1e913..a32b68b630 100644 --- a/src/runtime/stack_test.go +++ b/src/runtime/stack_test.go @@ -416,9 +416,9 @@ func TestStackAllOutput(t *testing.T) { } func TestStackPanic(t *testing.T) { - // Test that stack copying copies panics correctly. This is difficult + // Test that stack copying copies panics correctly. This is difficult // to test because it is very unlikely that the stack will be copied - // in the middle of gopanic. But it can happen. + // in the middle of gopanic. But it can happen. // To make this test effective, edit panic.go:gopanic and uncomment // the GC() call just before freedefer(d). defer func() { diff --git a/src/runtime/string_test.go b/src/runtime/string_test.go index 37b75c1a89..292d5595e3 100644 --- a/src/runtime/string_test.go +++ b/src/runtime/string_test.go @@ -237,7 +237,7 @@ func TestRangeStringCast(t *testing.T) { func TestString2Slice(t *testing.T) { // Make sure we don't return slices that expose // an unzeroed section of stack-allocated temp buf - // between len and cap. See issue 14232. + // between len and cap. See issue 14232. s := "foož" b := ([]byte)(s) if cap(b) != 5 { diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index afea41448f..f1df93f9e4 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -163,8 +163,8 @@ const pcbucketsize = 256 * minfunc // size of bucket in the pc->func lookup tabl // Each bucket represents 4096 bytes of the text segment. // Each subbucket represents 256 bytes of the text segment. 
// To find a function given a pc, locate the bucket and subbucket for -// that pc. Add together the idx and subbucket value to obtain a -// function index. Then scan the functab array starting at that +// that pc. Add together the idx and subbucket value to obtain a +// function index. Then scan the functab array starting at that // index to find the target function. // This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead. type findfuncbucket struct { diff --git a/src/runtime/sys_darwin_386.s b/src/runtime/sys_darwin_386.s index ad3dca444a..9e45f8a002 100644 --- a/src/runtime/sys_darwin_386.s +++ b/src/runtime/sys_darwin_386.s @@ -377,7 +377,7 @@ TEXT runtime·bsdthread_start(SB),NOSPLIT,$0 POPL AX POPAL - // Now segment is established. Initialize m, g. + // Now segment is established. Initialize m, g. get_tls(BP) MOVL m_g0(DX), AX MOVL AX, g(BP) diff --git a/src/runtime/sys_dragonfly_amd64.s b/src/runtime/sys_dragonfly_amd64.s index 4e4d793c43..be964cb3ec 100644 --- a/src/runtime/sys_dragonfly_amd64.s +++ b/src/runtime/sys_dragonfly_amd64.s @@ -52,11 +52,11 @@ TEXT runtime·lwp_start(SB),NOSPLIT,$0 MOVQ DI, g(CX) // On DragonFly, a new thread inherits the signal stack of the - // creating thread. That confuses minit, so we remove that - // signal stack here before calling the regular mstart. It's + // creating thread. That confuses minit, so we remove that + // signal stack here before calling the regular mstart. It's // a bit baroque to remove a signal stack here only to add one // in minit, but it's a simple change that keeps DragonFly - // working like other OS's. At this point all signals are + // working like other OS's. At this point all signals are // blocked, so there is no race. 
SUBQ $8, SP MOVQ $0, 0(SP) diff --git a/src/runtime/sys_linux_386.s b/src/runtime/sys_linux_386.s index 4a74196032..3c7b17f927 100644 --- a/src/runtime/sys_linux_386.s +++ b/src/runtime/sys_linux_386.s @@ -355,7 +355,7 @@ TEXT runtime·clone(SB),NOSPLIT,$0 POPL AX POPAL - // Now segment is established. Initialize m, g. + // Now segment is established. Initialize m, g. get_tls(AX) MOVL DX, g(AX) MOVL BX, g_m(DX) diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s index f407078176..7cab649238 100644 --- a/src/runtime/sys_linux_amd64.s +++ b/src/runtime/sys_linux_amd64.s @@ -357,7 +357,7 @@ nog: // Call fn CALL R12 - // It shouldn't return. If it does, exit that thread. + // It shouldn't return. If it does, exit that thread. MOVL $111, DI MOVL $60, AX SYSCALL diff --git a/src/runtime/sys_linux_arm.s b/src/runtime/sys_linux_arm.s index 6a3b924330..50a551320a 100644 --- a/src/runtime/sys_linux_arm.s +++ b/src/runtime/sys_linux_arm.s @@ -313,7 +313,7 @@ nog: MOVW $16(R13), R13 BL (R0) - // It shouldn't return. If it does, exit that thread. + // It shouldn't return. If it does, exit that thread. SUB $16, R13 // restore the stack pointer to avoid memory corruption MOVW $0, R0 MOVW R0, 4(R13) diff --git a/src/runtime/sys_netbsd_386.s b/src/runtime/sys_netbsd_386.s index 3b3c1095f0..0322c36d1c 100644 --- a/src/runtime/sys_netbsd_386.s +++ b/src/runtime/sys_netbsd_386.s @@ -262,7 +262,7 @@ TEXT runtime·lwp_tramp(SB),NOSPLIT,$0 POPL AX POPAL - // Now segment is established. Initialize m, g. + // Now segment is established. Initialize m, g. get_tls(AX) MOVL DX, g(AX) MOVL BX, g_m(DX) diff --git a/src/runtime/sys_netbsd_amd64.s b/src/runtime/sys_netbsd_amd64.s index fb21f1155a..d6b5d35232 100644 --- a/src/runtime/sys_netbsd_amd64.s +++ b/src/runtime/sys_netbsd_amd64.s @@ -37,7 +37,7 @@ TEXT runtime·lwp_tramp(SB),NOSPLIT,$0 // Call fn CALL R12 - // It shouldn't return. If it does, exit. + // It shouldn't return. If it does, exit. 
MOVL $310, AX // sys__lwp_exit SYSCALL JMP -3(PC) // keep exiting diff --git a/src/runtime/sys_openbsd_386.s b/src/runtime/sys_openbsd_386.s index 769b2f9455..f80a85fb67 100644 --- a/src/runtime/sys_openbsd_386.s +++ b/src/runtime/sys_openbsd_386.s @@ -279,7 +279,7 @@ TEXT runtime·tfork(SB),NOSPLIT,$12 POPL AX POPAL - // Now segment is established. Initialize m, g. + // Now segment is established. Initialize m, g. get_tls(AX) MOVL DX, g(AX) MOVL BX, g_m(DX) diff --git a/src/runtime/sys_openbsd_amd64.s b/src/runtime/sys_openbsd_amd64.s index ed368babc0..c9fb8322c5 100644 --- a/src/runtime/sys_openbsd_amd64.s +++ b/src/runtime/sys_openbsd_amd64.s @@ -50,7 +50,7 @@ TEXT runtime·tfork(SB),NOSPLIT,$32 // Call fn CALL R12 - // It shouldn't return. If it does, exit + // It shouldn't return. If it does, exit MOVQ $0, DI // arg 1 - notdead MOVL $302, AX // sys___threxit SYSCALL diff --git a/src/runtime/sys_x86.go b/src/runtime/sys_x86.go index 9729671725..f6e45cc2dc 100644 --- a/src/runtime/sys_x86.go +++ b/src/runtime/sys_x86.go @@ -40,13 +40,13 @@ func rewindmorestack(buf *gobuf) { return } if pc[0] == 0xcc { - // This is a breakpoint inserted by gdb. We could use - // runtime·findfunc to find the function. But if we + // This is a breakpoint inserted by gdb. We could use + // runtime·findfunc to find the function. But if we // do that, then we will continue execution at the // function entry point, and we will not hit the gdb - // breakpoint. So for this case we don't change + // breakpoint. So for this case we don't change // buf.pc, so that when we return we will execute - // the jump instruction and carry on. This means that + // the jump instruction and carry on. This means that // stack unwinding may not work entirely correctly // (https://golang.org/issue/5723) but the user is // running under gdb anyhow. 
diff --git a/src/runtime/textflag.h b/src/runtime/textflag.h index 4739781530..e11c5dc3a2 100644 --- a/src/runtime/textflag.h +++ b/src/runtime/textflag.h @@ -3,12 +3,12 @@ // license that can be found in the LICENSE file. // This file defines flags attached to various functions -// and data objects. The compilers, assemblers, and linker must +// and data objects. The compilers, assemblers, and linker must // all agree on these values. -// Don't profile the marked routine. This flag is deprecated. +// Don't profile the marked routine. This flag is deprecated. #define NOPROF 1 -// It is ok for the linker to get multiple of these symbols. It will +// It is ok for the linker to get multiple of these symbols. It will // pick one of the duplicates to use. #define DUPOK 2 // Don't insert stack check preamble. diff --git a/src/runtime/time.go b/src/runtime/time.go index 3f8f6968c2..8df185dc8f 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -202,7 +202,7 @@ func timerproc() { goparkunlock(&timers.lock, "timer goroutine (idle)", traceEvGoBlock, 1) continue } - // At least one timer pending. Sleep until then. + // At least one timer pending. Sleep until then. timers.sleeping = true noteclear(&timers.waitnote) unlock(&timers.lock) diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index 1d76947380..1717624c1c 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -115,7 +115,7 @@ func tracebackdefers(gp *g, callback func(*stkframe, unsafe.Pointer) bool, v uns } } -// Generic traceback. Handles runtime stack prints (pcbuf == nil), +// Generic traceback. Handles runtime stack prints (pcbuf == nil), // the runtime.Callers function (pcbuf != nil), as well as the garbage // collector (callback != nil). A little clunky to merge these, but avoids // duplicating the code and all its subtlety. 
@@ -706,7 +706,7 @@ func tracebackothers(me *g) { goroutineheader(gp) // Note: gp.m == g.m occurs when tracebackothers is // called from a signal handler initiated during a - // systemstack call. The original G is still in the + // systemstack call. The original G is still in the // running state, and we want to print its stack. if gp.m != g.m && readgstatus(gp)&^_Gscan == _Grunning { print("\tgoroutine running on other thread; stack unavailable\n") diff --git a/src/runtime/vlop_386.s b/src/runtime/vlop_386.s index ce8e7d0643..92232d57bc 100644 --- a/src/runtime/vlop_386.s +++ b/src/runtime/vlop_386.s @@ -1,7 +1,7 @@ // Inferno's libkern/vlop-386.s // http://code.google.com/p/inferno-os/source/browse/libkern/vlop-386.s // -// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. // Portions Copyright 2009 The Go Authors. All rights reserved. // diff --git a/src/runtime/vlop_arm.s b/src/runtime/vlop_arm.s index ae1f58254a..338d9d5d09 100644 --- a/src/runtime/vlop_arm.s +++ b/src/runtime/vlop_arm.s @@ -1,7 +1,7 @@ // Inferno's libkern/vlop-arm.s // http://code.google.com/p/inferno-os/source/browse/libkern/vlop-arm.s // -// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. // Portions Copyright 2009 The Go Authors. All rights reserved. // diff --git a/src/runtime/vlrt.go b/src/runtime/vlrt.go index 6370732ca0..2419f78ce2 100644 --- a/src/runtime/vlrt.go +++ b/src/runtime/vlrt.go @@ -1,7 +1,7 @@ // Inferno's libkern/vlrt-arm.c // http://code.google.com/p/inferno-os/source/browse/libkern/vlrt-arm.c // -// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. // Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. // Portions Copyright 2009 The Go Authors. All rights reserved. // |
