diff options
| author | Russ Cox <rsc@golang.org> | 2022-01-29 19:07:27 -0500 |
|---|---|---|
| committer | Russ Cox <rsc@golang.org> | 2022-04-01 18:18:01 +0000 |
| commit | 7d87ccc860dc31c0cd60faf00720e2f30fd37efb (patch) | |
| tree | 3dc01e7fa04a4683de0583774fab0e881bae7193 /src/runtime | |
| parent | df89f2ba53aab53356be197c581d142cefc2c6bc (diff) | |
| download | go-7d87ccc860dc31c0cd60faf00720e2f30fd37efb.tar.xz | |
all: fix various doc comment formatting nits
A run of lines that are indented with any number of spaces or tabs
format as a <pre> block. This commit fixes various doc comments
that format badly according to that (standard) rule.
For example, consider:
// - List item.
// Second line.
// - Another item.
Because the - lines are unindented, this is actually two paragraphs
separated by a one-line <pre> block. This CL rewrites it to:
//   - List item.
//     Second line.
//   - Another item.
Today, that will format as a single <pre> block.
In a future release, we hope to format it as a bulleted list.
Various other minor fixes as well, all in preparation for reformatting.
For #51082.
Change-Id: I95cf06040d4186830e571cd50148be3bf8daf189
Reviewed-on: https://go-review.googlesource.com/c/go/+/384257
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
TryBot-Result: Gopher Robot <gobot@golang.org>
Diffstat (limited to 'src/runtime')
| -rw-r--r-- | src/runtime/chan.go | 8 | ||||
| -rw-r--r-- | src/runtime/mheap.go | 18 | ||||
| -rw-r--r-- | src/runtime/netpoll.go | 20 | ||||
| -rw-r--r-- | src/runtime/proc.go | 10 | ||||
| -rw-r--r-- | src/runtime/runtime2.go | 6 | ||||
| -rw-r--r-- | src/runtime/string.go | 8 | ||||
| -rw-r--r-- | src/runtime/symtab_test.go | 16 | ||||
| -rw-r--r-- | src/runtime/type.go | 2 |
8 files changed, 44 insertions, 44 deletions
diff --git a/src/runtime/chan.go b/src/runtime/chan.go index 3cdb5dce11..16fec26aeb 100644 --- a/src/runtime/chan.go +++ b/src/runtime/chan.go @@ -594,10 +594,10 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) // recv processes a receive operation on a full channel c. // There are 2 parts: -// 1) The value sent by the sender sg is put into the channel -// and the sender is woken up to go on its merry way. -// 2) The value received by the receiver (the current G) is -// written to ep. +// 1) The value sent by the sender sg is put into the channel +// and the sender is woken up to go on its merry way. +// 2) The value received by the receiver (the current G) is +// written to ep. // For synchronous channels, both values are the same. // For asynchronous channels, the receiver gets its data from // the channel buffer and the sender's data is put in the diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index ecbd0a3a49..d2a63d0938 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -319,16 +319,16 @@ type arenaHint struct { // mSpanManual, or mSpanFree. Transitions between these states are // constrained as follows: // -// * A span may transition from free to in-use or manual during any GC -// phase. +// * A span may transition from free to in-use or manual during any GC +// phase. // -// * During sweeping (gcphase == _GCoff), a span may transition from -// in-use to free (as a result of sweeping) or manual to free (as a -// result of stacks being freed). +// * During sweeping (gcphase == _GCoff), a span may transition from +// in-use to free (as a result of sweeping) or manual to free (as a +// result of stacks being freed). // -// * During GC (gcphase != _GCoff), a span *must not* transition from -// manual or in-use to free. Because concurrent GC may read a pointer -// and then look up its span, the span state must be monotonic. 
+// * During GC (gcphase != _GCoff), a span *must not* transition from +// manual or in-use to free. Because concurrent GC may read a pointer +// and then look up its span, the span state must be monotonic. // // Setting mspan.state to mSpanInUse or mSpanManual must be done // atomically and only after all other span fields are valid. @@ -1706,7 +1706,7 @@ func spanHasNoSpecials(s *mspan) { // offset & next, which this routine will fill in. // Returns true if the special was successfully added, false otherwise. // (The add will fail only if a record with the same p and s->kind -// already exists.) +// already exists.) func addspecial(p unsafe.Pointer, s *special) bool { span := spanOfHeap(uintptr(p)) if span == nil { diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go index c6acc2328a..864148b715 100644 --- a/src/runtime/netpoll.go +++ b/src/runtime/netpoll.go @@ -47,16 +47,16 @@ const ( // pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer // goroutines respectively. The semaphore can be in the following states: -// pdReady - io readiness notification is pending; -// a goroutine consumes the notification by changing the state to nil. -// pdWait - a goroutine prepares to park on the semaphore, but not yet parked; -// the goroutine commits to park by changing the state to G pointer, -// or, alternatively, concurrent io notification changes the state to pdReady, -// or, alternatively, concurrent timeout/close changes the state to nil. -// G pointer - the goroutine is blocked on the semaphore; -// io notification or timeout/close changes the state to pdReady or nil respectively -// and unparks the goroutine. -// nil - none of the above. +// pdReady - io readiness notification is pending; +// a goroutine consumes the notification by changing the state to nil. 
+// pdWait - a goroutine prepares to park on the semaphore, but not yet parked; +// the goroutine commits to park by changing the state to G pointer, +// or, alternatively, concurrent io notification changes the state to pdReady, +// or, alternatively, concurrent timeout/close changes the state to nil. +// G pointer - the goroutine is blocked on the semaphore; +// io notification or timeout/close changes the state to pdReady or nil respectively +// and unparks the goroutine. +// nil - none of the above. const ( pdReady uintptr = 1 pdWait uintptr = 2 diff --git a/src/runtime/proc.go b/src/runtime/proc.go index df16e0f9b6..f9f82f3867 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -5575,11 +5575,11 @@ func (p pMask) clear(id int32) { // // Thus, we get the following effects on timer-stealing in findrunnable: // -// * Idle Ps with no timers when they go idle are never checked in findrunnable -// (for work- or timer-stealing; this is the ideal case). -// * Running Ps must always be checked. -// * Idle Ps whose timers are stolen must continue to be checked until they run -// again, even after timer expiration. +// * Idle Ps with no timers when they go idle are never checked in findrunnable +// (for work- or timer-stealing; this is the ideal case). +// * Running Ps must always be checked. +// * Idle Ps whose timers are stolen must continue to be checked until they run +// again, even after timer expiration. // // When the P starts running again, the mask should be set, as a timer may be // added at any time. diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 1fb9e195e5..dc18bf927e 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -291,10 +291,10 @@ func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) } // Because we do free Ms, there are some additional constrains on // muintptrs: // -// 1. Never hold an muintptr locally across a safe point. +// 1. Never hold an muintptr locally across a safe point. 
// -// 2. Any muintptr in the heap must be owned by the M itself so it can -// ensure it is not in use when the last true *m is released. +// 2. Any muintptr in the heap must be owned by the M itself so it can +// ensure it is not in use when the last true *m is released. type muintptr uintptr //go:nosplit diff --git a/src/runtime/string.go b/src/runtime/string.go index 980a9866e6..eec29075b9 100644 --- a/src/runtime/string.go +++ b/src/runtime/string.go @@ -147,10 +147,10 @@ func rawstringtmp(buf *tmpBuf, l int) (s string, b []byte) { // and otherwise intrinsified by the compiler. // // Some internal compiler optimizations use this function. -// - Used for m[T1{... Tn{..., string(k), ...} ...}] and m[string(k)] -// where k is []byte, T1 to Tn is a nesting of struct and array literals. -// - Used for "<"+string(b)+">" concatenation where b is []byte. -// - Used for string(b)=="foo" comparison where b is []byte. +// - Used for m[T1{... Tn{..., string(k), ...} ...}] and m[string(k)] +// where k is []byte, T1 to Tn is a nesting of struct and array literals. +// - Used for "<"+string(b)+">" concatenation where b is []byte. +// - Used for string(b)=="foo" comparison where b is []byte. func slicebytetostringtmp(ptr *byte, n int) (str string) { if raceenabled && n > 0 { racereadrangepc(unsafe.Pointer(ptr), diff --git a/src/runtime/symtab_test.go b/src/runtime/symtab_test.go index 99ff0d4420..a83afc3385 100644 --- a/src/runtime/symtab_test.go +++ b/src/runtime/symtab_test.go @@ -205,15 +205,15 @@ func tracebackFunc(t *testing.T) uintptr { // Go obviously doesn't easily expose the problematic PCs to running programs, // so this test is a bit fragile. Some details: // -// * tracebackFunc is our target function. We want to get a PC in the -// alignment region following this function. This function also has other -// functions inlined into it to ensure it has an InlTree (this was the source -// of the bug in issue 44971). +// * tracebackFunc is our target function. 
We want to get a PC in the +// alignment region following this function. This function also has other +// functions inlined into it to ensure it has an InlTree (this was the source +// of the bug in issue 44971). // -// * We acquire a PC in tracebackFunc, walking forwards until FuncForPC says -// we're in a new function. The last PC of the function according to FuncForPC -// should be in the alignment region (assuming the function isn't already -// perfectly aligned). +// * We acquire a PC in tracebackFunc, walking forwards until FuncForPC says +// we're in a new function. The last PC of the function according to FuncForPC +// should be in the alignment region (assuming the function isn't already +// perfectly aligned). // // This is a regression test for issue 44971. func TestFunctionAlignmentTraceback(t *testing.T) { diff --git a/src/runtime/type.go b/src/runtime/type.go index da47147897..a00394f3b3 100644 --- a/src/runtime/type.go +++ b/src/runtime/type.go @@ -17,7 +17,7 @@ import ( // cmd/compile/internal/reflectdata/reflect.go // cmd/link/internal/ld/decodesym.go // reflect/type.go -// internal/reflectlite/type.go +// internal/reflectlite/type.go type tflag uint8 const ( |
