diff options
| author | Ainar Garipov <gugl.zadolbal@gmail.com> | 2015-06-11 16:49:38 +0300 |
|---|---|---|
| committer | Brad Fitzpatrick <bradfitz@golang.org> | 2015-06-11 14:18:57 +0000 |
| commit | 7f9f70e5b65d116539b5c6ee586ea12988682a4f (patch) | |
| tree | e5790f78d97cd92a1642ceea499f4bdc963fa49d /src/runtime | |
| parent | 15c8ab00e88419b584eefd38ec6ddcdbea3e488c (diff) | |
| download | go-7f9f70e5b65d116539b5c6ee586ea12988682a4f.tar.xz | |
all: fix misprints in comments
These were found by grepping the comments from the go code and feeding
the output to aspell.
Change-Id: Id734d6c8d1938ec3c36bd94a4dbbad577e3ad395
Reviewed-on: https://go-review.googlesource.com/10941
Reviewed-by: Aamir Khan <syst3m.w0rm@gmail.com>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Diffstat (limited to 'src/runtime')
| -rw-r--r-- | src/runtime/lfstack_test.go | 2 | ||||
| -rw-r--r-- | src/runtime/mbitmap.go | 2 | ||||
| -rw-r--r-- | src/runtime/mgc.go | 2 | ||||
| -rw-r--r-- | src/runtime/mgcmark.go | 2 | ||||
| -rw-r--r-- | src/runtime/mgcwork.go | 4 | ||||
| -rw-r--r-- | src/runtime/mheap.go | 2 | ||||
| -rw-r--r-- | src/runtime/netpoll.go | 2 | ||||
| -rw-r--r-- | src/runtime/netpoll_solaris.go | 2 | ||||
| -rw-r--r-- | src/runtime/panic.go | 2 | ||||
| -rw-r--r-- | src/runtime/proc1.go | 6 | ||||
| -rw-r--r-- | src/runtime/race/testdata/select_test.go | 2 | ||||
| -rw-r--r-- | src/runtime/runtime2.go | 4 | ||||
| -rw-r--r-- | src/runtime/slice.go | 2 | ||||
| -rw-r--r-- | src/runtime/sqrt_test.go | 2 | ||||
| -rw-r--r-- | src/runtime/sys_nacl_386.s | 2 |
15 files changed, 19 insertions, 19 deletions
diff --git a/src/runtime/lfstack_test.go b/src/runtime/lfstack_test.go index 4da4d88619..fb4b45992d 100644 --- a/src/runtime/lfstack_test.go +++ b/src/runtime/lfstack_test.go @@ -30,7 +30,7 @@ func TestLFStack(t *testing.T) { stack := new(uint64) global = stack // force heap allocation - // Need to keep additional referenfces to nodes, the stack is not all that type-safe. + // Need to keep additional references to nodes, the stack is not all that type-safe. var nodes []*MyNode // Check the stack is initially empty. diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go index a4090b9a08..a1d5d8fc81 100644 --- a/src/runtime/mbitmap.go +++ b/src/runtime/mbitmap.go @@ -355,7 +355,7 @@ func (h heapBits) setCheckmarked(size uintptr) { // Callers should call heapBitsBulkBarrier immediately after // calling memmove(p, src, size). This function is marked nosplit // to avoid being preempted; the GC must not stop the goroutine -// betwen the memmove and the execution of the barriers. +// between the memmove and the execution of the barriers. // // The heap bitmap is not maintained for allocations containing // no pointers at all; any caller of heapBitsBulkBarrier must first diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index c2ee16383e..53d6797c52 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -322,7 +322,7 @@ type gcControllerState struct { fractionalMarkTime int64 // idleMarkTime is the nanoseconds spent in idle marking - // during this cycle. This is udpated atomically throughout + // during this cycle. This is updated atomically throughout // the cycle. idleMarkTime int64 diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index 2c076734bd..dead22a312 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -954,7 +954,7 @@ func gcmarknewobject_m(obj, size uintptr) { // stopped ensuring that any object encountered has their normal // mark bit set. To do this we use an orthogonal bit // pattern to indicate the object is marked. 
The following pattern -// uses the upper two bits in the object's bounday nibble. +// uses the upper two bits in the object's boundary nibble. // 01: scalar not marked // 10: pointer not marked // 11: pointer marked diff --git a/src/runtime/mgcwork.go b/src/runtime/mgcwork.go index b7feb847b4..226c65635f 100644 --- a/src/runtime/mgcwork.go +++ b/src/runtime/mgcwork.go @@ -308,7 +308,7 @@ func putfull(b *workbuf, entry int) { // getpartialorempty tries to return a partially empty // and if none are available returns an empty one. -// entry is used to provide a brief histoy of ownership +// entry is used to provide a brief history of ownership // using entry + xxx00000 to // indicating that two line numbers in the call chain. //go:nowritebarrier @@ -328,7 +328,7 @@ func getpartialorempty(entry int) *workbuf { // putpartial puts empty buffers on the work.empty queue, // full buffers on the work.full queue and // others on the work.partial queue. -// entry is used to provide a brief histoy of ownership +// entry is used to provide a brief history of ownership // using entry + xxx00000 to // indicating that two call chain line numbers. //go:nowritebarrier diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index 04fa050bc5..06d6fb15f7 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -58,7 +58,7 @@ type mheap struct { cachealloc fixalloc // allocator for mcache* specialfinalizeralloc fixalloc // allocator for specialfinalizer* specialprofilealloc fixalloc // allocator for specialprofile* - speciallock mutex // lock for sepcial record allocators. + speciallock mutex // lock for special record allocators. } var mheap_ mheap diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go index c06722fb98..7c6e3fa93e 100644 --- a/src/runtime/netpoll.go +++ b/src/runtime/netpoll.go @@ -46,7 +46,7 @@ type pollDesc struct { // in a lock-free way by all operations. 
// NOTE(dvyukov): the following code uses uintptr to store *g (rg/wg), // that will blow up when GC starts moving objects. - lock mutex // protectes the following fields + lock mutex // protects the following fields fd uintptr closing bool seq uintptr // protects from stale timers and ready notifications diff --git a/src/runtime/netpoll_solaris.go b/src/runtime/netpoll_solaris.go index 359fd47b2a..e4652d8ebd 100644 --- a/src/runtime/netpoll_solaris.go +++ b/src/runtime/netpoll_solaris.go @@ -32,7 +32,7 @@ import "unsafe" // Beside calling runtime·netpollopen, the networking code paths // will call runtime·netpollarm each time goroutines are interested // in doing network I/O. Because now we know what kind of I/O we -// are interested in (reading/writting), we can call port_associate +// are interested in (reading/writing), we can call port_associate // passing the correct type of event set (POLLIN/POLLOUT). As we made // sure to have already associated the file descriptor with the port, // when we now call port_associate, we will unblock the main poller diff --git a/src/runtime/panic.go b/src/runtime/panic.go index 47563f450e..c8158b9dec 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -411,7 +411,7 @@ func gopanic(e interface{}) { // Mark defer as started, but keep on list, so that traceback // can find and update the defer's argument frame if stack growth - // or a garbage collection hapens before reflectcall starts executing d.fn. + // or a garbage collection happens before reflectcall starts executing d.fn. d.started = true // Record the panic that is running the defer. diff --git a/src/runtime/proc1.go b/src/runtime/proc1.go index 0f93ff7620..c179c5aea7 100644 --- a/src/runtime/proc1.go +++ b/src/runtime/proc1.go @@ -1960,7 +1960,7 @@ func exitsyscall(dummy int32) { var exitTicks int64 if trace.enabled { - // Wait till traceGoSysBlock event is emited. + // Wait till traceGoSysBlock event is emitted. 
// This ensures consistency of the trace (the goroutine is started after it is blocked). for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { osyield() @@ -2047,7 +2047,7 @@ func exitsyscallfast() bool { ok = exitsyscallfast_pidle() if ok && trace.enabled { if oldp != nil { - // Wait till traceGoSysBlock event is emited. + // Wait till traceGoSysBlock event is emitted. // This ensures consistency of the trace (the goroutine is started after it is blocked). for oldp.syscalltick == _g_.m.syscalltick { osyield() @@ -2695,7 +2695,7 @@ func procresize(nprocs int32) *p { traceProcStop(p) } } - // move all runable goroutines to the global queue + // move all runnable goroutines to the global queue for p.runqhead != p.runqtail { // pop from tail of local queue p.runqtail-- diff --git a/src/runtime/race/testdata/select_test.go b/src/runtime/race/testdata/select_test.go index 4a3a236479..b4b1991155 100644 --- a/src/runtime/race/testdata/select_test.go +++ b/src/runtime/race/testdata/select_test.go @@ -236,7 +236,7 @@ func TestRaceSelect4(t *testing.T) { // there are two variables, access to one // of them is synchronized, access to the other // is not. -// Select must (unconditionaly) choose the non-synchronized variable +// Select must (unconditionally) choose the non-synchronized variable // thus causing exactly one race. // Currently this test doesn't look like it accomplishes // this goal. diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 81555af6af..55d153bc15 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -27,7 +27,7 @@ const ( // the following encode that the GC is scanning the stack and what to do when it is done _Gscan = 0x1000 // atomicstatus&~Gscan = the non-scan state, // _Gscanidle = _Gscan + _Gidle, // Not used. 
Gidle only used with newly malloced gs - _Gscanrunnable = _Gscan + _Grunnable // 0x1001 When scanning complets make Grunnable (it is already on run queue) + _Gscanrunnable = _Gscan + _Grunnable // 0x1001 When scanning completes make Grunnable (it is already on run queue) _Gscanrunning = _Gscan + _Grunning // 0x1002 Used to tell preemption newstack routine to scan preempted stack. _Gscansyscall = _Gscan + _Gsyscall // 0x1003 When scanning completes make it Gsyscall _Gscanwaiting = _Gscan + _Gwaiting // 0x1004 When scanning completes make it Gwaiting @@ -237,7 +237,7 @@ type g struct { preempt bool // preemption signal, duplicates stackguard0 = stackpreempt paniconfault bool // panic (instead of crash) on unexpected fault address preemptscan bool // preempted g does scan for gc - gcworkdone bool // debug: cleared at begining of gc work phase cycle, set by gcphasework, tested at end of cycle + gcworkdone bool // debug: cleared at beginning of gc work phase cycle, set by gcphasework, tested at end of cycle gcscanvalid bool // false at start of gc cycle, true if G has not run since last scan throwsplit bool // must not split stack raceignore int8 // ignore race detection events diff --git a/src/runtime/slice.go b/src/runtime/slice.go index 79b611839d..15820a5181 100644 --- a/src/runtime/slice.go +++ b/src/runtime/slice.go @@ -83,7 +83,7 @@ func growslice(t *slicetype, old slice, n int) slice { memmove(p, old.array, lenmem) memclr(add(p, lenmem), capmem-lenmem) } else { - // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan unitialized memory. + // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory. 
p = newarray(et, uintptr(newcap)) if !writeBarrierEnabled { memmove(p, old.array, lenmem) diff --git a/src/runtime/sqrt_test.go b/src/runtime/sqrt_test.go index f1a6e8369d..d5ccc7fb1d 100644 --- a/src/runtime/sqrt_test.go +++ b/src/runtime/sqrt_test.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // A copy of Sqrt tests from the math package to test the -// purely integer arithmetic implementaiton in sqrt.go. +// purely integer arithmetic implementation in sqrt.go. package runtime_test diff --git a/src/runtime/sys_nacl_386.s b/src/runtime/sys_nacl_386.s index f8e7196bd3..bf2d36ec85 100644 --- a/src/runtime/sys_nacl_386.s +++ b/src/runtime/sys_nacl_386.s @@ -322,7 +322,7 @@ ret: // Enable exceptions again. NACL_SYSCALL(SYS_exception_clear_flag) - // NaCl has abidcated its traditional operating system responsibility + // NaCl has abdicated its traditional operating system responsibility // and declined to implement 'sigreturn'. Instead the only way to return // to the execution of our program is to restore the registers ourselves. // Unfortunately, that is impossible to do with strict fidelity, because |
