aboutsummaryrefslogtreecommitdiff
path: root/src/cmd/compile
diff options
context:
space:
mode:
authorBrad Fitzpatrick <bradfitz@golang.org>2016-03-01 23:21:55 +0000
committerBrad Fitzpatrick <bradfitz@golang.org>2016-03-02 00:13:47 +0000
commit5fea2ccc77eb50a9704fa04b7c61755fe34e1d95 (patch)
tree00137f90183ae2a01ca42249e04e9e4dabdf6249 /src/cmd/compile
parent8b4deb448e587802f67930b765c9598fc8cd36e5 (diff)
downloadgo-5fea2ccc77eb50a9704fa04b7c61755fe34e1d95.tar.xz
all: single space after period.
The tree's pretty inconsistent about single space vs double space after a period in documentation. Make it consistently a single space, per earlier decisions. This means contributors won't be confused by misleading precedence. This CL doesn't use go/doc to parse. It only addresses // comments. It was generated with: $ perl -i -npe 's,^(\s*// .+[a-z]\.) +([A-Z]),$1 $2,' $(git grep -l -E '^\s*//(.+\.) +([A-Z])') $ go test go/doc -update Change-Id: Iccdb99c37c797ef1f804a94b22ba5ee4b500c4f7 Reviewed-on: https://go-review.googlesource.com/20022 Reviewed-by: Rob Pike <r@golang.org> Reviewed-by: Dave Day <djd@golang.org> Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org> TryBot-Result: Gobot Gobot <gobot@golang.org>
Diffstat (limited to 'src/cmd/compile')
-rw-r--r--src/cmd/compile/internal/amd64/peep.go4
-rw-r--r--src/cmd/compile/internal/arm/cgen64.go2
-rw-r--r--src/cmd/compile/internal/arm64/cgen.go2
-rw-r--r--src/cmd/compile/internal/big/arith_test.go2
-rw-r--r--src/cmd/compile/internal/big/nat.go4
-rw-r--r--src/cmd/compile/internal/big/rat.go4
-rw-r--r--src/cmd/compile/internal/big/ratconv_test.go2
-rw-r--r--src/cmd/compile/internal/gc/alg.go2
-rw-r--r--src/cmd/compile/internal/gc/align.go4
-rw-r--r--src/cmd/compile/internal/gc/bimport.go2
-rw-r--r--src/cmd/compile/internal/gc/builtin/runtime.go2
-rw-r--r--src/cmd/compile/internal/gc/builtin/unsafe.go2
-rw-r--r--src/cmd/compile/internal/gc/cgen.go2
-rw-r--r--src/cmd/compile/internal/gc/dcl.go4
-rw-r--r--src/cmd/compile/internal/gc/esc.go22
-rw-r--r--src/cmd/compile/internal/gc/fmt.go4
-rw-r--r--src/cmd/compile/internal/gc/global_test.go6
-rw-r--r--src/cmd/compile/internal/gc/inl.go10
-rw-r--r--src/cmd/compile/internal/gc/order.go2
-rw-r--r--src/cmd/compile/internal/gc/parser.go2
-rw-r--r--src/cmd/compile/internal/gc/plive.go82
-rw-r--r--src/cmd/compile/internal/gc/reflect.go4
-rw-r--r--src/cmd/compile/internal/gc/ssa.go86
-rw-r--r--src/cmd/compile/internal/gc/subr.go8
-rw-r--r--src/cmd/compile/internal/gc/testdata/addressed_ssa.go2
-rw-r--r--src/cmd/compile/internal/gc/testdata/gen/arithBoundaryGen.go2
-rw-r--r--src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go2
-rw-r--r--src/cmd/compile/internal/gc/testdata/loadstore_ssa.go2
-rw-r--r--src/cmd/compile/internal/gc/testdata/phi_ssa.go2
-rw-r--r--src/cmd/compile/internal/gc/testdata/unsafe_ssa.go4
-rw-r--r--src/cmd/compile/internal/gc/type.go2
-rw-r--r--src/cmd/compile/internal/gc/typecheck.go10
-rw-r--r--src/cmd/compile/internal/gc/walk.go6
-rw-r--r--src/cmd/compile/internal/mips64/cgen.go2
-rw-r--r--src/cmd/compile/internal/mips64/peep.go6
-rw-r--r--src/cmd/compile/internal/ppc64/cgen.go2
-rw-r--r--src/cmd/compile/internal/ppc64/gsubr.go4
-rw-r--r--src/cmd/compile/internal/ppc64/opt.go2
-rw-r--r--src/cmd/compile/internal/ppc64/peep.go6
-rw-r--r--src/cmd/compile/internal/ppc64/prog.go4
-rw-r--r--src/cmd/compile/internal/ssa/block.go14
-rw-r--r--src/cmd/compile/internal/ssa/check.go2
-rw-r--r--src/cmd/compile/internal/ssa/compile.go6
-rw-r--r--src/cmd/compile/internal/ssa/config.go4
-rw-r--r--src/cmd/compile/internal/ssa/cse.go12
-rw-r--r--src/cmd/compile/internal/ssa/deadcode.go18
-rw-r--r--src/cmd/compile/internal/ssa/deadstore.go8
-rw-r--r--src/cmd/compile/internal/ssa/deadstore_test.go2
-rw-r--r--src/cmd/compile/internal/ssa/dom.go14
-rw-r--r--src/cmd/compile/internal/ssa/flagalloc.go8
-rw-r--r--src/cmd/compile/internal/ssa/func.go8
-rw-r--r--src/cmd/compile/internal/ssa/gen/AMD64.rules10
-rw-r--r--src/cmd/compile/internal/ssa/gen/AMD64Ops.go8
-rw-r--r--src/cmd/compile/internal/ssa/gen/generic.rules2
-rw-r--r--src/cmd/compile/internal/ssa/gen/genericOps.go18
-rw-r--r--src/cmd/compile/internal/ssa/gen/main.go4
-rw-r--r--src/cmd/compile/internal/ssa/gen/rulegen.go20
-rwxr-xr-xsrc/cmd/compile/internal/ssa/likelyadjust.go6
-rw-r--r--src/cmd/compile/internal/ssa/magic.go2
-rw-r--r--src/cmd/compile/internal/ssa/nilcheck.go2
-rw-r--r--src/cmd/compile/internal/ssa/op.go2
-rw-r--r--src/cmd/compile/internal/ssa/phielim.go8
-rw-r--r--src/cmd/compile/internal/ssa/regalloc.go90
-rw-r--r--src/cmd/compile/internal/ssa/rewrite.go2
-rw-r--r--src/cmd/compile/internal/ssa/schedule.go20
-rw-r--r--src/cmd/compile/internal/ssa/sparsetree.go4
-rw-r--r--src/cmd/compile/internal/ssa/stackalloc.go10
-rw-r--r--src/cmd/compile/internal/ssa/value.go12
-rw-r--r--src/cmd/compile/internal/x86/cgen64.go2
-rw-r--r--src/cmd/compile/internal/x86/gsubr.go2
-rw-r--r--src/cmd/compile/internal/x86/peep.go2
71 files changed, 322 insertions, 322 deletions
diff --git a/src/cmd/compile/internal/amd64/peep.go b/src/cmd/compile/internal/amd64/peep.go
index 810214504f..b24c92cf69 100644
--- a/src/cmd/compile/internal/amd64/peep.go
+++ b/src/cmd/compile/internal/amd64/peep.go
@@ -252,14 +252,14 @@ loop1:
// MOVLQZX removal.
// The MOVLQZX exists to avoid being confused for a
// MOVL that is just copying 32-bit data around during
- // copyprop. Now that copyprop is done, remov MOVLQZX R1, R2
+ // copyprop. Now that copyprop is done, remov MOVLQZX R1, R2
// if it is dominated by an earlier ADDL/MOVL/etc into R1 that
// will have already cleared the high bits.
//
// MOVSD removal.
// We never use packed registers, so a MOVSD between registers
// can be replaced by MOVAPD, which moves the pair of float64s
- // instead of just the lower one. We only use the lower one, but
+ // instead of just the lower one. We only use the lower one, but
// the processor can do better if we do moves using both.
for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
p = r.Prog
diff --git a/src/cmd/compile/internal/arm/cgen64.go b/src/cmd/compile/internal/arm/cgen64.go
index d46d5a8660..9cda561069 100644
--- a/src/cmd/compile/internal/arm/cgen64.go
+++ b/src/cmd/compile/internal/arm/cgen64.go
@@ -126,7 +126,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
var ah gc.Node
gc.Regalloc(&ah, hi1.Type, nil)
- // Do op. Leave result in ah:al.
+ // Do op. Leave result in ah:al.
switch n.Op {
default:
gc.Fatalf("cgen64: not implemented: %v\n", n)
diff --git a/src/cmd/compile/internal/arm64/cgen.go b/src/cmd/compile/internal/arm64/cgen.go
index a7f1c18b55..e8a5c14761 100644
--- a/src/cmd/compile/internal/arm64/cgen.go
+++ b/src/cmd/compile/internal/arm64/cgen.go
@@ -129,7 +129,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
// TODO(austin): Instead of generating ADD $-8,R8; ADD
// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
// generate the offsets directly and eliminate the
- // ADDs. That will produce shorter, more
+ // ADDs. That will produce shorter, more
// pipeline-able code.
var p *obj.Prog
for ; c > 0; c-- {
diff --git a/src/cmd/compile/internal/big/arith_test.go b/src/cmd/compile/internal/big/arith_test.go
index f46a494f17..ea8e82d0b6 100644
--- a/src/cmd/compile/internal/big/arith_test.go
+++ b/src/cmd/compile/internal/big/arith_test.go
@@ -442,7 +442,7 @@ func benchmarkBitLenN(b *testing.B, nbits uint) {
}
}
-// Individual bitLen tests. Numbers chosen to examine both sides
+// Individual bitLen tests. Numbers chosen to examine both sides
// of powers-of-two boundaries.
func BenchmarkBitLen0(b *testing.B) { benchmarkBitLenN(b, 0) }
func BenchmarkBitLen1(b *testing.B) { benchmarkBitLenN(b, 1) }
diff --git a/src/cmd/compile/internal/big/nat.go b/src/cmd/compile/internal/big/nat.go
index 79cf6e07f7..7668b6481b 100644
--- a/src/cmd/compile/internal/big/nat.go
+++ b/src/cmd/compile/internal/big/nat.go
@@ -647,7 +647,7 @@ func trailingZeroBits(x Word) uint {
// x & -x leaves only the right-most bit set in the word. Let k be the
// index of that bit. Since only a single bit is set, the value is two
// to the power of k. Multiplying by a power of two is equivalent to
- // left shifting, in this case by k bits. The de Bruijn constant is
+ // left shifting, in this case by k bits. The de Bruijn constant is
// such that all six bit, consecutive substrings are distinct.
// Therefore, if we have a left shifted version of this constant we can
// find by how many bits it was shifted by looking at which six bit
@@ -1018,7 +1018,7 @@ func (z nat) expNNWindowed(x, y, m nat) nat {
for j := 0; j < _W; j += n {
if i != len(y)-1 || j != 0 {
// Unrolled loop for significant performance
- // gain. Use go test -bench=".*" in crypto/rsa
+ // gain. Use go test -bench=".*" in crypto/rsa
// to check performance before making changes.
zz = zz.mul(z, z)
zz, z = z, zz
diff --git a/src/cmd/compile/internal/big/rat.go b/src/cmd/compile/internal/big/rat.go
index 2cd9ed0938..56ce33d882 100644
--- a/src/cmd/compile/internal/big/rat.go
+++ b/src/cmd/compile/internal/big/rat.go
@@ -63,7 +63,7 @@ func (z *Rat) SetFloat64(f float64) *Rat {
// quotToFloat32 returns the non-negative float32 value
// nearest to the quotient a/b, using round-to-even in
-// halfway cases. It does not mutate its arguments.
+// halfway cases. It does not mutate its arguments.
// Preconditions: b is non-zero; a and b have no common factors.
func quotToFloat32(a, b nat) (f float32, exact bool) {
const (
@@ -161,7 +161,7 @@ func quotToFloat32(a, b nat) (f float32, exact bool) {
// quotToFloat64 returns the non-negative float64 value
// nearest to the quotient a/b, using round-to-even in
-// halfway cases. It does not mutate its arguments.
+// halfway cases. It does not mutate its arguments.
// Preconditions: b is non-zero; a and b have no common factors.
func quotToFloat64(a, b nat) (f float64, exact bool) {
const (
diff --git a/src/cmd/compile/internal/big/ratconv_test.go b/src/cmd/compile/internal/big/ratconv_test.go
index da2fdab4ca..17bda47637 100644
--- a/src/cmd/compile/internal/big/ratconv_test.go
+++ b/src/cmd/compile/internal/big/ratconv_test.go
@@ -137,7 +137,7 @@ func TestFloatString(t *testing.T) {
}
}
-// Test inputs to Rat.SetString. The prefix "long:" causes the test
+// Test inputs to Rat.SetString. The prefix "long:" causes the test
// to be skipped in --test.short mode. (The threshold is about 500us.)
var float64inputs = []string{
// Constants plundered from strconv/testfp.txt.
diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go
index a0ff4890c1..36cd1198a0 100644
--- a/src/cmd/compile/internal/gc/alg.go
+++ b/src/cmd/compile/internal/gc/alg.go
@@ -406,7 +406,7 @@ func geneq(sym *Sym, t *Type) {
// An array of pure memory would be handled by the
// standard memequal, so the element type must not be
- // pure memory. Even if we unrolled the range loop,
+ // pure memory. Even if we unrolled the range loop,
// each iteration would be a function call, so don't bother
// unrolling.
nrange := Nod(ORANGE, nil, Nod(OIND, np, nil))
diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go
index 812a8cb150..f0122aff97 100644
--- a/src/cmd/compile/internal/gc/align.go
+++ b/src/cmd/compile/internal/gc/align.go
@@ -86,9 +86,9 @@ func widstruct(errtype *Type, t *Type, o int64, flag int) int64 {
}
// For nonzero-sized structs which end in a zero-sized thing, we add
- // an extra byte of padding to the type. This padding ensures that
+ // an extra byte of padding to the type. This padding ensures that
// taking the address of the zero-sized thing can't manufacture a
- // pointer to the next object in the heap. See issue 9401.
+ // pointer to the next object in the heap. See issue 9401.
if flag == 1 && o > starto && o == lastzero {
o++
}
diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go
index 5c2ffa6888..8ec6300245 100644
--- a/src/cmd/compile/internal/gc/bimport.go
+++ b/src/cmd/compile/internal/gc/bimport.go
@@ -248,7 +248,7 @@ func (p *importer) typ() *Type {
// (comment from go.y)
// inl.C's inlnode in on a dotmeth node expects to find the inlineable body as
// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
- // out by typecheck's lookdot as this $$.ttype. So by providing
+ // out by typecheck's lookdot as this $$.ttype. So by providing
// this back link here we avoid special casing there.
n.Type.Nname = n
diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go
index 0fe6242e74..4286f361b8 100644
--- a/src/cmd/compile/internal/gc/builtin/runtime.go
+++ b/src/cmd/compile/internal/gc/builtin/runtime.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// NOTE: If you change this file you must run "go generate"
-// to update builtin.go. This is not done automatically
+// to update builtin.go. This is not done automatically
// to avoid depending on having a working compiler binary.
// +build ignore
diff --git a/src/cmd/compile/internal/gc/builtin/unsafe.go b/src/cmd/compile/internal/gc/builtin/unsafe.go
index a7fc8aa53e..6e25db65cc 100644
--- a/src/cmd/compile/internal/gc/builtin/unsafe.go
+++ b/src/cmd/compile/internal/gc/builtin/unsafe.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// NOTE: If you change this file you must run "go generate"
-// to update builtin.go. This is not done automatically
+// to update builtin.go. This is not done automatically
// to avoid depending on having a working compiler binary.
// +build ignore
diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go
index 74f61129c2..df30100b10 100644
--- a/src/cmd/compile/internal/gc/cgen.go
+++ b/src/cmd/compile/internal/gc/cgen.go
@@ -2296,7 +2296,7 @@ func sgen_wb(n *Node, ns *Node, w int64, wb bool) {
if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) || wb && osrc != -1000 {
// osrc and odst both on stack, and at least one is in
- // an unknown position. Could generate code to test
+ // an unknown position. Could generate code to test
// for forward/backward copy, but instead just copy
// to a temporary location first.
//
diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go
index 52ada12f86..f68cffb33e 100644
--- a/src/cmd/compile/internal/gc/dcl.go
+++ b/src/cmd/compile/internal/gc/dcl.go
@@ -167,7 +167,7 @@ func declare(n *Node, ctxt Class) {
n.Lineno = int32(parserline())
s := n.Sym
- // kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
+ // kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
if importpkg == nil && !typecheckok && s.Pkg != localpkg {
Yyerror("cannot declare name %v", s)
}
@@ -1021,7 +1021,7 @@ func embedded(s *Sym, pkg *Pkg) *Node {
CenterDot = 0xB7
)
// Names sometimes have disambiguation junk
- // appended after a center dot. Discard it when
+ // appended after a center dot. Discard it when
// making the name for the embedded struct field.
name := s.Name
diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go
index e26cbb372b..7ba377b200 100644
--- a/src/cmd/compile/internal/gc/esc.go
+++ b/src/cmd/compile/internal/gc/esc.go
@@ -15,7 +15,7 @@ import (
// or single non-recursive functions, bottom up.
//
// Finding these sets is finding strongly connected components
-// in the static call graph. The algorithm for doing that is taken
+// in the static call graph. The algorithm for doing that is taken
// from Sedgewick, Algorithms, Second Edition, p. 482, with two
// adaptations.
//
@@ -168,7 +168,7 @@ func (v *bottomUpVisitor) visitcode(n *Node, min uint32) uint32 {
//
// First escfunc, esc and escassign recurse over the ast of each
// function to dig out flow(dst,src) edges between any
-// pointer-containing nodes and store them in dst->escflowsrc. For
+// pointer-containing nodes and store them in dst->escflowsrc. For
// variables assigned to a variable in an outer scope or used as a
// return value, they store a flow(theSink, src) edge to a fake node
// 'the Sink'. For variables referenced in closures, an edge
@@ -180,7 +180,7 @@ func (v *bottomUpVisitor) visitcode(n *Node, min uint32) uint32 {
// parameters it can reach as leaking.
//
// If a value's address is taken but the address does not escape,
-// then the value can stay on the stack. If the value new(T) does
+// then the value can stay on the stack. If the value new(T) does
// not escape, then new(T) can be rewritten into a stack allocation.
// The same is true of slice literals.
//
@@ -340,7 +340,7 @@ func (e *EscState) track(n *Node) {
}
// Escape constants are numbered in order of increasing "escapiness"
-// to help make inferences be monotonic. With the exception of
+// to help make inferences be monotonic. With the exception of
// EscNever which is sticky, eX < eY means that eY is more exposed
// than eX, and hence replaces it in a conservative analysis.
const (
@@ -378,7 +378,7 @@ func escMax(e, etype uint16) uint16 {
}
// For each input parameter to a function, the escapeReturnEncoding describes
-// how the parameter may leak to the function's outputs. This is currently the
+// how the parameter may leak to the function's outputs. This is currently the
// "level" of the leak where level is 0 or larger (negative level means stored into
// something whose address is returned -- but that implies stored into the heap,
// hence EscHeap, which means that the details are not currently relevant. )
@@ -524,7 +524,7 @@ func escfunc(e *EscState, func_ *Node) {
// Mark labels that have no backjumps to them as not increasing e->loopdepth.
// Walk hasn't generated (goto|label)->left->sym->label yet, so we'll cheat
-// and set it to one of the following two. Then in esc we'll clear it again.
+// and set it to one of the following two. Then in esc we'll clear it again.
var looping Label
var nonlooping Label
@@ -1099,7 +1099,7 @@ func escassign(e *EscState, dst *Node, src *Node) {
// Might be pointer arithmetic, in which case
// the operands flow into the result.
- // TODO(rsc): Decide what the story is here. This is unsettling.
+ // TODO(rsc): Decide what the story is here. This is unsettling.
case OADD,
OSUB,
OOR,
@@ -1128,7 +1128,7 @@ func escassign(e *EscState, dst *Node, src *Node) {
// flow are 000, 001, 010, 011 and EEEE is computed Esc bits.
// Note width of xxx depends on value of constant
// bitsPerOutputInTag -- expect 2 or 3, so in practice the
-// tag cache array is 64 or 128 long. Some entries will
+// tag cache array is 64 or 128 long. Some entries will
// never be populated.
var tags [1 << (bitsPerOutputInTag + EscReturnBits)]string
@@ -1290,7 +1290,7 @@ func (e *EscState) addDereference(n *Node) *Node {
if Istype(t, Tptr) {
// This should model our own sloppy use of OIND to encode
// decreasing levels of indirection; i.e., "indirecting" an array
- // might yield the type of an element. To be enhanced...
+ // might yield the type of an element. To be enhanced...
t = t.Type
}
ind.Type = t
@@ -1419,7 +1419,7 @@ func esccall(e *EscState, n *Node, up *Node) {
fmt.Printf("%v::esccall:: %v in recursive group\n", Ctxt.Line(int(lineno)), Nconv(n, obj.FmtShort))
}
- // function in same mutually recursive group. Incorporate into flow graph.
+ // function in same mutually recursive group. Incorporate into flow graph.
// print("esc local fn: %N\n", fn->ntype);
if fn.Name.Defn.Esc == EscFuncUnknown || nE.Escretval != nil {
Fatalf("graph inconsistency")
@@ -1469,7 +1469,7 @@ func esccall(e *EscState, n *Node, up *Node) {
return
}
- // Imported or completely analyzed function. Use the escape tags.
+ // Imported or completely analyzed function. Use the escape tags.
if nE.Escretval != nil {
Fatalf("esc already decorated call %v\n", Nconv(n, obj.FmtSign))
}
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go
index 8864b57f5d..cf9ffc1fd1 100644
--- a/src/cmd/compile/internal/gc/fmt.go
+++ b/src/cmd/compile/internal/gc/fmt.go
@@ -69,7 +69,7 @@ var fmtbody bool
// E.g. for %S: %+S %#S %-S print an identifier properly qualified for debug/export/internal mode.
//
// The mode flags +, - and # are sticky, meaning they persist through
-// recursions of %N, %T and %S, but not the h and l flags. The u flag is
+// recursions of %N, %T and %S, but not the h and l flags. The u flag is
// sticky only on %T recursions and only used in %-/Sym mode.
//
@@ -796,7 +796,7 @@ func stmtfmt(n *Node) string {
// some statements allow for an init, but at most one,
// but we may have an arbitrary number added, eg by typecheck
- // and inlining. If it doesn't fit the syntax, emit an enclosing
+ // and inlining. If it doesn't fit the syntax, emit an enclosing
// block starting with the init statements.
// if we can just say "for" n->ninit; ... then do so
diff --git a/src/cmd/compile/internal/gc/global_test.go b/src/cmd/compile/internal/gc/global_test.go
index bd1391d9ad..54d3ed1b7d 100644
--- a/src/cmd/compile/internal/gc/global_test.go
+++ b/src/cmd/compile/internal/gc/global_test.go
@@ -17,7 +17,7 @@ import (
)
// Make sure "hello world" does not link in all the
-// fmt.scanf routines. See issue 6853.
+// fmt.scanf routines. See issue 6853.
func TestScanfRemoval(t *testing.T) {
testenv.MustHaveGoBuild(t)
@@ -64,7 +64,7 @@ func main() {
}
}
-// Make sure -S prints assembly code. See issue 14515.
+// Make sure -S prints assembly code. See issue 14515.
func TestDashS(t *testing.T) {
testenv.MustHaveGoBuild(t)
@@ -99,7 +99,7 @@ func main() {
patterns := []string{
// It is hard to look for actual instructions in an
- // arch-independent way. So we'll just look for
+ // arch-independent way. So we'll just look for
// pseudo-ops that are arch-independent.
"\tTEXT\t",
"\tFUNCDATA\t",
diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go
index 84065658ae..5b8a533666 100644
--- a/src/cmd/compile/internal/gc/inl.go
+++ b/src/cmd/compile/internal/gc/inl.go
@@ -43,7 +43,7 @@ var inlretlabel *Node // target of the goto substituted in place of a return
var inlretvars *NodeList // temp out variables
-// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
+// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
// the ->sym can be re-used in the local package, so peel it off the receiver's type.
func fnpkg(fn *Node) *Pkg {
if fn.Type.Thistuple != 0 {
@@ -63,7 +63,7 @@ func fnpkg(fn *Node) *Pkg {
return fn.Sym.Pkg
}
-// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
+// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
// because they're a copy of an already checked body.
func typecheckinl(fn *Node) {
lno := int(setlineno(fn))
@@ -300,7 +300,7 @@ func inlcopyslice(ll []*Node) []*Node {
}
// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
-// calls made to inlineable functions. This is the external entry point.
+// calls made to inlineable functions. This is the external entry point.
func inlcalls(fn *Node) {
savefn := Curfn
Curfn = fn
@@ -358,7 +358,7 @@ func inlnodeslice(l []*Node) {
}
// inlnode recurses over the tree to find inlineable calls, which will
-// be turned into OINLCALLs by mkinlcall. When the recursion comes
+// be turned into OINLCALLs by mkinlcall. When the recursion comes
// back up will examine left, right, list, rlist, ninit, ntest, nincr,
// nbody and nelse and use one of the 4 inlconv/glue functions above
// to turn the OINLCALL into an expression, a statement, or patch it
@@ -881,7 +881,7 @@ func inlvar(var_ *Node) *Node {
// This may no longer be necessary now that we run escape analysis
// after wrapper generation, but for 1.5 this is conservatively left
- // unchanged. See bugs 11053 and 9537.
+ // unchanged. See bugs 11053 and 9537.
if var_.Esc == EscHeap {
addrescapes(n)
}
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
index 7f59e2cafc..88a19f9015 100644
--- a/src/cmd/compile/internal/gc/order.go
+++ b/src/cmd/compile/internal/gc/order.go
@@ -10,7 +10,7 @@ import (
)
// Rewrite tree to use separate statements to enforce
-// order of evaluation. Makes walk easier, because it
+// order of evaluation. Makes walk easier, because it
// can (after this runs) reorder at will within an expression.
//
// Rewrite x op= y into x = x op y.
diff --git a/src/cmd/compile/internal/gc/parser.go b/src/cmd/compile/internal/gc/parser.go
index 983ffa356f..a485fa181a 100644
--- a/src/cmd/compile/internal/gc/parser.go
+++ b/src/cmd/compile/internal/gc/parser.go
@@ -2011,7 +2011,7 @@ func (p *parser) hidden_fndcl() *Node {
// inl.C's inlnode in on a dotmeth node expects to find the inlineable body as
// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
- // out by typecheck's lookdot as this $$.ttype. So by providing
+ // out by typecheck's lookdot as this $$.ttype. So by providing
// this back link here we avoid special casing there.
ss.Type.Nname = ss
return ss
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index 78872c1af2..384261b05e 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -29,7 +29,7 @@ const (
// An ordinary basic block.
//
-// Instructions are threaded together in a doubly-linked list. To iterate in
+// Instructions are threaded together in a doubly-linked list. To iterate in
// program order follow the link pointer from the first node and stop after the
// last node has been visited
//
@@ -122,7 +122,7 @@ func addedge(from *BasicBlock, to *BasicBlock) {
}
// Inserts prev before curr in the instruction
-// stream. Any control flow, such as branches or fall-throughs, that target the
+// stream. Any control flow, such as branches or fall-throughs, that target the
// existing instruction are adjusted to target the new instruction.
func splicebefore(lv *Liveness, bb *BasicBlock, prev *obj.Prog, curr *obj.Prog) {
// There may be other instructions pointing at curr,
@@ -181,9 +181,9 @@ func printblock(bb *BasicBlock) {
}
}
-// Iterates over a basic block applying a callback to each instruction. There
-// are two criteria for termination. If the end of basic block is reached a
-// value of zero is returned. If the callback returns a non-zero value, the
+// Iterates over a basic block applying a callback to each instruction. There
+// are two criteria for termination. If the end of basic block is reached a
+// value of zero is returned. If the callback returns a non-zero value, the
// iteration is stopped and the value of the callback is returned.
func blockany(bb *BasicBlock, f func(*obj.Prog) bool) bool {
for p := bb.last; p != nil; p = p.Opt.(*obj.Prog) {
@@ -244,7 +244,7 @@ func getvariables(fn *Node) []*Node {
return result
}
-// A pretty printer for control flow graphs. Takes an array of BasicBlock*s.
+// A pretty printer for control flow graphs. Takes an array of BasicBlock*s.
func printcfg(cfg []*BasicBlock) {
for _, bb := range cfg {
printblock(bb)
@@ -252,7 +252,7 @@ func printcfg(cfg []*BasicBlock) {
}
// Assigns a reverse post order number to each connected basic block using the
-// standard algorithm. Unconnected blocks will not be affected.
+// standard algorithm. Unconnected blocks will not be affected.
func reversepostorder(root *BasicBlock, rpo *int32) {
root.mark = VISITED
for _, bb := range root.succ {
@@ -272,7 +272,7 @@ func (x blockrpocmp) Len() int { return len(x) }
func (x blockrpocmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x blockrpocmp) Less(i, j int) bool { return x[i].rpo < x[j].rpo }
-// A pattern matcher for call instructions. Returns true when the instruction
+// A pattern matcher for call instructions. Returns true when the instruction
// is a call to a specific package qualified function name.
func iscall(prog *obj.Prog, name *obj.LSym) bool {
if prog == nil {
@@ -340,8 +340,8 @@ func isdeferreturn(prog *obj.Prog) bool {
}
// Walk backwards from a runtime·selectgo call up to its immediately dominating
-// runtime·newselect call. Any successor nodes of communication clause nodes
-// are implicit successors of the runtime·selectgo call node. The goal of this
+// runtime·newselect call. Any successor nodes of communication clause nodes
+// are implicit successors of the runtime·selectgo call node. The goal of this
// analysis is to add these missing edges to complete the control flow graph.
func addselectgosucc(selectgo *BasicBlock) {
var succ *BasicBlock
@@ -379,7 +379,7 @@ func addselectgosucc(selectgo *BasicBlock) {
}
}
-// The entry point for the missing selectgo control flow algorithm. Takes an
+// The entry point for the missing selectgo control flow algorithm. Takes an
// array of BasicBlock*s containing selectgo calls.
func fixselectgo(selectgo []*BasicBlock) {
for _, bb := range selectgo {
@@ -387,15 +387,15 @@ func fixselectgo(selectgo []*BasicBlock) {
}
}
-// Constructs a control flow graph from a sequence of instructions. This
+// Constructs a control flow graph from a sequence of instructions. This
// procedure is complicated by various sources of implicit control flow that are
-// not accounted for using the standard cfg construction algorithm. Returns an
+// not accounted for using the standard cfg construction algorithm. Returns an
// array of BasicBlock*s in control flow graph form (basic blocks ordered by
// their RPO number).
func newcfg(firstp *obj.Prog) []*BasicBlock {
- // Reset the opt field of each prog to nil. In the first and second
+ // Reset the opt field of each prog to nil. In the first and second
// passes, instructions that are labels temporarily use the opt field to
- // point to their basic block. In the third pass, the opt field reset
+ // point to their basic block. In the third pass, the opt field reset
// to point to the predecessor of an instruction in its basic block.
for p := firstp; p != nil; p = p.Link {
p.Opt = nil
@@ -436,7 +436,7 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
}
// Loop through all basic blocks maximally growing the list of
- // contained instructions until a label is reached. Add edges
+ // contained instructions until a label is reached. Add edges
// for branches and fall-through instructions.
for _, bb := range cfg {
for p := bb.last; p != nil && p.As != obj.AEND; p = p.Link {
@@ -448,7 +448,7 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
// Stop before an unreachable RET, to avoid creating
// unreachable control flow nodes.
if p.Link != nil && p.Link.As == obj.ARET && p.Link.Mode == 1 {
- // TODO: remove after SSA is done. SSA does not
+ // TODO: remove after SSA is done. SSA does not
// generate any unreachable RET instructions.
break
}
@@ -472,7 +472,7 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
}
// Add back links so the instructions in a basic block can be traversed
- // backward. This is the final state of the instruction opt field.
+ // backward. This is the final state of the instruction opt field.
for _, bb := range cfg {
p := bb.first
var prev *obj.Prog
@@ -500,13 +500,13 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
rpo := int32(len(cfg))
reversepostorder(bb, &rpo)
- // Sort the basic blocks by their depth first number. The
+ // Sort the basic blocks by their depth first number. The
// array is now a depth-first spanning tree with the first
// node being the root.
sort.Sort(blockrpocmp(cfg))
// Unreachable control flow nodes are indicated by a -1 in the rpo
- // field. If we see these nodes something must have gone wrong in an
+ // field. If we see these nodes something must have gone wrong in an
// upstream compilation phase.
bb = cfg[0]
if bb.rpo == -1 {
@@ -536,7 +536,7 @@ func isfunny(n *Node) bool {
}
// Computes the effects of an instruction on a set of
-// variables. The vars argument is an array of Node*s.
+// variables. The vars argument is an array of Node*s.
//
// The output vectors give bits for variables:
// uevar - used by this instruction
@@ -555,8 +555,8 @@ func progeffects(prog *obj.Prog, vars []*Node, uevar Bvec, varkill Bvec, avarini
bvresetall(avarinit)
if prog.As == obj.ARET {
- // Return instructions implicitly read all the arguments. For
- // the sake of correctness, out arguments must be read. For the
+ // Return instructions implicitly read all the arguments. For
+ // the sake of correctness, out arguments must be read. For the
// sake of backtrace quality, we read in arguments as well.
//
// A return instruction with a p->to is a tail return, which brings
@@ -676,7 +676,7 @@ Next:
}
// Constructs a new liveness structure used to hold the global state of the
-// liveness computation. The cfg argument is an array of BasicBlock*s and the
+// liveness computation. The cfg argument is an array of BasicBlock*s and the
// vars argument is an array of Node*s.
func newliveness(fn *Node, ptxt *obj.Prog, cfg []*BasicBlock, vars []*Node) *Liveness {
result := new(Liveness)
@@ -721,7 +721,7 @@ func printeffects(p *obj.Prog, uevar Bvec, varkill Bvec, avarinit Bvec) {
fmt.Printf("\n")
}
-// Pretty print a variable node. Uses Pascal like conventions for pointers and
+// Pretty print a variable node. Uses Pascal like conventions for pointers and
// addresses to avoid confusing the C like conventions used in the node variable
// names.
func printnode(node *Node) {
@@ -736,7 +736,7 @@ func printnode(node *Node) {
fmt.Printf(" %v%s%s", node, p, a)
}
-// Pretty print a list of variables. The vars argument is an array of Node*s.
+// Pretty print a list of variables. The vars argument is an array of Node*s.
func printvars(name string, bv Bvec, vars []*Node) {
fmt.Printf("%s:", name)
for i, node := range vars {
@@ -850,10 +850,10 @@ func checkprog(fn *Node, p *obj.Prog) {
}
}
-// Check instruction invariants. We assume that the nodes corresponding to the
+// Check instruction invariants. We assume that the nodes corresponding to the
// sources and destinations of memory operations will be declared in the
-// function. This is not strictly true, as is the case for the so-called funny
-// nodes and there are special cases to skip over that stuff. The analysis will
+// function. This is not strictly true, as is the case for the so-called funny
+// nodes and there are special cases to skip over that stuff. The analysis will
// fail if this invariant blindly changes.
func checkptxt(fn *Node, firstp *obj.Prog) {
if debuglive == 0 {
@@ -931,7 +931,7 @@ func onebitwalktype1(t *Type, xoffset *int64, bv Bvec) {
case TARRAY:
// The value of t->bound is -1 for slices types and >=0 for
- // for fixed array types. All other values are invalid.
+ // for fixed array types. All other values are invalid.
if t.Bound < -1 {
Fatalf("onebitwalktype1: invalid bound, %v", t)
}
@@ -975,8 +975,8 @@ func argswords() int32 {
return int32(Curfn.Type.Argwid / int64(Widthptr))
}
-// Generates live pointer value maps for arguments and local variables. The
-// this argument and the in arguments are always assumed live. The vars
+// Generates live pointer value maps for arguments and local variables. The
+// this argument and the in arguments are always assumed live. The vars
// argument is an array of Node*s.
func onebitlivepointermap(lv *Liveness, liveout Bvec, vars []*Node, args Bvec, locals Bvec) {
var node *Node
@@ -1046,7 +1046,7 @@ func issafepoint(prog *obj.Prog) bool {
return prog.As == obj.ATEXT || prog.As == obj.ACALL
}
-// Initializes the sets for solving the live variables. Visits all the
+// Initializes the sets for solving the live variables. Visits all the
// instructions in each basic block to summarizes the information at each basic
// block
func livenessprologue(lv *Liveness) {
@@ -1140,15 +1140,15 @@ func livenesssolve(lv *Liveness) {
}
}
- // Iterate through the blocks in reverse round-robin fashion. A work
- // queue might be slightly faster. As is, the number of iterations is
+ // Iterate through the blocks in reverse round-robin fashion. A work
+ // queue might be slightly faster. As is, the number of iterations is
// so low that it hardly seems to be worth the complexity.
change = 1
for change != 0 {
change = 0
- // Walk blocks in the general direction of propagation. This
+ // Walk blocks in the general direction of propagation. This
// improves convergence.
for i := len(lv.cfg) - 1; i >= 0; i-- {
bb := lv.cfg[i]
@@ -1714,10 +1714,10 @@ func livenessprintdebug(lv *Liveness) {
fmt.Printf("\n")
}
-// Dumps an array of bitmaps to a symbol as a sequence of uint32 values. The
-// first word dumped is the total number of bitmaps. The second word is the
-// length of the bitmaps. All bitmaps are assumed to be of equal length. The
-// words that are followed are the raw bitmap words. The arr argument is an
+// Dumps an array of bitmaps to a symbol as a sequence of uint32 values. The
+// first word dumped is the total number of bitmaps. The second word is the
+// length of the bitmaps. All bitmaps are assumed to be of equal length. The
+// words that are followed are the raw bitmap words. The arr argument is an
// array of Node*s.
func onebitwritesymbol(arr []Bvec, sym *Sym) {
var i int
@@ -1759,7 +1759,7 @@ func printprog(p *obj.Prog) {
}
}
-// Entry pointer for liveness analysis. Constructs a complete CFG, solves for
+// Entry pointer for liveness analysis. Constructs a complete CFG, solves for
// the liveness of pointer variables in the function, and emits a runtime data
// structure read by the garbage collector.
func liveness(fn *Node, firstp *obj.Prog, argssym *Sym, livesym *Sym) {
diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go
index 43c6db0a00..3cf480efd4 100644
--- a/src/cmd/compile/internal/gc/reflect.go
+++ b/src/cmd/compile/internal/gc/reflect.go
@@ -42,7 +42,7 @@ func siglt(a, b *Sig) bool {
}
// Builds a type representing a Bucket structure for
-// the given map type. This type is not visible to users -
+// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with ../../../../runtime/hashmap.go!
@@ -421,7 +421,7 @@ func dimportpath(p *Pkg) {
}
// If we are compiling the runtime package, there are two runtime packages around
- // -- localpkg and Runtimepkg. We don't want to produce import path symbols for
+ // -- localpkg and Runtimepkg. We don't want to produce import path symbols for
// both of them, so just produce one for localpkg.
if myimportpath == "runtime" && p == Runtimepkg {
return
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 03ff17eb01..1033cd9226 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -245,7 +245,7 @@ type state struct {
// *Node is the unique identifier (an ONAME Node) for the variable.
vars map[*Node]*ssa.Value
- // all defined variables at the end of each block. Indexed by block ID.
+ // all defined variables at the end of each block. Indexed by block ID.
defvars []map[*Node]*ssa.Value
// addresses of PPARAM and PPARAMOUT variables.
@@ -254,12 +254,12 @@ type state struct {
// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
varsyms map[*Node]interface{}
- // starting values. Memory, stack pointer, and globals pointer
+ // starting values. Memory, stack pointer, and globals pointer
startmem *ssa.Value
sp *ssa.Value
sb *ssa.Value
- // line number stack. The current line number is top of stack
+ // line number stack. The current line number is top of stack
line []int32
// list of panic calls by function name and line number.
@@ -269,7 +269,7 @@ type state struct {
// list of FwdRef values.
fwdRefs []*ssa.Value
- // list of PPARAMOUT (return) variables. Does not include PPARAM|PHEAP vars.
+ // list of PPARAMOUT (return) variables. Does not include PPARAM|PHEAP vars.
returns []*Node
cgoUnsafeArgs bool
@@ -339,7 +339,7 @@ func (s *state) startBlock(b *ssa.Block) {
}
// endBlock marks the end of generating code for the current block.
-// Returns the (former) current block. Returns nil if there is no current
+// Returns the (former) current block. Returns nil if there is no current
// block, i.e. if no code flows to the current execution point.
func (s *state) endBlock() *ssa.Block {
b := s.curBlock
@@ -540,7 +540,7 @@ func (s *state) stmt(n *Node) {
b.Kind = ssa.BlockExit
b.Control = m
// TODO: never rewrite OPANIC to OCALLFUNC in the
- // first place. Need to wait until all backends
+ // first place. Need to wait until all backends
// go through SSA.
}
case ODEFER:
@@ -653,8 +653,8 @@ func (s *state) stmt(n *Node) {
rhs := n.Right
if rhs != nil && (rhs.Op == OSTRUCTLIT || rhs.Op == OARRAYLIT) {
// All literals with nonzero fields have already been
- // rewritten during walk. Any that remain are just T{}
- // or equivalents. Use the zero value.
+ // rewritten during walk. Any that remain are just T{}
+ // or equivalents. Use the zero value.
if !iszero(rhs) {
Fatalf("literal with nonzero value in SSA: %v", rhs)
}
@@ -891,10 +891,10 @@ func (s *state) stmt(n *Node) {
}
// exit processes any code that needs to be generated just before returning.
-// It returns a BlockRet block that ends the control flow. Its control value
+// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
- // Run exit code. Typically, this code copies heap-allocated PPARAMOUT
+ // Run exit code. Typically, this code copies heap-allocated PPARAMOUT
// variables back to the stack.
s.stmts(s.exitCode)
@@ -906,7 +906,7 @@ func (s *state) exit() *ssa.Block {
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, n, s.mem())
s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, n.Type.Size(), addr, val, s.mem())
// TODO: if val is ever spilled, we'd like to use the
- // PPARAMOUT slot for spilling it. That won't happen
+ // PPARAMOUT slot for spilling it. That won't happen
// currently.
}
@@ -1382,7 +1382,7 @@ func (s *state) expr(n *Node) *ssa.Value {
case CTBOOL:
v := s.constBool(n.Val().U.(bool))
// For some reason the frontend gets the line numbers of
- // CTBOOL literals totally wrong. Fix it here by grabbing
+ // CTBOOL literals totally wrong. Fix it here by grabbing
// the line number of the enclosing AST node.
if len(s.line) >= 2 {
v.Line = s.line[len(s.line)-2]
@@ -1925,7 +1925,7 @@ func (s *state) expr(n *Node) *ssa.Value {
tab := s.expr(n.Left)
data := s.expr(n.Right)
// The frontend allows putting things like struct{*byte} in
- // the data portion of an eface. But we don't want struct{*byte}
+ // the data portion of an eface. But we don't want struct{*byte}
// as a register type because (among other reasons) the liveness
// analysis is confused by the "fat" variables that result from
// such types being spilled.
@@ -2037,7 +2037,7 @@ func (s *state) expr(n *Node) *ssa.Value {
r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl)
s.vars[&ptrVar] = r[0]
- // Note: we don't need to read r[1], the result's length. It will be nl.
+ // Note: we don't need to read r[1], the result's length. It will be nl.
// (or maybe we should, we just have to spill/restore nl otherwise?)
s.vars[&capVar] = r[2]
b = s.endBlock()
@@ -2106,7 +2106,7 @@ func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
return
// Note: if likely==1, then both recursive calls pass 1.
// If likely==-1, then we don't have enough information to decide
- // whether the first branch is likely or not. So we pass 0 for
+ // whether the first branch is likely or not. So we pass 0 for
// the likeliness of the first branch.
// TODO: have the frontend give us branch prediction hints for
// OANDAND and OOROR nodes (if it ever has such info).
@@ -2191,7 +2191,7 @@ func (s *state) assign(left *Node, right *ssa.Value, wb, deref bool, line int32)
s.addNamedValue(left, right)
return
}
- // Left is not ssa-able. Compute its address.
+ // Left is not ssa-able. Compute its address.
addr := s.addr(left, false)
if left.Op == ONAME {
s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, left, s.mem())
@@ -2333,7 +2333,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value {
dowidth(fn.Type)
stksize := fn.Type.Argwid // includes receiver
- // Run all argument assignments. The arg slots have already
+ // Run all argument assignments. The arg slots have already
// been offset by the appropriate amount (+2*widthptr for go/defer,
// +widthptr for interface calls).
// For OCALLMETH, the receiver is set in these statements.
@@ -2462,12 +2462,12 @@ func (s *state) addr(n *Node, bounded bool) *ssa.Value {
return nil
case PAUTO:
// We need to regenerate the address of autos
- // at every use. This prevents LEA instructions
+ // at every use. This prevents LEA instructions
// from occurring before the corresponding VarDef
// op and confusing the liveness analysis into thinking
// the variable is live at function entry.
// TODO: I'm not sure if this really works or we're just
- // getting lucky. We might need a real dependency edge
+ // getting lucky. We might need a real dependency edge
// between vardef and addr ops.
aux := &ssa.AutoSymbol{Typ: n.Type, Node: n}
return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
@@ -2599,7 +2599,7 @@ func (s *state) canSSA(n *Node) bool {
func canSSAType(t *Type) bool {
dowidth(t)
if t.Width > int64(4*Widthptr) {
- // 4*Widthptr is an arbitrary constant. We want it
+ // 4*Widthptr is an arbitrary constant. We want it
// to be at least 3*Widthptr so slices can be registerized.
// Too big and we'll introduce too much register pressure.
return false
@@ -2647,7 +2647,7 @@ func (s *state) nilCheck(ptr *ssa.Value) {
s.startBlock(bNext)
}
-// boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
+// boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
// Starts a new block on return.
func (s *state) boundsCheck(idx, len *ssa.Value) {
if Debug['B'] != 0 {
@@ -2661,7 +2661,7 @@ func (s *state) boundsCheck(idx, len *ssa.Value) {
s.check(cmp, Panicindex)
}
-// sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
+// sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
// Starts a new block on return.
func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
if Debug['B'] != 0 {
@@ -2701,7 +2701,7 @@ func (s *state) check(cmp *ssa.Value, fn *Node) {
// Returns a slice of results of the given result types.
// The call is added to the end of the current block.
// If returns is false, the block is marked as an exit block.
-// If returns is true, the block is marked as a call block. A new block
+// If returns is true, the block is marked as a call block. A new block
// is started to load the return values.
func (s *state) rtcall(fn *Node, returns bool, results []*Type, args ...*ssa.Value) []*ssa.Value {
// Write args to the stack
@@ -2773,7 +2773,7 @@ func (s *state) insertWBmove(t *Type, left, right *ssa.Value, line int32) {
aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier", 0).Sym}
flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
- // TODO: select the .enabled field. It is currently first, so not needed for now.
+ // TODO: select the .enabled field. It is currently first, so not needed for now.
// Load word, test byte, avoiding partial register write from load byte.
flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
@@ -2818,7 +2818,7 @@ func (s *state) insertWBstore(t *Type, left, right *ssa.Value, line int32) {
aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier", 0).Sym}
flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
- // TODO: select the .enabled field. It is currently first, so not needed for now.
+ // TODO: select the .enabled field. It is currently first, so not needed for now.
// Load word, test byte, avoiding partial register write from load byte.
flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
@@ -3018,7 +3018,7 @@ func (s *state) slice(t *Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
var rcap *ssa.Value
switch {
case t.IsString():
- // Capacity of the result is unimportant. However, we use
+ // Capacity of the result is unimportant. However, we use
// rcap to test if we've generated a zero-length slice.
// Use length of strings for that.
rcap = rlen
@@ -3123,13 +3123,13 @@ func (s *state) uintTofloat(cvttab *u2fcvtTab, n *Node, x *ssa.Value, ft, tt *Ty
// Code borrowed from old code generator.
// What's going on: large 64-bit "unsigned" looks like
// negative number to hardware's integer-to-float
- // conversion. However, because the mantissa is only
+ // conversion. However, because the mantissa is only
// 63 bits, we don't need the LSB, so instead we do an
// unsigned right shift (divide by two), convert, and
- // double. However, before we do that, we need to be
+ // double. However, before we do that, we need to be
// sure that we do not lose a "1" if that made the
- // difference in the resulting rounding. Therefore, we
- // preserve it, and OR (not ADD) it back in. The case
+ // difference in the resulting rounding. Therefore, we
+ // preserve it, and OR (not ADD) it back in. The case
// that matters is when the eleven discarded bits are
// equal to 10000000001; that rounds up, and the 1 cannot
// be lost else it would round down if the LSB of the
@@ -3470,15 +3470,15 @@ func (s *state) mem() *ssa.Value {
}
func (s *state) linkForwardReferences() {
- // Build SSA graph. Each variable on its first use in a basic block
+ // Build SSA graph. Each variable on its first use in a basic block
// leaves a FwdRef in that block representing the incoming value
- // of that variable. This function links that ref up with possible definitions,
- // inserting Phi values as needed. This is essentially the algorithm
+ // of that variable. This function links that ref up with possible definitions,
+ // inserting Phi values as needed. This is essentially the algorithm
// described by Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau:
// http://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
// Differences:
// - We use FwdRef nodes to postpone phi building until the CFG is
- // completely built. That way we can avoid the notion of "sealed"
+ // completely built. That way we can avoid the notion of "sealed"
// blocks.
// - Phi optimization is a separate pass (in ../ssa/phielim.go).
for len(s.fwdRefs) > 0 {
@@ -3501,7 +3501,7 @@ func (s *state) resolveFwdRef(v *ssa.Value) {
v.Aux = name
return
}
- // Not SSAable. Load it.
+ // Not SSAable. Load it.
addr := s.decladdrs[name]
if addr == nil {
// TODO: closure args reach here.
@@ -3527,7 +3527,7 @@ func (s *state) resolveFwdRef(v *ssa.Value) {
args = append(args, s.lookupVarOutgoing(p, v.Type, name, v.Line))
}
- // Decide if we need a phi or not. We need a phi if there
+ // Decide if we need a phi or not. We need a phi if there
// are two different args (which are both not v).
var w *ssa.Value
for _, a := range args {
@@ -3548,7 +3548,7 @@ func (s *state) resolveFwdRef(v *ssa.Value) {
if w == nil {
s.Fatalf("no witness for reachable phi %s", v)
}
- // One witness. Make v a copy of w.
+ // One witness. Make v a copy of w.
v.Op = ssa.OpCopy
v.AddArg(w)
}
@@ -3560,7 +3560,7 @@ func (s *state) lookupVarOutgoing(b *ssa.Block, t ssa.Type, name *Node, line int
return v
}
// The variable is not defined by b and we haven't
- // looked it up yet. Generate a FwdRef for the variable and return that.
+ // looked it up yet. Generate a FwdRef for the variable and return that.
v := b.NewValue0A(line, ssa.OpFwdRef, t, name)
s.fwdRefs = append(s.fwdRefs, v)
m[name] = v
@@ -3740,7 +3740,7 @@ func genssa(f *ssa.Func, ptxt *obj.Prog, gcargs, gclocals *Sym) {
gcsymdup(gcargs)
gcsymdup(gclocals)
- // Add frame prologue. Zero ambiguously live variables.
+ // Add frame prologue. Zero ambiguously live variables.
Thearch.Defframe(ptxt)
if Debug['f'] != 0 {
frame(0)
@@ -4115,7 +4115,7 @@ func (s *genState) genValue(v *ssa.Value) {
if v.AuxInt2Int64() == -1<<31 || x == r {
if x != r {
// This code compensates for the fact that the register allocator
- // doesn't understand 2-address instructions yet. TODO: fix that.
+ // doesn't understand 2-address instructions yet. TODO: fix that.
p := Prog(moveByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = x
@@ -4183,7 +4183,7 @@ func (s *genState) genValue(v *ssa.Value) {
ssa.OpAMD64SARBconst, ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst,
ssa.OpAMD64ROLBconst:
// This code compensates for the fact that the register allocator
- // doesn't understand 2-address instructions yet. TODO: fix that.
+ // doesn't understand 2-address instructions yet. TODO: fix that.
x := regnum(v.Args[0])
r := regnum(v)
if x != r {
@@ -4943,7 +4943,7 @@ func (s *state) extendIndex(v *ssa.Value) *ssa.Value {
return v
}
if size > s.config.IntSize {
- // TODO: truncate 64-bit indexes on 32-bit pointer archs. We'd need to test
+ // TODO: truncate 64-bit indexes on 32-bit pointer archs. We'd need to test
// the high word and branch to out-of-bounds failure if it is not 0.
s.Unimplementedf("64->32 index truncation not implemented")
return v
@@ -5089,7 +5089,7 @@ func moveByType(t ssa.Type) int {
}
// regnum returns the register (in cmd/internal/obj numbering) to
-// which v has been allocated. Panics if v is not assigned to a
+// which v has been allocated. Panics if v is not assigned to a
// register.
// TODO: Make this panic again once it stops happening routinely.
func regnum(v *ssa.Value) int16 {
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
index 204962ca85..ce1a317530 100644
--- a/src/cmd/compile/internal/gc/subr.go
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -739,7 +739,7 @@ func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) bool {
}
if t1.Sym != nil || t2.Sym != nil {
// Special case: we keep byte and uint8 separate
- // for error messages. Treat them as equal.
+ // for error messages. Treat them as equal.
switch t1.Etype {
case TUINT8:
if (t1 == Types[TUINT8] || t1 == bytetype) && (t2 == Types[TUINT8] || t2 == bytetype) {
@@ -997,7 +997,7 @@ func convertop(src *Type, dst *Type, why *string) Op {
}
// The rules for interfaces are no different in conversions
- // than assignments. If interfaces are involved, stop now
+ // than assignments. If interfaces are involved, stop now
// with the good message from assignop.
// Otherwise clear the error.
if src.Etype == TINTER || dst.Etype == TINTER {
@@ -2684,8 +2684,8 @@ func ngotype(n *Node) *Sym {
}
// Convert raw string to the prefix that will be used in the symbol
-// table. All control characters, space, '%' and '"', as well as
-// non-7-bit clean bytes turn into %xx. The period needs escaping
+// table. All control characters, space, '%' and '"', as well as
+// non-7-bit clean bytes turn into %xx. The period needs escaping
// only in the last segment of the path, and it makes for happier
// users if we escape that as little as possible.
//
diff --git a/src/cmd/compile/internal/gc/testdata/addressed_ssa.go b/src/cmd/compile/internal/gc/testdata/addressed_ssa.go
index f9f459360b..98003fe48f 100644
--- a/src/cmd/compile/internal/gc/testdata/addressed_ssa.go
+++ b/src/cmd/compile/internal/gc/testdata/addressed_ssa.go
@@ -143,7 +143,7 @@ func (v V) val() int64 {
// address taken to force heap allocation, and then based on
// the value of which a pair of those locals are copied in
// various ways to the two results y, and z, which are also
-// addressed. Which is expected to be one of 11-13, 21-23, 31, 32,
+// addressed. Which is expected to be one of 11-13, 21-23, 31, 32,
// and y.val() should be equal to which and y.p.val() should
// be equal to z.val(). Also, x(.p)**8 == x; that is, the
// autos are all linked into a ring.
diff --git a/src/cmd/compile/internal/gc/testdata/gen/arithBoundaryGen.go b/src/cmd/compile/internal/gc/testdata/gen/arithBoundaryGen.go
index 7c7d721a23..be0aad5ff8 100644
--- a/src/cmd/compile/internal/gc/testdata/gen/arithBoundaryGen.go
+++ b/src/cmd/compile/internal/gc/testdata/gen/arithBoundaryGen.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// This program generates a test to verify that the standard arithmetic
-// operators properly handle some special cases. The test file should be
+// operators properly handle some special cases. The test file should be
// generated with a known working version of go.
// launch with `go run arithBoundaryGen.go` a file called arithBoundary_ssa.go
// will be written into the parent directory containing the tests
diff --git a/src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go b/src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go
index 34e54ad08a..b32a59d514 100644
--- a/src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go
+++ b/src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// This program generates a test to verify that the standard arithmetic
-// operators properly handle const cases. The test file should be
+// operators properly handle const cases. The test file should be
// generated with a known working version of go.
// launch with `go run arithConstGen.go` a file called arithConst_ssa.go
// will be written into the parent directory containing the tests
diff --git a/src/cmd/compile/internal/gc/testdata/loadstore_ssa.go b/src/cmd/compile/internal/gc/testdata/loadstore_ssa.go
index e0b0b4dfab..df51921e0c 100644
--- a/src/cmd/compile/internal/gc/testdata/loadstore_ssa.go
+++ b/src/cmd/compile/internal/gc/testdata/loadstore_ssa.go
@@ -42,7 +42,7 @@ func testStoreSize_ssa(p *uint16, q *uint16, v uint32) {
switch {
}
// Test to make sure that (Store ptr (Trunc32to16 val) mem)
- // does not end up as a 32-bit store. It must stay a 16 bit store
+ // does not end up as a 32-bit store. It must stay a 16 bit store
// even when Trunc32to16 is rewritten to be a nop.
// To ensure that we get rewrite the Trunc32to16 before
// we rewrite the Store, we force the truncate into an
diff --git a/src/cmd/compile/internal/gc/testdata/phi_ssa.go b/src/cmd/compile/internal/gc/testdata/phi_ssa.go
index e855070fc3..6469bfea44 100644
--- a/src/cmd/compile/internal/gc/testdata/phi_ssa.go
+++ b/src/cmd/compile/internal/gc/testdata/phi_ssa.go
@@ -85,7 +85,7 @@ func foo() int32 {
z = int32(data2[25])
}
// Lots of phis of the form phi(int32,int64) of type int32 happen here.
- // Some will be stack phis. For those stack phis, make sure the spill
+ // Some will be stack phis. For those stack phis, make sure the spill
// of the second argument uses the phi's width (4 bytes), not its width
// (8 bytes). Otherwise, a random stack slot gets clobbered.
diff --git a/src/cmd/compile/internal/gc/testdata/unsafe_ssa.go b/src/cmd/compile/internal/gc/testdata/unsafe_ssa.go
index d074eb1d5e..a3d9dbcc39 100644
--- a/src/cmd/compile/internal/gc/testdata/unsafe_ssa.go
+++ b/src/cmd/compile/internal/gc/testdata/unsafe_ssa.go
@@ -30,7 +30,7 @@ func f_ssa() *[8]uint {
} else {
x = 0
}
- // Clobber the global pointer. The only live ref
+ // Clobber the global pointer. The only live ref
// to the allocated object is now x.
a = nil
@@ -66,7 +66,7 @@ func g_ssa() *[7]uint {
} else {
x = 0
}
- // Clobber the global pointer. The only live ref
+ // Clobber the global pointer. The only live ref
// to the allocated object is now x.
a = nil
diff --git a/src/cmd/compile/internal/gc/type.go b/src/cmd/compile/internal/gc/type.go
index f09094ce23..0f7842c5b2 100644
--- a/src/cmd/compile/internal/gc/type.go
+++ b/src/cmd/compile/internal/gc/type.go
@@ -117,7 +117,7 @@ func (t *Type) cmp(x *Type) ssa.Cmp {
if t.Sym != nil || x.Sym != nil {
// Special case: we keep byte and uint8 separate
- // for error messages. Treat them as equal.
+ // for error messages. Treat them as equal.
switch t.Etype {
case TUINT8:
if (t == Types[TUINT8] || t == bytetype) && (x == Types[TUINT8] || x == bytetype) {
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index 102235f94e..f912061423 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -2840,7 +2840,7 @@ func keydup(n *Node, hash map[uint32][]*Node) {
cmp.Right = a.Left
evconst(&cmp)
if cmp.Op == OLITERAL {
- // Sometimes evconst fails. See issue 12536.
+ // Sometimes evconst fails. See issue 12536.
b = cmp.Val().U.(bool)
}
}
@@ -3074,7 +3074,7 @@ func typecheckcomplit(np **Node) {
Yyerror("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
}
- // No pushtype allowed here. Must name fields for that.
+ // No pushtype allowed here. Must name fields for that.
ll.N = assignconv(ll.N, f.Type, "field value")
ll.N = Nod(OKEY, newname(f.Sym), ll.N)
@@ -3114,7 +3114,7 @@ func typecheckcomplit(np **Node) {
}
// Sym might have resolved to name in other top-level
- // package, because of import dot. Redirect to correct sym
+ // package, because of import dot. Redirect to correct sym
// before we do the lookup.
if s.Pkg != localpkg && exportname(s.Name) {
s1 = Lookup(s.Name)
@@ -3136,7 +3136,7 @@ func typecheckcomplit(np **Node) {
fielddup(newname(s), hash)
r = l.Right
- // No pushtype allowed here. Tried and rejected.
+ // No pushtype allowed here. Tried and rejected.
typecheck(&r, Erv)
l.Right = assignconv(r, f.Type, "field value")
@@ -3504,7 +3504,7 @@ func domethod(n *Node) {
// }
// then even though I.M looks like it doesn't care about the
// value of its argument, a specific implementation of I may
- // care. The _ would suppress the assignment to that argument
+ // care. The _ would suppress the assignment to that argument
// while generating a call, so remove it.
for t := getinargx(nt.Type).Type; t != nil; t = t.Down {
if t.Sym != nil && t.Sym.Name == "_" {
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 3e67f50620..04dac7ca2c 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -2788,7 +2788,7 @@ func appendslice(n *Node, init **NodeList) *Node {
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
- // modifying here. Fix explicitly.
+ // modifying here. Fix explicitly.
for l := n.List; l != nil; l = l.Next {
l.N = cheapexpr(l.N, init)
}
@@ -2907,7 +2907,7 @@ func walkappend(n *Node, init **NodeList, dst *Node) *Node {
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
- // modifying here. Fix explicitly.
+ // modifying here. Fix explicitly.
// Using cheapexpr also makes sure that the evaluation
// of all arguments (and especially any panics) happen
// before we begin to modify the slice in a visible way.
@@ -3241,7 +3241,7 @@ func walkcompare(np **Node, init **NodeList) {
return
}
- // Chose not to inline. Call equality function directly.
+ // Chose not to inline. Call equality function directly.
var needsize int
call := Nod(OCALL, eqfor(t, &needsize), nil)
diff --git a/src/cmd/compile/internal/mips64/cgen.go b/src/cmd/compile/internal/mips64/cgen.go
index 434bfc73cb..bdcb565839 100644
--- a/src/cmd/compile/internal/mips64/cgen.go
+++ b/src/cmd/compile/internal/mips64/cgen.go
@@ -129,7 +129,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
// TODO: Instead of generating ADDV $-8,R8; ADDV
// $-8,R7; n*(MOVV 8(R8),R9; ADDV $8,R8; MOVV R9,8(R7);
// ADDV $8,R7;) just generate the offsets directly and
- // eliminate the ADDs. That will produce shorter, more
+ // eliminate the ADDs. That will produce shorter, more
// pipeline-able code.
var p *obj.Prog
for ; c > 0; c-- {
diff --git a/src/cmd/compile/internal/mips64/peep.go b/src/cmd/compile/internal/mips64/peep.go
index f97be60a2a..bf8a4ca979 100644
--- a/src/cmd/compile/internal/mips64/peep.go
+++ b/src/cmd/compile/internal/mips64/peep.go
@@ -62,7 +62,7 @@ loop1:
		// distinguish between moves that *must*
// sign/zero extend and moves that don't care so they
// can eliminate moves that don't care without
- // breaking moves that do care. This might let us
+ // breaking moves that do care. This might let us
// simplify or remove the next peep loop, too.
if p.As == mips.AMOVV || p.As == mips.AMOVF || p.As == mips.AMOVD {
if regtyp(&p.To) {
@@ -697,7 +697,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
// copyas returns 1 if a and v address the same register.
//
// If a is the from operand, this means this operation reads the
-// register in v. If a is the to operand, this means this operation
+// register in v. If a is the to operand, this means this operation
// writes the register in v.
func copyas(a *obj.Addr, v *obj.Addr) bool {
if regtyp(v) {
@@ -714,7 +714,7 @@ func copyas(a *obj.Addr, v *obj.Addr) bool {
// same register as v.
//
// If a is the from operand, this means this operation reads the
-// register in v. If a is the to operand, this means the operation
+// register in v. If a is the to operand, this means the operation
// either reads or writes the register in v (if !copyas(a, v), then
// the operation reads the register in v).
func copyau(a *obj.Addr, v *obj.Addr) bool {
diff --git a/src/cmd/compile/internal/ppc64/cgen.go b/src/cmd/compile/internal/ppc64/cgen.go
index 740e64cc83..aea09503a8 100644
--- a/src/cmd/compile/internal/ppc64/cgen.go
+++ b/src/cmd/compile/internal/ppc64/cgen.go
@@ -123,7 +123,7 @@ func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
// TODO(austin): Instead of generating ADD $-8,R8; ADD
// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
// generate the offsets directly and eliminate the
- // ADDs. That will produce shorter, more
+ // ADDs. That will produce shorter, more
// pipeline-able code.
var p *obj.Prog
for ; c > 0; c-- {
diff --git a/src/cmd/compile/internal/ppc64/gsubr.go b/src/cmd/compile/internal/ppc64/gsubr.go
index 534ea6290a..ca8be50632 100644
--- a/src/cmd/compile/internal/ppc64/gsubr.go
+++ b/src/cmd/compile/internal/ppc64/gsubr.go
@@ -42,9 +42,9 @@ var resvd = []int{
ppc64.REGZERO,
ppc64.REGSP, // reserved for SP
// We need to preserve the C ABI TLS pointer because sigtramp
- // may happen during C code and needs to access the g. C
+ // may happen during C code and needs to access the g. C
// clobbers REGG, so if Go were to clobber REGTLS, sigtramp
- // won't know which convention to use. By preserving REGTLS,
+ // won't know which convention to use. By preserving REGTLS,
// we can just retrieve g from TLS when we aren't sure.
ppc64.REGTLS,
diff --git a/src/cmd/compile/internal/ppc64/opt.go b/src/cmd/compile/internal/ppc64/opt.go
index 99d2585d00..4f81aa9c1e 100644
--- a/src/cmd/compile/internal/ppc64/opt.go
+++ b/src/cmd/compile/internal/ppc64/opt.go
@@ -5,7 +5,7 @@
package ppc64
// Many Power ISA arithmetic and logical instructions come in four
-// standard variants. These bits let us map between variants.
+// standard variants. These bits let us map between variants.
const (
V_CC = 1 << 0 // xCC (affect CR field 0 flags)
V_V = 1 << 1 // xV (affect SO and OV flags)
diff --git a/src/cmd/compile/internal/ppc64/peep.go b/src/cmd/compile/internal/ppc64/peep.go
index 1ff3109a49..dfd023c766 100644
--- a/src/cmd/compile/internal/ppc64/peep.go
+++ b/src/cmd/compile/internal/ppc64/peep.go
@@ -62,7 +62,7 @@ loop1:
		// distinguish between moves that *must*
// sign/zero extend and moves that don't care so they
// can eliminate moves that don't care without
- // breaking moves that do care. This might let us
+ // breaking moves that do care. This might let us
// simplify or remove the next peep loop, too.
if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
if regtyp(&p.To) {
@@ -962,7 +962,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
// copyas returns 1 if a and v address the same register.
//
// If a is the from operand, this means this operation reads the
-// register in v. If a is the to operand, this means this operation
+// register in v. If a is the to operand, this means this operation
// writes the register in v.
func copyas(a *obj.Addr, v *obj.Addr) bool {
if regtyp(v) {
@@ -979,7 +979,7 @@ func copyas(a *obj.Addr, v *obj.Addr) bool {
// same register as v.
//
// If a is the from operand, this means this operation reads the
-// register in v. If a is the to operand, this means the operation
+// register in v. If a is the to operand, this means the operation
// either reads or writes the register in v (if !copyas(a, v), then
// the operation reads the register in v).
func copyau(a *obj.Addr, v *obj.Addr) bool {
diff --git a/src/cmd/compile/internal/ppc64/prog.go b/src/cmd/compile/internal/ppc64/prog.go
index efeff86dca..4cf10d04a9 100644
--- a/src/cmd/compile/internal/ppc64/prog.go
+++ b/src/cmd/compile/internal/ppc64/prog.go
@@ -180,8 +180,8 @@ func proginfo(p *obj.Prog) {
}
}
-// Instruction variants table. Initially this contains entries only
-// for the "base" form of each instruction. On the first call to
+// Instruction variants table. Initially this contains entries only
+// for the "base" form of each instruction. On the first call to
// as2variant or variant2as, we'll add the variants to the table.
var varianttable = [ppc64.ALAST][4]int{
ppc64.AADD: {ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go
index 7641811a5f..2e520da050 100644
--- a/src/cmd/compile/internal/ssa/block.go
+++ b/src/cmd/compile/internal/ssa/block.go
@@ -8,29 +8,29 @@ import "fmt"
// Block represents a basic block in the control flow graph of a function.
type Block struct {
- // A unique identifier for the block. The system will attempt to allocate
+ // A unique identifier for the block. The system will attempt to allocate
// these IDs densely, but no guarantees.
ID ID
// The kind of block this is.
Kind BlockKind
- // Subsequent blocks, if any. The number and order depend on the block kind.
+ // Subsequent blocks, if any. The number and order depend on the block kind.
// All successors must be distinct (to make phi values in successors unambiguous).
Succs []*Block
// Inverse of successors.
// The order is significant to Phi nodes in the block.
Preds []*Block
- // TODO: predecessors is a pain to maintain. Can we somehow order phi
+ // TODO: predecessors is a pain to maintain. Can we somehow order phi
// arguments by block id and have this field computed explicitly when needed?
- // A value that determines how the block is exited. Its value depends on the kind
- // of the block. For instance, a BlockIf has a boolean control value and BlockExit
+ // A value that determines how the block is exited. Its value depends on the kind
+ // of the block. For instance, a BlockIf has a boolean control value and BlockExit
// has a memory control value.
Control *Value
- // Auxiliary info for the block. Its value depends on the Kind.
+ // Auxiliary info for the block. Its value depends on the Kind.
Aux interface{}
// The unordered set of Values that define the operation of this block.
@@ -97,7 +97,7 @@ func (b *Block) LongString() string {
return s
}
-// AddEdgeTo adds an edge from block b to block c. Used during building of the
+// AddEdgeTo adds an edge from block b to block c. Used during building of the
// SSA graph; do not use on an already-completed SSA graph.
func (b *Block) AddEdgeTo(c *Block) {
b.Succs = append(b.Succs, c)
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
index 54f774004e..7243cdc310 100644
--- a/src/cmd/compile/internal/ssa/check.go
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -33,7 +33,7 @@ func checkFunc(f *Func) {
// If the conditional is true, does v get the value of a or b?
// We could solve this other ways, but the easiest is just to
// require (by possibly adding empty control-flow blocks) that
- // all successors are distinct. They will need to be distinct
+ // all successors are distinct. They will need to be distinct
// anyway for register allocation (duplicate successors implies
// the existence of critical edges).
// After regalloc we can allow non-distinct predecessors.
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index f68819c3c2..5a13b147fc 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -114,9 +114,9 @@ type pass struct {
// PhaseOption sets the specified flag in the specified ssa phase,
// returning empty string if this was successful or a string explaining
-// the error if it was not. A version of the phase name with "_"
+// the error if it was not. A version of the phase name with "_"
// replaced by " " is also checked for a match.
-// See gc/lex.go for dissection of the option string. Example use:
+// See gc/lex.go for dissection of the option string. Example use:
// GO_GCFLAGS=-d=ssa/generic_cse/time,ssa/generic_cse/stats,ssa/generic_cse/debug=3 ./make.bash ...
//
func PhaseOption(phase, flag string, val int) string {
@@ -189,7 +189,7 @@ var passes = [...]pass{
// Double-check phase ordering constraints.
// This code is intended to document the ordering requirements
-// between different phases. It does not override the passes
+// between different phases. It does not override the passes
// list above.
type constraint struct {
a, b string // a must come before b
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 8657509c5c..7d345ae280 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -24,7 +24,7 @@ type Config struct {
optimize bool // Do optimization
curFunc *Func
- // TODO: more stuff. Compiler flags of interest, ...
+ // TODO: more stuff. Compiler flags of interest, ...
// Given an environment variable used for debug hash match,
// what file (if any) receives the yes/no logging?
@@ -95,7 +95,7 @@ type Frontend interface {
Line(int32) string
}
-// interface used to hold *gc.Node. We'd use *gc.Node directly but
+// interface used to hold *gc.Node. We'd use *gc.Node directly but
// that would lead to an import cycle.
type GCNode interface {
Typ() Type
diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go
index c44748535b..817ee4b341 100644
--- a/src/cmd/compile/internal/ssa/cse.go
+++ b/src/cmd/compile/internal/ssa/cse.go
@@ -14,7 +14,7 @@ const (
)
// cse does common-subexpression elimination on the Function.
-// Values are just relinked, nothing is deleted. A subsequent deadcode
+// Values are just relinked, nothing is deleted. A subsequent deadcode
// pass is required to actually remove duplicate expressions.
func cse(f *Func) {
// Two values are equivalent if they satisfy the following definition:
@@ -82,7 +82,7 @@ func cse(f *Func) {
}
// Find an equivalence class where some members of the class have
- // non-equivalent arguments. Split the equivalence class appropriately.
+ // non-equivalent arguments. Split the equivalence class appropriately.
// Repeat until we can't find any more splits.
for {
changed := false
@@ -117,7 +117,7 @@ func cse(f *Func) {
changed = true
continue eqloop
}
- // v and w are equivalent. Keep w in e.
+ // v and w are equivalent. Keep w in e.
j++
}
partition[i] = e
@@ -135,7 +135,7 @@ func cse(f *Func) {
idom := dominators(f)
sdom := newSparseTree(f, idom)
- // Compute substitutions we would like to do. We substitute v for w
+ // Compute substitutions we would like to do. We substitute v for w
// if v and w are in the same equivalence class and v dominates w.
rewrite := make([]*Value, f.NumValues())
for _, e := range partition {
@@ -191,7 +191,7 @@ func cse(f *Func) {
}
}
-// An eqclass approximates an equivalence class. During the
+// An eqclass approximates an equivalence class. During the
// algorithm it may represent the union of several of the
// final equivalence classes.
type eqclass []*Value
@@ -207,7 +207,7 @@ type eqclass []*Value
// - first two arg's opcodes and auxint
// - NOT first two arg's aux; that can break CSE.
// partitionValues returns a list of equivalence classes, each
-// being a sorted by ID list of *Values. The eqclass slices are
+// being a sorted by ID list of *Values. The eqclass slices are
// backed by the same storage as the input slice.
// Equivalence classes of size 1 are ignored.
func partitionValues(a []*Value, auxIDs auxmap) []eqclass {
diff --git a/src/cmd/compile/internal/ssa/deadcode.go b/src/cmd/compile/internal/ssa/deadcode.go
index a33de438e2..819f6de247 100644
--- a/src/cmd/compile/internal/ssa/deadcode.go
+++ b/src/cmd/compile/internal/ssa/deadcode.go
@@ -84,9 +84,9 @@ func liveValues(f *Func, reachable []bool) []bool {
// deadcode removes dead code from f.
func deadcode(f *Func) {
- // deadcode after regalloc is forbidden for now. Regalloc
+ // deadcode after regalloc is forbidden for now. Regalloc
// doesn't quite generate legal SSA which will lead to some
- // required moves being eliminated. See the comment at the
+ // required moves being eliminated. See the comment at the
// top of regalloc.go for details.
if f.RegAlloc != nil {
f.Fatalf("deadcode after regalloc")
@@ -164,7 +164,7 @@ func deadcode(f *Func) {
}
f.Names = f.Names[:i]
- // Remove dead values from blocks' value list. Return dead
+ // Remove dead values from blocks' value list. Return dead
// values to the allocator.
for _, b := range f.Blocks {
i := 0
@@ -184,7 +184,7 @@ func deadcode(f *Func) {
b.Values = b.Values[:i]
}
- // Remove unreachable blocks. Return dead blocks to allocator.
+ // Remove unreachable blocks. Return dead blocks to allocator.
i = 0
for _, b := range f.Blocks {
if reachable[b.ID] {
@@ -235,11 +235,11 @@ func (b *Block) removePred(p *Block) {
v.Args[n] = nil // aid GC
v.Args = v.Args[:n]
phielimValue(v)
- // Note: this is trickier than it looks. Replacing
+ // Note: this is trickier than it looks. Replacing
// a Phi with a Copy can in general cause problems because
// Phi and Copy don't have exactly the same semantics.
// Phi arguments always come from a predecessor block,
- // whereas copies don't. This matters in loops like:
+ // whereas copies don't. This matters in loops like:
// 1: x = (Phi y)
// y = (Add x 1)
// goto 1
@@ -253,15 +253,15 @@ func (b *Block) removePred(p *Block) {
// will barf on it.
//
// Fortunately, this situation can only happen for dead
- // code loops. We know the code we're working with is
+ // code loops. We know the code we're working with is
// not dead, so we're ok.
// Proof: If we have a potential bad cycle, we have a
// situation like this:
// x = (Phi z)
// y = (op1 x ...)
// z = (op2 y ...)
- // Where opX are not Phi ops. But such a situation
- // implies a cycle in the dominator graph. In the
+ // Where opX are not Phi ops. But such a situation
+ // implies a cycle in the dominator graph. In the
// example, x.Block dominates y.Block, y.Block dominates
// z.Block, and z.Block dominates x.Block (treating
// "dominates" as reflexive). Cycles in the dominator
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
index bad0e0096f..20e8368cd5 100644
--- a/src/cmd/compile/internal/ssa/deadstore.go
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -7,7 +7,7 @@ package ssa
// dse does dead-store elimination on the Function.
// Dead stores are those which are unconditionally followed by
// another store to the same location, with no intervening load.
-// This implementation only works within a basic block. TODO: use something more global.
+// This implementation only works within a basic block. TODO: use something more global.
func dse(f *Func) {
var stores []*Value
loadUse := f.newSparseSet(f.NumValues())
@@ -17,7 +17,7 @@ func dse(f *Func) {
shadowed := f.newSparseSet(f.NumValues())
defer f.retSparseSet(shadowed)
for _, b := range f.Blocks {
- // Find all the stores in this block. Categorize their uses:
+ // Find all the stores in this block. Categorize their uses:
// loadUse contains stores which are used by a subsequent load.
// storeUse contains stores which are used by a subsequent store.
loadUse.clear()
@@ -67,9 +67,9 @@ func dse(f *Func) {
b.Fatalf("no last store found - cycle?")
}
- // Walk backwards looking for dead stores. Keep track of shadowed addresses.
+ // Walk backwards looking for dead stores. Keep track of shadowed addresses.
// An "address" is an SSA Value which encodes both the address and size of
- // the write. This code will not remove dead stores to the same address
+ // the write. This code will not remove dead stores to the same address
// of different types.
shadowed.clear()
v := last
diff --git a/src/cmd/compile/internal/ssa/deadstore_test.go b/src/cmd/compile/internal/ssa/deadstore_test.go
index 9ded8bd6e6..c38f1cdbaf 100644
--- a/src/cmd/compile/internal/ssa/deadstore_test.go
+++ b/src/cmd/compile/internal/ssa/deadstore_test.go
@@ -65,7 +65,7 @@ func TestDeadStorePhi(t *testing.T) {
}
func TestDeadStoreTypes(t *testing.T) {
- // Make sure a narrow store can't shadow a wider one. We test an even
+ // Make sure a narrow store can't shadow a wider one. We test an even
// stronger restriction, that one store can't shadow another unless the
// types of the address fields are identical (where identicalness is
// decided by the CSE pass).
diff --git a/src/cmd/compile/internal/ssa/dom.go b/src/cmd/compile/internal/ssa/dom.go
index 2d53b5a957..7de8c354a1 100644
--- a/src/cmd/compile/internal/ssa/dom.go
+++ b/src/cmd/compile/internal/ssa/dom.go
@@ -16,7 +16,7 @@ const (
// of a control-flow graph.
// postorder computes a postorder traversal ordering for the
-// basic blocks in f. Unreachable blocks will not appear.
+// basic blocks in f. Unreachable blocks will not appear.
func postorder(f *Func) []*Block {
mark := make([]byte, f.NumBlocks())
@@ -31,12 +31,12 @@ func postorder(f *Func) []*Block {
b := s[len(s)-1]
switch mark[b.ID] {
case explored:
- // Children have all been visited. Pop & output block.
+ // Children have all been visited. Pop & output block.
s = s[:len(s)-1]
mark[b.ID] = done
order = append(order, b)
case notExplored:
- // Children have not been visited yet. Mark as explored
+ // Children have not been visited yet. Mark as explored
// and queue any children we haven't seen yet.
mark[b.ID] = explored
for _, c := range b.Succs {
@@ -140,9 +140,9 @@ func (f *Func) dfs(entries []*Block, succFn linkedBlocks, dfnum, order, parent [
return
}
-// dominators computes the dominator tree for f. It returns a slice
+// dominators computes the dominator tree for f. It returns a slice
// which maps block ID to the immediate dominator of that block.
-// Unreachable blocks map to nil. The entry block maps to nil.
+// Unreachable blocks map to nil. The entry block maps to nil.
func dominators(f *Func) []*Block {
preds := func(b *Block) []*Block { return b.Preds }
succs := func(b *Block) []*Block { return b.Succs }
@@ -298,9 +298,9 @@ func eval(v ID, ancestor []ID, semi []ID, dfnum []ID, best []ID) ID {
return best[v]
}
-// dominators computes the dominator tree for f. It returns a slice
+// dominators computes the dominator tree for f. It returns a slice
// which maps block ID to the immediate dominator of that block.
-// Unreachable blocks map to nil. The entry block maps to nil.
+// Unreachable blocks map to nil. The entry block maps to nil.
func dominatorsSimple(f *Func) []*Block {
// A simple algorithm for now
// Cooper, Harvey, Kennedy
diff --git a/src/cmd/compile/internal/ssa/flagalloc.go b/src/cmd/compile/internal/ssa/flagalloc.go
index 7ed1fe5908..b9a974155e 100644
--- a/src/cmd/compile/internal/ssa/flagalloc.go
+++ b/src/cmd/compile/internal/ssa/flagalloc.go
@@ -7,18 +7,18 @@ package ssa
const flagRegMask = regMask(1) << 33 // TODO: arch-specific
// flagalloc allocates the flag register among all the flag-generating
-// instructions. Flag values are recomputed if they need to be
+// instructions. Flag values are recomputed if they need to be
// spilled/restored.
func flagalloc(f *Func) {
// Compute the in-register flag value we want at the end of
- // each block. This is basically a best-effort live variable
+ // each block. This is basically a best-effort live variable
// analysis, so it can be much simpler than a full analysis.
// TODO: do we really need to keep flag values live across blocks?
// Could we force the flags register to be unused at basic block
// boundaries? Then we wouldn't need this computation.
end := make([]*Value, f.NumBlocks())
for n := 0; n < 2; n++ {
- // Walk blocks backwards. Poor-man's postorder traversal.
+ // Walk blocks backwards. Poor-man's postorder traversal.
for i := len(f.Blocks) - 1; i >= 0; i-- {
b := f.Blocks[i]
// Walk values backwards to figure out what flag
@@ -117,7 +117,7 @@ func flagalloc(f *Func) {
// subsequent blocks.
_ = v.copyInto(b)
// Note: this flag generator is not properly linked up
- // with the flag users. This breaks the SSA representation.
+ // with the flag users. This breaks the SSA representation.
// We could fix up the users with another pass, but for now
// we'll just leave it. (Regalloc has the same issue for
// standard regs, and it runs next.)
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index 7cc5f6c8d9..b648832d64 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -10,7 +10,7 @@ import (
)
// A Func represents a Go func declaration (or function literal) and
-// its body. This package compiles each Func independently.
+// its body. This package compiles each Func independently.
type Func struct {
Config *Config // architecture information
pass *pass // current pass information (name, options, etc.)
@@ -29,7 +29,7 @@ type Func struct {
// map from LocalSlot to set of Values that we want to store in that slot.
NamedValues map[LocalSlot][]*Value
- // Names is a copy of NamedValues.Keys. We keep a separate list
+ // Names is a copy of NamedValues.Keys. We keep a separate list
// of keys to make iteration order deterministic.
Names []LocalSlot
@@ -109,7 +109,7 @@ func (f *Func) logStat(key string, args ...interface{}) {
f.Config.Warnl(int(f.Entry.Line), "\t%s\t%s%s\t%s", f.pass.name, key, value, f.Name)
}
-// freeValue frees a value. It must no longer be referenced.
+// freeValue frees a value. It must no longer be referenced.
func (f *Func) freeValue(v *Value) {
if v.Block == nil {
f.Fatalf("trying to free an already freed value")
@@ -177,7 +177,7 @@ func (b *Block) NewValue0I(line int32, op Op, t Type, auxint int64) *Value {
// NewValue returns a new value in the block with no arguments and an aux value.
func (b *Block) NewValue0A(line int32, op Op, t Type, aux interface{}) *Value {
if _, ok := aux.(int64); ok {
- // Disallow int64 aux values. They should be in the auxint field instead.
+ // Disallow int64 aux values. They should be in the auxint field instead.
// Maybe we want to allow this at some point, but for now we disallow it
// to prevent errors like using NewValue1A instead of NewValue1I.
b.Fatalf("aux field has int64 type op=%s type=%s aux=%v", op, t, aux)
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 167ec82d18..25a8861130 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -3,8 +3,8 @@
// license that can be found in the LICENSE file.
// x86 register conventions:
-// - Integer types live in the low portion of registers. Upper portions are junk.
-// - Boolean types use the low-order byte of a register. Upper bytes are junk.
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. Upper bytes are junk.
// - We do not use AH,BH,CH,DH registers.
// - Floating-point types will live in the low natural slot of an sse2 register.
// Unused portions are junk.
@@ -335,7 +335,7 @@
// ADDQ $16, SI
// MOVUPS X0, (DI)
// ADDQ $16, DI
-// and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
+// and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
// Large copying uses REP MOVSQ.
(Move [size] dst src mem) && size > 16*64 && size%8 == 0 ->
@@ -529,7 +529,7 @@
// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
// because the x86 instructions are defined to use all 5 bits of the shift even
-// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
+// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
// (SHLW x (MOVWconst [24])), but just in case.
(CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c])
@@ -598,7 +598,7 @@
// sign extended loads
// Note: The combined instruction must end up in the same block
-// as the original load. If not, we end up making a value with
+// as the original load. If not, we end up making a value with
// memory type live in two different blocks, which can lead to
// multiple memory values alive simultaneously.
(MOVBQSX (MOVBload [off] {sym} ptr mem)) -> @v.Args[0].Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index af08d18978..59a94c887e 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -141,7 +141,7 @@ func init() {
// Suffixes encode the bit width of various instructions.
// Q = 64 bit, L = 32 bit, W = 16 bit, B = 8 bit
- // TODO: 2-address instructions. Mark ops as needing matching input/output regs.
+ // TODO: 2-address instructions. Mark ops as needing matching input/output regs.
var AMD64ops = []opData{
// fp ops
{name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS"}, // fp32 add
@@ -500,12 +500,12 @@ func init() {
// arg0=ptr/int arg1=mem, output=int/ptr
{name: "MOVQconvert", argLength: 2, reg: gp11nf, asm: "MOVQ"},
- // Constant flag values. For any comparison, there are 5 possible
+ // Constant flag values. For any comparison, there are 5 possible
// outcomes: the three from the signed total order (<,==,>) and the
- // three from the unsigned total order. The == cases overlap.
+ // three from the unsigned total order. The == cases overlap.
// Note: there's a sixth "unordered" outcome for floating-point
// comparisons, but we don't use such a beast yet.
- // These ops are for temporary use by rewrite rules. They
+ // These ops are for temporary use by rewrite rules. They
// cannot appear in the generated assembly.
{name: "FlagEQ"}, // equal
{name: "FlagLT_ULT"}, // signed < and unsigned <
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index 11c7b9d7a1..5c23fdf614 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -643,7 +643,7 @@
(Arg <t.FieldType(3)> {n} [off+t.FieldOff(3)]))
// strength reduction of divide by a constant.
-// Note: frontend does <=32 bits. We only need to do 64 bits here.
+// Note: frontend does <=32 bits. We only need to do 64 bits here.
// TODO: Do them all here?
// Div/mod by 1. Currently handled by frontend.
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index 31e45c45ea..f1ab468b21 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -6,7 +6,7 @@ package main
var genericOps = []opData{
// 2-input arithmetic
- // Types must be consistent with Go typing. Add, for example, must take two values
+ // Types must be consistent with Go typing. Add, for example, must take two values
// of the same type and produces that same type.
{name: "Add8", argLength: 2, commutative: true}, // arg0 + arg1
{name: "Add16", argLength: 2, commutative: true},
@@ -250,7 +250,7 @@ var genericOps = []opData{
// arg0=ptr/int arg1=mem, output=int/ptr
{name: "Convert", argLength: 2},
- // constants. Constant values are stored in the aux or
+ // constants. Constant values are stored in the aux or
// auxint fields.
{name: "ConstBool", aux: "Bool"}, // auxint is 0 for false and 1 for true
{name: "ConstString", aux: "String"}, // value is aux.(string)
@@ -270,7 +270,7 @@ var genericOps = []opData{
// The address of a variable. arg0 is the base pointer (SB or SP, depending
// on whether it is a global or stack variable). The Aux field identifies the
- // variable. It will be either an *ExternSymbol (with arg0=SB), *ArgSymbol (arg0=SP),
+ // variable. It will be either an *ExternSymbol (with arg0=SB), *ArgSymbol (arg0=SP),
// or *AutoSymbol (arg0=SP).
{name: "Addr", argLength: 1, aux: "Sym"}, // Address of a variable. Arg0=SP or SB. Aux identifies the variable.
@@ -284,8 +284,8 @@ var genericOps = []opData{
{name: "Move", argLength: 3, aux: "Int64"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size. Returns memory.
{name: "Zero", argLength: 2, aux: "Int64"}, // arg0=destptr, arg1=mem, auxint=size. Returns memory.
- // Function calls. Arguments to the call have already been written to the stack.
- // Return values appear on the stack. The method receiver, if any, is treated
+ // Function calls. Arguments to the call have already been written to the stack.
+ // Return values appear on the stack. The method receiver, if any, is treated
// as a phantom first argument.
{name: "ClosureCall", argLength: 3, aux: "Int64"}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory.
{name: "StaticCall", argLength: 1, aux: "SymOff"}, // call function aux.(*gc.Sym), arg0=memory. auxint=arg size. Returns memory.
@@ -368,17 +368,17 @@ var genericOps = []opData{
{name: "StructMake4", argLength: 4}, // arg0..3=field0..3. Returns struct.
{name: "StructSelect", argLength: 1, aux: "Int64"}, // arg0=struct, auxint=field index. Returns the auxint'th field.
- // Spill&restore ops for the register allocator. These are
+ // Spill&restore ops for the register allocator. These are
// semantically identical to OpCopy; they do not take/return
- // stores like regular memory ops do. We can get away without memory
+ // stores like regular memory ops do. We can get away without memory
// args because we know there is no aliasing of spill slots on the stack.
{name: "StoreReg", argLength: 1},
{name: "LoadReg", argLength: 1},
- // Used during ssa construction. Like Copy, but the arg has not been specified yet.
+ // Used during ssa construction. Like Copy, but the arg has not been specified yet.
{name: "FwdRef"},
- // Unknown value. Used for Values whose values don't matter because they are dead code.
+ // Unknown value. Used for Values whose values don't matter because they are dead code.
{name: "Unknown"},
{name: "VarDef", argLength: 1, aux: "Sym", typ: "Mem"}, // aux is a *gc.Node of a variable that is about to be initialized. arg0=mem, returns mem
diff --git a/src/cmd/compile/internal/ssa/gen/main.go b/src/cmd/compile/internal/ssa/gen/main.go
index 660511e46c..2736ed72f7 100644
--- a/src/cmd/compile/internal/ssa/gen/main.go
+++ b/src/cmd/compile/internal/ssa/gen/main.go
@@ -149,8 +149,8 @@ func genOp() {
}
fmt.Fprintln(w, "reg:regInfo{")
- // Compute input allocation order. We allocate from the
- // most to the least constrained input. This order guarantees
+ // Compute input allocation order. We allocate from the
+ // most to the least constrained input. This order guarantees
// that we will always be able to find a register.
var s []intPair
for i, r := range v.reg.inputs {
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index e3e3efac41..61a420270f 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -39,8 +39,8 @@ import (
// variable ::= some token
// opcode ::= one of the opcodes from ../op.go (without the Op prefix)
-// extra conditions is just a chunk of Go that evaluates to a boolean. It may use
-// variables declared in the matching sexpr. The variable "v" is predefined to be
+// extra conditions is just a chunk of Go that evaluates to a boolean. It may use
+// variables declared in the matching sexpr. The variable "v" is predefined to be
// the value matched by the entire rule.
// If multiple rules match, the first one in file order is selected.
@@ -93,8 +93,8 @@ func genRules(arch arch) {
lineno++
line := scanner.Text()
if i := strings.Index(line, "//"); i >= 0 {
- // Remove comments. Note that this isn't string safe, so
- // it will truncate lines with // inside strings. Oh well.
+ // Remove comments. Note that this isn't string safe, so
+ // it will truncate lines with // inside strings. Oh well.
line = line[:i]
}
rule += " " + line
@@ -159,7 +159,7 @@ func genRules(arch arch) {
fmt.Fprintf(w, "return false\n")
fmt.Fprintf(w, "}\n")
- // Generate a routine per op. Note that we don't make one giant routine
+ // Generate a routine per op. Note that we don't make one giant routine
// because it is too big for some compilers.
for _, op := range ops {
fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value, config *Config) bool {\n", arch.name, opName(op, arch))
@@ -190,7 +190,7 @@ func genRules(arch arch) {
fmt.Fprintf(w, "}\n")
}
- // Generate block rewrite function. There are only a few block types
+ // Generate block rewrite function. There are only a few block types
// so we can make this one function with a switch.
fmt.Fprintf(w, "func rewriteBlock%s(b *Block) bool {\n", arch.name)
fmt.Fprintf(w, "switch b.Kind {\n")
@@ -229,7 +229,7 @@ func genRules(arch arch) {
fmt.Fprintf(w, "if !(%s) {\nbreak\n}\n", cond)
}
- // Rule matches. Generate result.
+ // Rule matches. Generate result.
t := split(result[1 : len(result)-1]) // remove parens, then split
newsuccs := t[2:]
@@ -316,7 +316,7 @@ func genMatch(w io.Writer, arch arch, match string) {
func genMatch0(w io.Writer, arch arch, match, v string, m map[string]string, top bool) {
if match[0] != '(' {
if _, ok := m[match]; ok {
- // variable already has a definition. Check whether
+ // variable already has a definition. Check whether
// the old definition and the new definition match.
// For example, (add x x). Equality is just pointer equality
// on Values (so cse is important to do before lowering).
@@ -332,7 +332,7 @@ func genMatch0(w io.Writer, arch arch, match, v string, m map[string]string, top
return
}
- // split body up into regions. Split by spaces/tabs, except those
+ // split body up into regions. Split by spaces/tabs, except those
// contained in () or {}.
s := split(match[1 : len(match)-1]) // remove parens, then split
@@ -348,7 +348,7 @@ func genMatch0(w io.Writer, arch arch, match, v string, m map[string]string, top
// type restriction
t := a[1 : len(a)-1] // remove <>
if !isVariable(t) {
- // code. We must match the results of this code.
+ // code. We must match the results of this code.
fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t)
} else {
// variable
diff --git a/src/cmd/compile/internal/ssa/likelyadjust.go b/src/cmd/compile/internal/ssa/likelyadjust.go
index 6ce8705272..b01651971f 100755
--- a/src/cmd/compile/internal/ssa/likelyadjust.go
+++ b/src/cmd/compile/internal/ssa/likelyadjust.go
@@ -76,7 +76,7 @@ func describeBranchPrediction(f *Func, b *Block, likely, not int8, prediction Br
func likelyadjust(f *Func) {
// The values assigned to certain and local only matter
// in their rank order. 0 is default, more positive
- // is less likely. It's possible to assign a negative
+ // is less likely. It's possible to assign a negative
// unlikeliness (though not currently the case).
certain := make([]int8, f.NumBlocks()) // In the long run, all outcomes are at least this bad. Mainly for Exit
local := make([]int8, f.NumBlocks()) // for our immediate predecessors.
@@ -113,7 +113,7 @@ func likelyadjust(f *Func) {
// Notice that this can act like a "reset" on unlikeliness at loops; the
// default "everything returns" unlikeliness is erased by min with the
// backedge likeliness; however a loop with calls on every path will be
- // tagged with call cost. Net effect is that loop entry is favored.
+ // tagged with call cost. Net effect is that loop entry is favored.
b0 := b.Succs[0].ID
b1 := b.Succs[1].ID
certain[b.ID] = min8(certain[b0], certain[b1])
@@ -204,7 +204,7 @@ func (l *loop) LongString() string {
// nearestOuterLoop returns the outer loop of loop most nearly
// containing block b; the header must dominate b. loop itself
-// is assumed to not be that loop. For acceptable performance,
+// is assumed to not be that loop. For acceptable performance,
// we're relying on loop nests to not be terribly deep.
func (l *loop) nearestOuterLoop(sdom sparseTree, b *Block) *loop {
var o *loop
diff --git a/src/cmd/compile/internal/ssa/magic.go b/src/cmd/compile/internal/ssa/magic.go
index a8e84d5c93..f6297fdfa5 100644
--- a/src/cmd/compile/internal/ssa/magic.go
+++ b/src/cmd/compile/internal/ssa/magic.go
@@ -6,7 +6,7 @@ package ssa
// A copy of the code in ../gc/subr.go.
// We can't use it directly because it would generate
-// an import cycle. TODO: move to a common support package.
+// an import cycle. TODO: move to a common support package.
// argument passing to/from
// smagic and umagic
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
index f8caa7b042..ccd443197a 100644
--- a/src/cmd/compile/internal/ssa/nilcheck.go
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -43,7 +43,7 @@ func nilcheckelim(f *Func) {
work = append(work, bp{block: f.Entry})
// map from value ID to bool indicating if value is known to be non-nil
- // in the current dominator path being walked. This slice is updated by
+ // in the current dominator path being walked. This slice is updated by
// walkStates to maintain the known non-nil values.
nonNilValues := make([]bool, f.NumValues())
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index 7b2a8f8f04..d64a41ed45 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -52,7 +52,7 @@ const (
auxSymValAndOff // aux is a symbol, auxInt is a ValAndOff
)
-// A ValAndOff is used by the several opcodes. It holds
+// A ValAndOff is used by the several opcodes. It holds
// both a value and a pointer offset.
// A ValAndOff is intended to be encoded into an AuxInt field.
// The zero ValAndOff encodes a value of 0 and an offset of 0.
diff --git a/src/cmd/compile/internal/ssa/phielim.go b/src/cmd/compile/internal/ssa/phielim.go
index d69449ee21..0461e6e079 100644
--- a/src/cmd/compile/internal/ssa/phielim.go
+++ b/src/cmd/compile/internal/ssa/phielim.go
@@ -5,8 +5,8 @@
package ssa
// phielim eliminates redundant phi values from f.
-// A phi is redundant if its arguments are all equal. For
-// purposes of counting, ignore the phi itself. Both of
+// A phi is redundant if its arguments are all equal. For
+// purposes of counting, ignore the phi itself. Both of
// these phis are redundant:
// v = phi(x,x,x)
// v = phi(x,v,x,v)
@@ -58,8 +58,8 @@ func phielimValue(v *Value) bool {
}
if w == nil {
- // v references only itself. It must be in
- // a dead code loop. Don't bother modifying it.
+ // v references only itself. It must be in
+ // a dead code loop. Don't bother modifying it.
return false
}
v.Op = OpCopy
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index e900a3cfb8..f9680e4202 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -4,9 +4,9 @@
// Register allocation.
//
-// We use a version of a linear scan register allocator. We treat the
+// We use a version of a linear scan register allocator. We treat the
// whole function as a single long basic block and run through
-// it using a greedy register allocator. Then all merge edges
+// it using a greedy register allocator. Then all merge edges
// (those targeting a block with len(Preds)>1) are processed to
// shuffle data into the place that the target of the edge expects.
//
@@ -15,7 +15,7 @@
// value whose next use is farthest in the future.
//
// The register allocator requires that a block is not scheduled until
-// at least one of its predecessors have been scheduled. The most recent
+// at least one of its predecessors have been scheduled. The most recent
// such predecessor provides the starting register state for a block.
//
// It also requires that there are no critical edges (critical =
@@ -29,28 +29,28 @@
// For every value, we generate a spill immediately after the value itself.
// x = Op y z : AX
// x2 = StoreReg x
-// While AX still holds x, any uses of x will use that value. When AX is needed
+// While AX still holds x, any uses of x will use that value. When AX is needed
// for another value, we simply reuse AX. Spill code has already been generated
-// so there is no code generated at "spill" time. When x is referenced
+// so there is no code generated at "spill" time. When x is referenced
// subsequently, we issue a load to restore x to a register using x2 as
// its argument:
// x3 = Restore x2 : CX
// x3 can then be used wherever x is referenced again.
// If the spill (x2) is never used, it will be removed at the end of regalloc.
//
-// Phi values are special, as always. We define two kinds of phis, those
+// Phi values are special, as always. We define two kinds of phis, those
// where the merge happens in a register (a "register" phi) and those where
// the merge happens in a stack location (a "stack" phi).
//
// A register phi must have the phi and all of its inputs allocated to the
-// same register. Register phis are spilled similarly to regular ops:
+// same register. Register phis are spilled similarly to regular ops:
// b1: y = ... : AX b2: z = ... : AX
// goto b3 goto b3
// b3: x = phi(y, z) : AX
// x2 = StoreReg x
//
// A stack phi must have the phi and all of its inputs allocated to the same
-// stack location. Stack phis start out life already spilled - each phi
+// stack location. Stack phis start out life already spilled - each phi
// input must be a store (using StoreReg) at the end of the corresponding
// predecessor block.
// b1: y = ... : AX b2: z = ... : BX
@@ -64,12 +64,12 @@
// TODO
// Use an affinity graph to mark two values which should use the
-// same register. This affinity graph will be used to prefer certain
-// registers for allocation. This affinity helps eliminate moves that
+// same register. This affinity graph will be used to prefer certain
+// registers for allocation. This affinity helps eliminate moves that
// are required for phi implementations and helps generate allocations
// for 2-register architectures.
-// Note: regalloc generates a not-quite-SSA output. If we have:
+// Note: regalloc generates a not-quite-SSA output. If we have:
//
// b1: x = ... : AX
// x2 = StoreReg x
@@ -85,8 +85,8 @@
// add a x4:CX->BX copy at the end of b4.
// But the definition of x3 doesn't dominate b2. We should really
// insert a dummy phi at the start of b2 (x5=phi(x3,x4):BX) to keep
-// SSA form. For now, we ignore this problem as remaining in strict
-// SSA form isn't needed after regalloc. We'll just leave the use
+// SSA form. For now, we ignore this problem as remaining in strict
+// SSA form isn't needed after regalloc. We'll just leave the use
// of x3 not dominated by the definition of x3, and the CX->BX copy
// will have no use (so don't run deadcode after regalloc!).
// TODO: maybe we should introduce these extra phis?
@@ -102,7 +102,7 @@ import (
const regDebug = false // TODO: compiler flag
const logSpills = false
-// regalloc performs register allocation on f. It sets f.RegAlloc
+// regalloc performs register allocation on f. It sets f.RegAlloc
// to the resulting allocation.
func regalloc(f *Func) {
var s regAllocState
@@ -276,7 +276,7 @@ type startReg struct {
vid ID // pre-regalloc value needed in this register
}
-// freeReg frees up register r. Any current user of r is kicked out.
+// freeReg frees up register r. Any current user of r is kicked out.
func (s *regAllocState) freeReg(r register) {
v := s.regs[r].v
if v == nil {
@@ -355,18 +355,18 @@ func (s *regAllocState) allocReg(v *Value, mask regMask) register {
return pickReg(mask)
}
- // Pick a value to spill. Spill the value with the
+ // Pick a value to spill. Spill the value with the
// farthest-in-the-future use.
// TODO: Prefer registers with already spilled Values?
// TODO: Modify preference using affinity graph.
// TODO: if a single value is in multiple registers, spill one of them
// before spilling a value in just a single register.
- // SP and SB are allocated specially. No regular value should
+ // SP and SB are allocated specially. No regular value should
// be allocated to them.
mask &^= 1<<4 | 1<<32
- // Find a register to spill. We spill the register containing the value
+ // Find a register to spill. We spill the register containing the value
// whose next use is as far in the future as possible.
// https://en.wikipedia.org/wiki/Page_replacement_algorithm#The_theoretically_optimal_page_replacement_algorithm
var r register
@@ -378,7 +378,7 @@ func (s *regAllocState) allocReg(v *Value, mask regMask) register {
v := s.regs[t].v
if n := s.values[v.ID].uses.dist; n > maxuse {
// v's next use is farther in the future than any value
- // we've seen so far. A new best spill candidate.
+ // we've seen so far. A new best spill candidate.
r = t
maxuse = n
}
@@ -476,7 +476,7 @@ func (s *regAllocState) init(f *Func) {
}
s.computeLive()
- // Compute block order. This array allows us to distinguish forward edges
+ // Compute block order. This array allows us to distinguish forward edges
// from backward edges and compute how far they go.
blockOrder := make([]int32, f.NumBlocks())
for i, b := range f.Blocks {
@@ -589,7 +589,7 @@ func (s *regAllocState) regalloc(f *Func) {
liveSet.remove(v.ID)
if v.Op == OpPhi {
// Remove v from the live set, but don't add
- // any inputs. This is the state the len(b.Preds)>1
+ // any inputs. This is the state the len(b.Preds)>1
// case below desires; it wants to process phis specially.
continue
}
@@ -653,7 +653,7 @@ func (s *regAllocState) regalloc(f *Func) {
}
}
} else {
- // This is the complicated case. We have more than one predecessor,
+ // This is the complicated case. We have more than one predecessor,
// which means we may have Phi ops.
// Copy phi ops into new schedule.
@@ -674,7 +674,7 @@ func (s *regAllocState) regalloc(f *Func) {
}
}
- // Decide on registers for phi ops. Use the registers determined
+ // Decide on registers for phi ops. Use the registers determined
// by the primary predecessor if we can.
// TODO: pick best of (already processed) predecessors?
// Majority vote? Deepest nesting level?
@@ -728,7 +728,7 @@ func (s *regAllocState) regalloc(f *Func) {
}
}
- // Set registers for phis. Add phi spill code.
+ // Set registers for phis. Add phi spill code.
for i, v := range phis {
if !s.values[v.ID].needReg {
continue
@@ -861,8 +861,8 @@ func (s *regAllocState) regalloc(f *Func) {
continue
}
if v.Op == OpArg {
- // Args are "pre-spilled" values. We don't allocate
- // any register here. We just set up the spill pointer to
+ // Args are "pre-spilled" values. We don't allocate
+ // any register here. We just set up the spill pointer to
// point at itself and any later user will restore it to use it.
s.values[v.ID].spill = v
s.values[v.ID].spillUsed = true // use is guaranteed
@@ -886,7 +886,7 @@ func (s *regAllocState) regalloc(f *Func) {
continue
}
- // Move arguments to registers. Process in an ordering defined
+ // Move arguments to registers. Process in an ordering defined
// by the register specification (most constrained first).
args = append(args[:0], v.Args...)
for _, i := range regspec.inputs {
@@ -926,7 +926,7 @@ func (s *regAllocState) regalloc(f *Func) {
}
b.Values = append(b.Values, v)
- // Issue a spill for this value. We issue spills unconditionally,
+ // Issue a spill for this value. We issue spills unconditionally,
// then at the end of regalloc delete the ones we never use.
// TODO: schedule the spill at a point that dominates all restores.
// The restore may be off in an unlikely branch somewhere and it
@@ -1002,7 +1002,7 @@ func (s *regAllocState) regalloc(f *Func) {
// If a value is live at the end of the block and
// isn't in a register, remember that its spill location
- // is live. We need to remember this information so that
+ // is live. We need to remember this information so that
// the liveness analysis in stackalloc is correct.
for _, e := range s.live[b.ID] {
if s.values[e.ID].regs != 0 {
@@ -1201,7 +1201,7 @@ func (e *edgeState) process() {
}
}
if i < len(dsts) {
- // Made some progress. Go around again.
+ // Made some progress. Go around again.
dsts = dsts[:i]
// Append any extras destinations we generated.
@@ -1210,7 +1210,7 @@ func (e *edgeState) process() {
continue
}
- // We made no progress. That means that any
+ // We made no progress. That means that any
// remaining unsatisfied moves are in simple cycles.
// For example, A -> B -> C -> D -> A.
// A ----> B
@@ -1229,7 +1229,7 @@ func (e *edgeState) process() {
// When we resume the outer loop, the A->B move can now proceed,
// and eventually the whole cycle completes.
- // Copy any cycle location to a temp register. This duplicates
+ // Copy any cycle location to a temp register. This duplicates
// one of the cycle entries, allowing the just duplicated value
// to be overwritten and the cycle to proceed.
loc := dsts[0].loc
@@ -1248,7 +1248,7 @@ func (e *edgeState) process() {
}
}
-// processDest generates code to put value vid into location loc. Returns true
+// processDest generates code to put value vid into location loc. Returns true
// if progress was made.
func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool {
occupant := e.contents[loc]
@@ -1258,7 +1258,7 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool {
if splice != nil {
*splice = occupant.c
}
- // Note: if splice==nil then c will appear dead. This is
+ // Note: if splice==nil then c will appear dead. This is
// non-SSA formed code, so be careful after this pass not to run
// deadcode elimination.
return true
@@ -1306,7 +1306,7 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool {
if dstReg {
x = v.copyInto(e.p)
} else {
- // Rematerialize into stack slot. Need a free
+ // Rematerialize into stack slot. Need a free
// register to accomplish this.
e.erase(loc) // see pre-clobber comment below
r := e.findRegFor(v.Type)
@@ -1330,15 +1330,15 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool {
if dstReg {
x = e.p.NewValue1(c.Line, OpLoadReg, c.Type, c)
} else {
- // mem->mem. Use temp register.
+ // mem->mem. Use temp register.
- // Pre-clobber destination. This avoids the
+ // Pre-clobber destination. This avoids the
// following situation:
// - v is currently held in R0 and stacktmp0.
// - We want to copy stacktmp1 to stacktmp0.
// - We choose R0 as the temporary register.
// During the copy, both R0 and stacktmp0 are
- // clobbered, losing both copies of v. Oops!
+ // clobbered, losing both copies of v. Oops!
// Erasing the destination early means R0 will not
// be chosen as the temp register, as it will then
// be the last copy of v.
@@ -1438,7 +1438,7 @@ func (e *edgeState) findRegFor(typ Type) Location {
m = e.s.compatRegs(e.s.f.Config.fe.TypeInt64())
}
- // Pick a register. In priority order:
+ // Pick a register. In priority order:
// 1) an unused register
// 2) a non-unique register not holding a final value
// 3) a non-unique register
@@ -1455,9 +1455,9 @@ func (e *edgeState) findRegFor(typ Type) Location {
return &registers[pickReg(x)]
}
- // No register is available. Allocate a temp location to spill a register to.
+ // No register is available. Allocate a temp location to spill a register to.
// The type of the slot is immaterial - it will not be live across
- // any safepoint. Just use a type big enough to hold any register.
+ // any safepoint. Just use a type big enough to hold any register.
typ = e.s.f.Config.fe.TypeInt64()
t := LocalSlot{e.s.f.Config.fe.Auto(typ), typ, 0}
// TODO: reuse these slots.
@@ -1471,7 +1471,7 @@ func (e *edgeState) findRegFor(typ Type) Location {
if regDebug {
fmt.Printf(" SPILL %s->%s %s\n", r.Name(), t.Name(), x.LongString())
}
- // r will now be overwritten by the caller. At some point
+ // r will now be overwritten by the caller. At some point
// later, the newly saved value will be moved back to its
// final destination in processDest.
return r
@@ -1508,10 +1508,10 @@ type liveInfo struct {
}
// computeLive computes a map from block ID to a list of value IDs live at the end
-// of that block. Together with the value ID is a count of how many instructions
-// to the next use of that value. The resulting map is stored at s.live.
+// of that block. Together with the value ID is a count of how many instructions
+// to the next use of that value. The resulting map is stored at s.live.
// TODO: this could be quadratic if lots of variables are live across lots of
-// basic blocks. Figure out a way to make this function (or, more precisely, the user
+// basic blocks. Figure out a way to make this function (or, more precisely, the user
// of this function) require only linear size & time.
func (s *regAllocState) computeLive() {
f := s.f
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index 60509d214e..86f3c2010e 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -105,7 +105,7 @@ func addOff(x, y int64) int64 {
return z
}
-// mergeSym merges two symbolic offsets. There is no real merging of
+// mergeSym merges two symbolic offsets. There is no real merging of
// offsets, we just pick the non-nil one.
func mergeSym(x, y interface{}) interface{} {
if x == nil {
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index dd0a42a5dd..f47f93c5c0 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -15,10 +15,10 @@ const (
ScoreCount // not a real score
)
-// Schedule the Values in each Block. After this phase returns, the
+// Schedule the Values in each Block. After this phase returns, the
// order of b.Values matters and is the order in which those values
-// will appear in the assembly output. For now it generates a
-// reasonable valid schedule using a priority queue. TODO(khr):
+// will appear in the assembly output. For now it generates a
+// reasonable valid schedule using a priority queue. TODO(khr):
// schedule smarter.
func schedule(f *Func) {
// For each value, the number of times it is used in the block
@@ -28,7 +28,7 @@ func schedule(f *Func) {
// "priority" for a value
score := make([]uint8, f.NumValues())
- // scheduling order. We queue values in this list in reverse order.
+ // scheduling order. We queue values in this list in reverse order.
var order []*Value
// priority queue of legally schedulable (0 unscheduled uses) values
@@ -36,7 +36,7 @@ func schedule(f *Func) {
// maps mem values to the next live memory value
nextMem := make([]*Value, f.NumValues())
- // additional pretend arguments for each Value. Used to enforce load/store ordering.
+ // additional pretend arguments for each Value. Used to enforce load/store ordering.
additionalArgs := make([][]*Value, f.NumValues())
for _, b := range f.Blocks {
@@ -77,12 +77,12 @@ func schedule(f *Func) {
uses[v.ID]++
}
}
- // Compute score. Larger numbers are scheduled closer to the end of the block.
+ // Compute score. Larger numbers are scheduled closer to the end of the block.
for _, v := range b.Values {
switch {
case v.Op == OpAMD64LoweredGetClosurePtr:
// We also score GetLoweredClosurePtr as early as possible to ensure that the
- // context register is not stomped. GetLoweredClosurePtr should only appear
+ // context register is not stomped. GetLoweredClosurePtr should only appear
// in the entry block where there are no phi functions, so there is no
// conflict or ambiguity here.
if b != f.Entry {
@@ -96,8 +96,8 @@ func schedule(f *Func) {
// We want all the vardefs next.
score[v.ID] = ScoreVarDef
case v.Type.IsMemory():
- // Schedule stores as early as possible. This tends to
- // reduce register pressure. It also helps make sure
+ // Schedule stores as early as possible. This tends to
+ // reduce register pressure. It also helps make sure
// VARDEF ops are scheduled before the corresponding LEA.
score[v.ID] = ScoreMemory
case v.Type.IsFlags():
@@ -117,7 +117,7 @@ func schedule(f *Func) {
// Schedule values dependent on the control value at the end.
// This reduces the number of register spills. We don't find
// all values that depend on the control, just values with a
- // direct dependency. This is cheaper and in testing there
+ // direct dependency. This is cheaper and in testing there
// was no difference in the number of spills.
for _, v := range b.Values {
if v.Op != OpPhi {
diff --git a/src/cmd/compile/internal/ssa/sparsetree.go b/src/cmd/compile/internal/ssa/sparsetree.go
index 9a08f35d9d..cae91e7ddb 100644
--- a/src/cmd/compile/internal/ssa/sparsetree.go
+++ b/src/cmd/compile/internal/ssa/sparsetree.go
@@ -99,7 +99,7 @@ func (t sparseTree) numberBlock(b *Block, n int32) int32 {
// Sibling returns a sibling of x in the dominator tree (i.e.,
// a node with the same immediate dominator) or nil if there
// are no remaining siblings in the arbitrary but repeatable
-// order chosen. Because the Child-Sibling order is used
+// order chosen. Because the Child-Sibling order is used
// to assign entry and exit numbers in the treewalk, those
// numbers are also consistent with this order (i.e.,
// Sibling(x) has entry number larger than x's exit number).
@@ -108,7 +108,7 @@ func (t sparseTree) Sibling(x *Block) *Block {
}
// Child returns a child of x in the dominator tree, or
-// nil if there are none. The choice of first child is
+// nil if there are none. The choice of first child is
// arbitrary but repeatable.
func (t sparseTree) Child(x *Block) *Block {
return t[x.ID].child
diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go
index 0e6cae0924..ef8a5846b0 100644
--- a/src/cmd/compile/internal/ssa/stackalloc.go
+++ b/src/cmd/compile/internal/ssa/stackalloc.go
@@ -91,8 +91,8 @@ func (s *stackAllocState) stackalloc() {
// For each type, we keep track of all the stack slots we
// have allocated for that type.
- // TODO: share slots among equivalent types. We would need to
- // only share among types with the same GC signature. See the
+ // TODO: share slots among equivalent types. We would need to
+ // only share among types with the same GC signature. See the
// type.Equal calls below for where this matters.
locations := map[Type][]LocalSlot{}
@@ -177,7 +177,7 @@ func (s *stackAllocState) stackalloc() {
// computeLive computes a map from block ID to a list of
// stack-slot-needing value IDs live at the end of that block.
// TODO: this could be quadratic if lots of variables are live across lots of
-// basic blocks. Figure out a way to make this function (or, more precisely, the user
+// basic blocks. Figure out a way to make this function (or, more precisely, the user
// of this function) require only linear size & time.
func (s *stackAllocState) computeLive(spillLive [][]ID) {
s.live = make([][]ID, s.f.NumBlocks())
@@ -206,7 +206,7 @@ func (s *stackAllocState) computeLive(spillLive [][]ID) {
if v.Op == OpPhi {
// Save phi for later.
// Note: its args might need a stack slot even though
- // the phi itself doesn't. So don't use needSlot.
+ // the phi itself doesn't. So don't use needSlot.
if !v.Type.IsMemory() && !v.Type.IsVoid() {
phis = append(phis, v)
}
@@ -299,7 +299,7 @@ func (s *stackAllocState) buildInterferenceGraph() {
if v.Op == OpArg && s.values[v.ID].needSlot {
// OpArg is an input argument which is pre-spilled.
// We add back v.ID here because we want this value
- // to appear live even before this point. Being live
+ // to appear live even before this point. Being live
// all the way to the start of the entry block prevents other
// values from being allocated to the same slot and clobbering
// the input value before we have a chance to load it.
diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go
index cc8c9fe871..3ec788355d 100644
--- a/src/cmd/compile/internal/ssa/value.go
+++ b/src/cmd/compile/internal/ssa/value.go
@@ -10,21 +10,21 @@ import (
)
// A Value represents a value in the SSA representation of the program.
-// The ID and Type fields must not be modified. The remainder may be modified
+// The ID and Type fields must not be modified. The remainder may be modified
// if they preserve the value of the Value (e.g. changing a (mul 2 x) to an (add x x)).
type Value struct {
- // A unique identifier for the value. For performance we allocate these IDs
+ // A unique identifier for the value. For performance we allocate these IDs
// densely starting at 1. There is no guarantee that there won't be occasional holes, though.
ID ID
- // The operation that computes this value. See op.go.
+ // The operation that computes this value. See op.go.
Op Op
- // The type of this value. Normally this will be a Go type, but there
+ // The type of this value. Normally this will be a Go type, but there
// are a few other pseudo-types, see type.go.
Type Type
- // Auxiliary info for this value. The type of this information depends on the opcode and type.
+ // Auxiliary info for this value. The type of this information depends on the opcode and type.
// AuxInt is used for integer values, Aux is used for other values.
AuxInt int64
Aux interface{}
@@ -49,7 +49,7 @@ type Value struct {
// OpConst int64 0 int64 constant
// OpAddcq int64 1 amd64 op: v = arg[0] + constant
-// short form print. Just v#.
+// short form print. Just v#.
func (v *Value) String() string {
if v == nil {
return "nil" // should never happen, but not panicking helps with debugging
diff --git a/src/cmd/compile/internal/x86/cgen64.go b/src/cmd/compile/internal/x86/cgen64.go
index 7e40a32db3..8bb7d371a3 100644
--- a/src/cmd/compile/internal/x86/cgen64.go
+++ b/src/cmd/compile/internal/x86/cgen64.go
@@ -95,7 +95,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
split64(r, &lo2, &hi2)
}
- // Do op. Leave result in DX:AX.
+ // Do op. Leave result in DX:AX.
switch n.Op {
// TODO: Constants
case gc.OADD:
diff --git a/src/cmd/compile/internal/x86/gsubr.go b/src/cmd/compile/internal/x86/gsubr.go
index 98595716cf..4fd6680cb4 100644
--- a/src/cmd/compile/internal/x86/gsubr.go
+++ b/src/cmd/compile/internal/x86/gsubr.go
@@ -1511,7 +1511,7 @@ func floatmove_387(f *gc.Node, t *gc.Node) {
// The way the code generator uses floating-point
// registers, a move from F0 to F0 is intended as a no-op.
// On the x86, it's not: it pushes a second copy of F0
- // on the floating point stack. So toss it away here.
+ // on the floating point stack. So toss it away here.
// Also, F0 is the *only* register we ever evaluate
// into, so we should only see register/register as F0/F0.
/*
diff --git a/src/cmd/compile/internal/x86/peep.go b/src/cmd/compile/internal/x86/peep.go
index 239e9cc35f..b9f05d3b47 100644
--- a/src/cmd/compile/internal/x86/peep.go
+++ b/src/cmd/compile/internal/x86/peep.go
@@ -221,7 +221,7 @@ loop1:
// MOVSD removal.
// We never use packed registers, so a MOVSD between registers
// can be replaced by MOVAPD, which moves the pair of float64s
- // instead of just the lower one. We only use the lower one, but
+ // instead of just the lower one. We only use the lower one, but
// the processor can do better if we do moves using both.
for r := g.Start; r != nil; r = r.Link {
p = r.Prog