| | | |
|---|---|---|
| author | Russ Cox <rsc@golang.org> | 2022-02-03 14:12:08 -0500 |
| committer | Russ Cox <rsc@golang.org> | 2022-04-11 16:34:30 +0000 |
| commit | 19309779ac5e2f5a2fd3cbb34421dafb2855ac21 (patch) | |
| tree | 67dfd3e5d96250325e383183f95b6f5fe1968514 /src/cmd/compile | |
| parent | 017933163ab6a2b254f0310c61b57db65cded92e (diff) | |
| download | go-19309779ac5e2f5a2fd3cbb34421dafb2855ac21.tar.xz | |
all: gofmt main repo
[This CL is part of a sequence implementing the proposal #51082.
The design doc is at https://go.dev/s/godocfmt-design.]
Run the updated gofmt, which reformats doc comments,
on the main repository. Vendored files are excluded.
For #51082.
Change-Id: I7332f099b60f716295fb34719c98c04eb1a85407
Reviewed-on: https://go-review.googlesource.com/c/go/+/384268
Reviewed-by: Jonathan Amsterdam <jba@google.com>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Diffstat (limited to 'src/cmd/compile')
60 files changed, 504 insertions, 399 deletions
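Every hunk below is an instance of the same mechanical rewrite, so it may help to see the pattern in miniature. As a hedged sketch (a made-up declaration, not one of the files touched by this CL), a doc comment written in the old style:

```go
// Scale returns the expression
//	v * k
// subject to:
// * k must be nonzero
// The steps are:
// 1) check bounds,
// 2) multiply.
func Scale(v, k int) int { return v * k }
```

comes out of the updated gofmt as:

```go
// Scale returns the expression
//
//	v * k
//
// subject to:
//   - k must be nonzero
//
// The steps are:
//  1. check bounds,
//  2. multiply.
func Scale(v, k int) int { return v * k }
```

Indented lines are set off as preformatted blocks by blank `//` lines, `*` bullets become `-` items, and `1)` enumerators become `1.`, following the canonical doc comment format in the design doc linked above.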
```diff
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 9628ce5644..2dae55ba86 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -111,7 +111,9 @@ func moveByType(t *types.Type) obj.As {
 }
 
 // opregreg emits instructions for
-// dest := dest(To) op src(From)
+//
+//	dest := dest(To) op src(From)
+//
 // and also returns the created obj.Prog so it
 // may be further adjusted (offset, scale, etc).
 func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index be01914d08..8c2ea49c8f 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -522,7 +522,8 @@ func InlineCalls(fn *ir.Func) {
 // but then you may as well do it here. so this is cleaner and
 // shorter and less complicated.
 // The result of inlnode MUST be assigned back to n, e.g.
-// n.Left = inlnode(n.Left)
+//
+//	n.Left = inlnode(n.Left)
 func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
 	if n == nil {
 		return n
@@ -657,7 +658,8 @@ var NewInline = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCa
 // inlined function body, and (List, Rlist) contain the (input, output)
 // parameters.
 // The result of mkinlcall MUST be assigned back to n, e.g.
-// n.Left = mkinlcall(n.Left, fn, isddd)
+//
+//	n.Left = mkinlcall(n.Left, fn, isddd)
 func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
 	if fn.Inl == nil {
 		if logopt.Enabled() {
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index b5c0983d6a..4f1f582fa1 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -951,11 +951,11 @@ var IsIntrinsicCall = func(*CallExpr) bool { return false }
 // instead of computing both. SameSafeExpr assumes that l and r are
 // used in the same statement or expression. In order for it to be
 // safe to reuse l or r, they must:
-// * be the same expression
-// * not have side-effects (no function calls, no channel ops);
-// however, panics are ok
-// * not cause inappropriate aliasing; e.g. two string to []byte
-// conversions, must result in two distinct slices
+//   - be the same expression
+//   - not have side-effects (no function calls, no channel ops);
+//     however, panics are ok
+//   - not cause inappropriate aliasing; e.g. two string to []byte
+//     conversions, must result in two distinct slices
 //
 // The handling of OINDEXMAP is subtle. OINDEXMAP can occur both
 // as an lvalue (map assignment) and an rvalue (map access). This is
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index 5e5868abb2..24908f3a13 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -551,7 +551,8 @@ func SetPos(n Node) src.XPos {
 }
 
 // The result of InitExpr MUST be assigned back to n, e.g.
-// n.X = InitExpr(init, n.X)
+//
+//	n.X = InitExpr(init, n.X)
 func InitExpr(init []Node, expr Node) Node {
 	if len(init) == 0 {
 		return expr
diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go
index 3202e506c8..bd0a6fa1a3 100644
--- a/src/cmd/compile/internal/liveness/plive.go
+++ b/src/cmd/compile/internal/liveness/plive.go
@@ -244,8 +244,10 @@ func (lv *liveness) initcache() {
 // liveness effects on a variable.
 //
 // The possible flags are:
+//
 //	uevar - used by the instruction
 //	varkill - killed by the instruction (set)
+//
 // A kill happens after the use (for an instruction that updates a value, for example).
 type liveEffect int
 
@@ -1460,14 +1462,14 @@ func (lv *liveness) emitStackObjects() *obj.LSym {
 // isfat reports whether a variable of type t needs multiple assignments to initialize.
 // For example:
 //
-// type T struct { x, y int }
-// x := T{x: 0, y: 1}
+//	type T struct { x, y int }
+//	x := T{x: 0, y: 1}
 //
 // Then we need:
 //
-// var t T
-// t.x = 0
-// t.y = 1
+//	var t T
+//	t.x = 0
+//	t.y = 1
 //
 // to fully initialize t.
 func isfat(t *types.Type) bool {
diff --git a/src/cmd/compile/internal/noder/unified.go b/src/cmd/compile/internal/noder/unified.go
index 2c1f2362ad..e7a4001cec 100644
--- a/src/cmd/compile/internal/noder/unified.go
+++ b/src/cmd/compile/internal/noder/unified.go
@@ -33,38 +33,38 @@ var localPkgReader *pkgReader
 //
 // The pipeline contains 2 steps:
 //
-// 1) Generate package export data "stub".
+//  1. Generate package export data "stub".
 //
-// 2) Generate package IR from package export data.
+//  2. Generate package IR from package export data.
 //
 // The package data "stub" at step (1) contains everything from the local package,
 // but nothing that have been imported. When we're actually writing out export data
 // to the output files (see writeNewExport function), we run the "linker", which does
 // a few things:
 //
-// + Updates compiler extensions data (e.g., inlining cost, escape analysis results).
+//   - Updates compiler extensions data (e.g., inlining cost, escape analysis results).
 //
-// + Handles re-exporting any transitive dependencies.
+//   - Handles re-exporting any transitive dependencies.
 //
-// + Prunes out any unnecessary details (e.g., non-inlineable functions, because any
-// downstream importers only care about inlinable functions).
+//   - Prunes out any unnecessary details (e.g., non-inlineable functions, because any
+//     downstream importers only care about inlinable functions).
 //
 // The source files are typechecked twice, once before writing export data
 // using types2 checker, once after read export data using gc/typecheck.
 // This duplication of work will go away once we always use types2 checker,
 // we can remove the gc/typecheck pass. The reason it is still here:
 //
-// + It reduces engineering costs in maintaining a fork of typecheck
-// (e.g., no need to backport fixes like CL 327651).
+//   - It reduces engineering costs in maintaining a fork of typecheck
+//     (e.g., no need to backport fixes like CL 327651).
 //
-// + It makes it easier to pass toolstash -cmp.
+//   - It makes it easier to pass toolstash -cmp.
 //
-// + Historically, we would always re-run the typechecker after import, even though
-// we know the imported data is valid. It's not ideal, but also not causing any
-// problem either.
+//   - Historically, we would always re-run the typechecker after import, even though
+//     we know the imported data is valid. It's not ideal, but also not causing any
+//     problem either.
 //
-// + There's still transformation that being done during gc/typecheck, like rewriting
-// multi-valued function call, or transform ir.OINDEX -> ir.OINDEXMAP.
+//   - There's still transformation that being done during gc/typecheck, like rewriting
+//     multi-valued function call, or transform ir.OINDEX -> ir.OINDEXMAP.
 //
 // Using syntax+types2 tree, which already has a complete representation of generics,
 // the unified IR has the full typed AST for doing introspection during step (1).
diff --git a/src/cmd/compile/internal/pkginit/init.go b/src/cmd/compile/internal/pkginit/init.go
index 40f1408260..32e95bedc2 100644
--- a/src/cmd/compile/internal/pkginit/init.go
+++ b/src/cmd/compile/internal/pkginit/init.go
@@ -65,9 +65,9 @@ func MakeInit() {
 // Task makes and returns an initialization record for the package.
 // See runtime/proc.go:initTask for its layout.
 // The 3 tasks for initialization are:
-// 1) Initialize all of the packages the current package depends on.
-// 2) Initialize all the variables that have initializers.
-// 3) Run any init functions.
+//  1. Initialize all of the packages the current package depends on.
+//  2. Initialize all the variables that have initializers.
+//  3. Run any init functions.
 func Task() *ir.Name {
 	var deps []*obj.LSym // initTask records for packages the current package depends on
 	var fns []*obj.LSym  // functions to call for package initialization
diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go
index 526315d557..9fe90da0fe 100644
--- a/src/cmd/compile/internal/reflectdata/alg.go
+++ b/src/cmd/compile/internal/reflectdata/alg.go
@@ -681,7 +681,8 @@ func anyCall(fn *ir.Func) bool {
 }
 
 // eqfield returns the node
-// p.field == q.field
+//
+//	p.field == q.field
 func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
 	nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)
 	ny := ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)
@@ -690,9 +691,13 @@ func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
 }
 
 // EqString returns the nodes
-// len(s) == len(t)
+//
+//	len(s) == len(t)
+//
 // and
-// memequal(s.ptr, t.ptr, len(s))
+//
+//	memequal(s.ptr, t.ptr, len(s))
+//
 // which can be used to construct string equality comparison.
 // eqlen must be evaluated before eqmem, and shortcircuiting is required.
 func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
@@ -714,9 +719,13 @@ func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
 }
 
 // EqInterface returns the nodes
-// s.tab == t.tab (or s.typ == t.typ, as appropriate)
+//
+//	s.tab == t.tab (or s.typ == t.typ, as appropriate)
+//
 // and
-// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
+//
+//	ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
+//
 // which can be used to construct interface equality comparison.
 // eqtab must be evaluated before eqdata, and shortcircuiting is required.
 func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
@@ -750,7 +759,8 @@ func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
 }
 
 // eqmem returns the node
-// memequal(&p.field, &q.field [, size])
+//
+//	memequal(&p.field, &q.field [, size])
 func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
 	nx := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)))
 	ny := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)))
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index 9961c8f65a..a8d81b9a21 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -667,10 +667,10 @@ var kinds = []int{
 // tflag is documented in reflect/type.go.
 //
 // tflag values must be kept in sync with copies in:
-// - cmd/compile/internal/reflectdata/reflect.go
-// - cmd/link/internal/ld/decodesym.go
-// - reflect/type.go
-// - runtime/type.go
+//   - cmd/compile/internal/reflectdata/reflect.go
+//   - cmd/link/internal/ld/decodesym.go
+//   - reflect/type.go
+//   - runtime/type.go
 const (
 	tflagUncommon  = 1 << 0
 	tflagExtraStar = 1 << 1
@@ -1794,13 +1794,17 @@ func NeedEmit(typ *types.Type) bool {
 // Also wraps methods on instantiated generic types for use in itab entries.
 // For an instantiated generic type G[int], we generate wrappers like:
 // G[int] pointer shaped:
+//
 //	func (x G[int]) f(arg) {
 //		.inst.G[int].f(dictionary, x, arg)
-// }
+//	}
+//
 // G[int] not pointer shaped:
+//
 //	func (x *G[int]) f(arg) {
 //		.inst.G[int].f(dictionary, *x, arg)
-// }
+//	}
+//
 // These wrappers are always fully stenciled.
 func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSym {
 	orig := rcvr
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
index deb6c79006..8f9c157d9a 100644
--- a/src/cmd/compile/internal/s390x/ssa.go
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -132,7 +132,9 @@ func moveByType(t *types.Type) obj.As {
 }
 
 // opregreg emits instructions for
-// dest := dest(To) op src(From)
+//
+//	dest := dest(To) op src(From)
+//
 // and also returns the created obj.Prog so it
 // may be further adjusted (offset, scale, etc).
 func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
@@ -145,7 +147,9 @@ func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
 }
 
 // opregregimm emits instructions for
+//
 //	dest := src(From) op off
+//
 // and also returns the created obj.Prog so it
 // may be further adjusted (offset, scale, etc).
 func opregregimm(s *ssagen.State, op obj.As, dest, src int16, off int64) *obj.Prog {
diff --git a/src/cmd/compile/internal/ssa/addressingmodes.go b/src/cmd/compile/internal/ssa/addressingmodes.go
index d600e31666..c18ea68665 100644
--- a/src/cmd/compile/internal/ssa/addressingmodes.go
+++ b/src/cmd/compile/internal/ssa/addressingmodes.go
@@ -131,10 +131,14 @@ var needSplit = map[Op]bool{
 }
 
 // For each entry k, v in this map, if we have a value x with:
-// x.Op == k[0]
-// x.Args[0].Op == k[1]
+//
+//	x.Op == k[0]
+//	x.Args[0].Op == k[1]
+//
 // then we can set x.Op to v and set x.Args like this:
-// x.Args[0].Args + x.Args[1:]
+//
+//	x.Args[0].Args + x.Args[1:]
+//
 // Additionally, the Aux/AuxInt from x.Args[0] is merged into x.
 var combine = map[[2]Op]Op{
 	// amd64
diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go
index 4d21ade3e3..db7df3f338 100644
--- a/src/cmd/compile/internal/ssa/block.go
+++ b/src/cmd/compile/internal/ssa/block.go
@@ -71,19 +71,25 @@ type Block struct {
 // Edge represents a CFG edge.
 // Example edges for b branching to either c or d.
 // (c and d have other predecessors.)
-// b.Succs = [{c,3}, {d,1}]
-// c.Preds = [?, ?, ?, {b,0}]
-// d.Preds = [?, {b,1}, ?]
+//
+//	b.Succs = [{c,3}, {d,1}]
+//	c.Preds = [?, ?, ?, {b,0}]
+//	d.Preds = [?, {b,1}, ?]
+//
 // These indexes allow us to edit the CFG in constant time.
 // In addition, it informs phi ops in degenerate cases like:
-// b:
-// if k then c else c
-// c:
-// v = Phi(x, y)
+//
+//	b:
+//	   if k then c else c
+//	c:
+//	   v = Phi(x, y)
+//
 // Then the indexes tell you whether x is chosen from
 // the if or else branch from b.
-// b.Succs = [{c,0},{c,1}]
-// c.Preds = [{b,0},{b,1}]
+//
+//	b.Succs = [{c,0},{c,1}]
+//	c.Preds = [{b,0},{b,1}]
+//
 // means x is chosen if k is true.
 type Edge struct {
 	// block edge goes to (in a Succs list) or from (in a Preds list)
@@ -106,12 +112,13 @@ func (e Edge) String() string {
 }
 
 // BlockKind is the kind of SSA block.
-// kind controls successors
-// ------------------------------------------
-// Exit [return mem] []
-// Plain [] [next]
-// If [boolean Value] [then, else]
-// Defer [mem] [nopanic, panic] (control opcode should be OpStaticCall to runtime.deferproc)
+//
+//	kind    controls         successors
+//	------------------------------------------
+//	Exit    [return mem]     []
+//	Plain   []               [next]
+//	If      [boolean Value]  [then, else]
+//	Defer   [mem]            [nopanic, panic]  (control opcode should be OpStaticCall to runtime.deferproc)
 type BlockKind int8
 
 // short form print
@@ -330,10 +337,12 @@ func (b *Block) swapSuccessors() {
 //
 // b.removePred(i)
 // for _, v := range b.Values {
-// if v.Op != OpPhi {
-// continue
-// }
-// b.removeArg(v, i)
+//
+//	if v.Op != OpPhi {
+//		continue
+//	}
+//	b.removeArg(v, i)
+//
 // }
 func (b *Block) removePhiArg(phi *Value, i int) {
 	n := len(b.Preds)
diff --git a/src/cmd/compile/internal/ssa/branchelim.go b/src/cmd/compile/internal/ssa/branchelim.go
index 59773ef31b..7a08654f4e 100644
--- a/src/cmd/compile/internal/ssa/branchelim.go
+++ b/src/cmd/compile/internal/ssa/branchelim.go
@@ -11,11 +11,11 @@ import "cmd/internal/src"
 //
 // Search for basic blocks that look like
 //
-// bb0 bb0
-// | \ / \
-// | bb1 or bb1 bb2 <- trivial if/else blocks
-// | / \ /
-// bb2 bb3
+//	bb0            bb0
+//	 | \          /   \
+//	 | bb1   or  bb1  bb2    <- trivial if/else blocks
+//	 | /          \   /
+//	bb2            bb3
 //
 // where the intermediate blocks are mostly empty (with no side-effects);
 // rewrite Phis in the postdominator as CondSelects.
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index f95140eaf9..5e898ab96f 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -250,8 +250,8 @@ var GenssaDump map[string]bool = make(map[string]bool) // names of functions to
 // version is used as a regular expression to match the phase name(s).
 //
 // Special cases that have turned out to be useful:
-// - ssa/check/on enables checking after each phase
-// - ssa/all/time enables time reporting for all phases
+//   - ssa/check/on enables checking after each phase
+//   - ssa/all/time enables time reporting for all phases
 //
 // See gc/lex.go for dissection of the option string.
 // Example uses:
diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go
index ade5e0648e..f4b799394c 100644
--- a/src/cmd/compile/internal/ssa/cse.go
+++ b/src/cmd/compile/internal/ssa/cse.go
@@ -235,14 +235,15 @@ type eqclass []*Value
 
 // partitionValues partitions the values into equivalence classes
 // based on having all the following features match:
-// - opcode
-// - type
-// - auxint
-// - aux
-// - nargs
-// - block # if a phi op
-// - first two arg's opcodes and auxint
-// - NOT first two arg's aux; that can break CSE.
+//   - opcode
+//   - type
+//   - auxint
+//   - aux
+//   - nargs
+//   - block # if a phi op
+//   - first two arg's opcodes and auxint
+//   - NOT first two arg's aux; that can break CSE.
+//
 // partitionValues returns a list of equivalence classes, each
 // being a sorted by ID list of *Values. The eqclass slices are
 // backed by the same storage as the input slice.
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
index 08dc5c468e..2c18d35204 100644
--- a/src/cmd/compile/internal/ssa/debug.go
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -402,28 +402,28 @@ func (sc *slotCanonicalizer) canonSlot(idx SlKeyIdx) LocalSlot {
 // OpArg{Int,Float}Reg values, inserting additional values in
 // cases where they are missing. Example:
 //
-// func foo(s string, used int, notused int) int {
-// return len(s) + used
-// }
+//	func foo(s string, used int, notused int) int {
+//		return len(s) + used
+//	}
 //
 // In the function above, the incoming parameter "used" is fully live,
 // "notused" is not live, and "s" is partially live (only the length
 // field of the string is used). At the point where debug value
 // analysis runs, we might expect to see an entry block with:
 //
-// b1:
-// v4 = ArgIntReg <uintptr> {s+8} [0] : BX
-// v5 = ArgIntReg <int> {used} [0] : CX
+//	b1:
+//	v4 = ArgIntReg <uintptr> {s+8} [0] : BX
+//	v5 = ArgIntReg <int> {used} [0] : CX
 //
 // While this is an accurate picture of the live incoming params,
 // we also want to have debug locations for non-live params (or
 // their non-live pieces), e.g. something like
 //
-// b1:
-// v9 = ArgIntReg <*uint8> {s+0} [0] : AX
-// v4 = ArgIntReg <uintptr> {s+8} [0] : BX
-// v5 = ArgIntReg <int> {used} [0] : CX
-// v10 = ArgIntReg <int> {unused} [0] : DI
+//	b1:
+//	v9 = ArgIntReg <*uint8> {s+0} [0] : AX
+//	v4 = ArgIntReg <uintptr> {s+8} [0] : BX
+//	v5 = ArgIntReg <int> {used} [0] : CX
+//	v10 = ArgIntReg <int> {unused} [0] : DI
 //
 // This function examines the live OpArg{Int,Float}Reg values and
 // synthesizes new (dead) values for the non-live params or the
@@ -1489,14 +1489,14 @@ func setupLocList(ctxt *obj.Link, f *Func, list []byte, st, en ID) ([]byte, int)
 // that spills a register arg. It returns the ID of that instruction
 // Example:
 //
-// b1:
-// v3 = ArgIntReg <int> {p1+0} [0] : AX
-// ... more arg regs ..
-// v4 = ArgFloatReg <float32> {f1+0} [0] : X0
-// v52 = MOVQstore <mem> {p1} v2 v3 v1
-// ... more stores ...
-// v68 = MOVSSstore <mem> {f4} v2 v67 v66
-// v38 = MOVQstoreconst <mem> {blob} [val=0,off=0] v2 v32
+//	b1:
+//	v3 = ArgIntReg <int> {p1+0} [0] : AX
+//	... more arg regs ..
+//	v4 = ArgFloatReg <float32> {f1+0} [0] : X0
+//	v52 = MOVQstore <mem> {p1} v2 v3 v1
+//	... more stores ...
+//	v68 = MOVSSstore <mem> {f4} v2 v67 v66
+//	v38 = MOVQstoreconst <mem> {blob} [val=0,off=0] v2 v32
 //
 // Important: locatePrologEnd is expected to work properly only with
 // optimization turned off (e.g. "-N"). If optimization is enabled
diff --git a/src/cmd/compile/internal/ssa/debug_test.go b/src/cmd/compile/internal/ssa/debug_test.go
index 2fc12557c0..c807863ea6 100644
--- a/src/cmd/compile/internal/ssa/debug_test.go
+++ b/src/cmd/compile/internal/ssa/debug_test.go
@@ -84,7 +84,7 @@ var optimizedLibs = (!strings.Contains(gogcflags, "-N") && !strings.Contains(gog
 // "O" is an explicit indication that we expect it to be optimized out.
 // For example:
 //
-// if len(os.Args) > 1 { //gdb-dbg=(hist/A,cannedInput/A) //dlv-dbg=(hist/A,cannedInput/A)
+//	if len(os.Args) > 1 { //gdb-dbg=(hist/A,cannedInput/A) //dlv-dbg=(hist/A,cannedInput/A)
 //
 // TODO: not implemented for Delve yet, but this is the plan
 //
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
index a3cea855f2..b774ea78b1 100644
--- a/src/cmd/compile/internal/ssa/expand_calls.go
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -656,15 +656,16 @@ outer:
 // It decomposes a Load or an Arg into smaller parts and returns the new mem.
 // If the type does not match one of the expected aggregate types, it returns nil instead.
 // Parameters:
-// pos -- the location of any generated code.
-// b -- the block into which any generated code should normally be placed
-// source -- the value, possibly an aggregate, to be stored.
-// mem -- the mem flowing into this decomposition (loads depend on it, stores updated it)
-// t -- the type of the value to be stored
-// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + storeOffset
-// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
-// storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
-// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
+//
+//	pos -- the location of any generated code.
+//	b -- the block into which any generated code should normally be placed
+//	source -- the value, possibly an aggregate, to be stored.
+//	mem -- the mem flowing into this decomposition (loads depend on it, stores updated it)
+//	t -- the type of the value to be stored
+//	storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + storeOffset
+//	loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
+//	storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
+//	           StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
 func (x *expandState) decomposeArg(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
 	pa := x.prAssignForArg(source)
@@ -777,15 +778,16 @@ func (x *expandState) splitSlotsIntoNames(locs []*LocalSlot, suffix string, off
 // It decomposes a Load into smaller parts and returns the new mem.
 // If the type does not match one of the expected aggregate types, it returns nil instead.
 // Parameters:
-// pos -- the location of any generated code.
-// b -- the block into which any generated code should normally be placed
-// source -- the value, possibly an aggregate, to be stored.
-// mem -- the mem flowing into this decomposition (loads depend on it, stores updated it)
-// t -- the type of the value to be stored
-// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + offset
-// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
-// storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
-// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
+//
+//	pos -- the location of any generated code.
+//	b -- the block into which any generated code should normally be placed
+//	source -- the value, possibly an aggregate, to be stored.
+//	mem -- the mem flowing into this decomposition (loads depend on it, stores updated it)
+//	t -- the type of the value to be stored
+//	storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + offset
+//	loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
+//	storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
+//	           StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
 //
 // TODO -- this needs cleanup; it just works for SSA-able aggregates, and won't fully generalize to register-args aggregates.
 func (x *expandState) decomposeLoad(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
index 0b5392f0f0..35a9382663 100644
--- a/src/cmd/compile/internal/ssa/func.go
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -820,17 +820,22 @@ func (f *Func) invalidateCFG() {
 }
 
 // DebugHashMatch reports whether environment variable evname
-// 1) is empty (this is a special more-quickly implemented case of 3)
-// 2) is "y" or "Y"
-// 3) is a suffix of the sha1 hash of name
-// 4) is a suffix of the environment variable
+//  1. is empty (this is a special more-quickly implemented case of 3)
+//  2. is "y" or "Y"
+//  3. is a suffix of the sha1 hash of name
+//  4. is a suffix of the environment variable
 //     fmt.Sprintf("%s%d", evname, n)
 //     provided that all such variables are nonempty for 0 <= i <= n
+//
 // Otherwise it returns false.
 // When true is returned the message
-// "%s triggered %s\n", evname, name
+//
+//	"%s triggered %s\n", evname, name
+//
 // is printed on the file named in environment variable
-// GSHS_LOGFILE
+//
+//	GSHS_LOGFILE
+//
 // or standard out if that is empty or there is an error
 // opening the file.
 func (f *Func) DebugHashMatch(evname string) bool {
diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go
index fec2ba8773..2b176dfa7b 100644
--- a/src/cmd/compile/internal/ssa/fuse.go
+++ b/src/cmd/compile/internal/ssa/fuse.go
@@ -55,19 +55,21 @@ func fuse(f *Func, typ fuseType) {
 // fuseBlockIf handles the following cases where s0 and s1 are empty blocks.
 //
-// b b b b
-// \ / \ / | \ / \ / | | |
-// s0 s1 | s1 s0 | | |
-// \ / | / \ | | |
-// ss ss ss ss
+//	    b       b       b      b
+//	 \ / \ /  | \ /   \ / |   | |
+//	  s0 s1   | s1    s0 |    | |
+//	   \ /    | /      \ |    | |
+//	    ss     ss       ss     ss
 //
 // If all Phi ops in ss have identical variables for slots corresponding to
 // s0, s1 and b then the branch can be dropped.
 // This optimization often comes up in switch statements with multiple
 // expressions in a case clause:
-// switch n {
-// case 1,2,3: return 4
-// }
+//
+//	switch n {
+//	case 1,2,3: return 4
+//	}
+//
 // TODO: If ss doesn't contain any OpPhis, are s0 and s1 dead code anyway.
 func fuseBlockIf(b *Block) bool {
 	if b.Kind != BlockIf {
diff --git a/src/cmd/compile/internal/ssa/fuse_branchredirect.go b/src/cmd/compile/internal/ssa/fuse_branchredirect.go
index 27449db55a..59570968a2 100644
--- a/src/cmd/compile/internal/ssa/fuse_branchredirect.go
+++ b/src/cmd/compile/internal/ssa/fuse_branchredirect.go
@@ -8,21 +8,24 @@ package ssa
 // of an If block can be derived from its predecessor If block, in
 // some such cases, we can redirect the predecessor If block to the
 // corresponding successor block directly. For example:
-// p:
-// v11 = Less64 <bool> v10 v8
-// If v11 goto b else u
-// b: <- p ...
-// v17 = Leq64 <bool> v10 v8
-// If v17 goto s else o
+//
+//	p:
+//	  v11 = Less64 <bool> v10 v8
+//	  If v11 goto b else u
+//	b: <- p ...
+//	  v17 = Leq64 <bool> v10 v8
+//	  If v17 goto s else o
+//
 // We can redirect p to s directly.
 //
 // The implementation here borrows the framework of the prove pass.
-// 1, Traverse all blocks of function f to find If blocks.
-// 2, For any If block b, traverse all its predecessors to find If blocks.
-// 3, For any If block predecessor p, update relationship p->b.
-// 4, Traverse all successors of b.
-// 5, For any successor s of b, try to update relationship b->s, if a
-// contradiction is found then redirect p to another successor of b.
+//
+//	1, Traverse all blocks of function f to find If blocks.
+//	2, For any If block b, traverse all its predecessors to find If blocks.
+//	3, For any If block predecessor p, update relationship p->b.
+//	4, Traverse all successors of b.
+//	5, For any successor s of b, try to update relationship b->s, if a
+//	contradiction is found then redirect p to another successor of b.
 func fuseBranchRedirect(f *Func) bool {
 	ft := newFactsTable(f)
 	ft.checkpoint()
diff --git a/src/cmd/compile/internal/ssa/fuse_comparisons.go b/src/cmd/compile/internal/ssa/fuse_comparisons.go
index d843fc3fda..f5fb84b0d7 100644
--- a/src/cmd/compile/internal/ssa/fuse_comparisons.go
+++ b/src/cmd/compile/internal/ssa/fuse_comparisons.go
@@ -9,22 +9,22 @@ package ssa
 //
 // Look for branch structure like:
 //
-// p
-// |\
-// | b
-// |/ \
-// s0 s1
+//	p
+//	|\
+//	| b
+//	|/ \
+//	s0 s1
 //
 // In our example, p has control '1 <= x', b has control 'x < 5',
 // and s0 and s1 are the if and else results of the comparison.
 //
 // This will be optimized into:
 //
-// p
-// \
-// b
-// / \
-// s0 s1
+//	p
+//	 \
+//	  b
+//	 / \
+//	s0  s1
 //
 // where b has the combined control value 'unsigned(x-1) < 4'.
 // Later passes will then fuse p and b.
diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go
index d69db404ed..00aea87936 100644
--- a/src/cmd/compile/internal/ssa/location.go
+++ b/src/cmd/compile/internal/ssa/location.go
@@ -46,19 +46,19 @@ func (r *Register) GCNum() int16 {
 // variable that has been decomposed into multiple stack slots.
 // As an example, a string could have the following configurations:
 //
-// stack layout LocalSlots
+//	stack layout            LocalSlots
 //
-// Optimizations are disabled. s is on the stack and represented in its entirety.
-// [ ------- s string ---- ] { N: s, Type: string, Off: 0 }
+//	Optimizations are disabled. s is on the stack and represented in its entirety.
+//	[ ------- s string ---- ] { N: s, Type: string, Off: 0 }
 //
-// s was not decomposed, but the SSA operates on its parts individually, so
-// there is a LocalSlot for each of its fields that points into the single stack slot.
-// [ ------- s string ---- ] { N: s, Type: *uint8, Off: 0 }, {N: s, Type: int, Off: 8}
+//	s was not decomposed, but the SSA operates on its parts individually, so
+//	there is a LocalSlot for each of its fields that points into the single stack slot.
+//	[ ------- s string ---- ] { N: s, Type: *uint8, Off: 0 }, {N: s, Type: int, Off: 8}
 //
-// s was decomposed. Each of its fields is in its own stack slot and has its own LocalSLot.
-// [ ptr *uint8 ] [ len int] { N: ptr, Type: *uint8, Off: 0, SplitOf: parent, SplitOffset: 0},
-// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
-// parent = &{N: s, Type: string}
+//	s was decomposed. Each of its fields is in its own stack slot and has its own LocalSLot.
+//	[ ptr *uint8 ] [ len int] { N: ptr, Type: *uint8, Off: 0, SplitOf: parent, SplitOffset: 0},
+//	                          { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
+//	parent = &{N: s, Type: string}
 type LocalSlot struct {
 	N    *ir.Name    // an ONAME *ir.Name representing a stack location.
 	Type *types.Type // type of slot
diff --git a/src/cmd/compile/internal/ssa/loopbce.go b/src/cmd/compile/internal/ssa/loopbce.go
index 206aab2c5e..dd63541771 100644
--- a/src/cmd/compile/internal/ssa/loopbce.go
+++ b/src/cmd/compile/internal/ssa/loopbce.go
@@ -31,9 +31,10 @@ type indVar struct {
 // parseIndVar checks whether the SSA value passed as argument is a valid induction
 // variable, and, if so, extracts:
-// * the minimum bound
-// * the increment value
-// * the "next" value (SSA value that is Phi'd into the induction variable every loop)
+//   - the minimum bound
+//   - the increment value
+//   - the "next" value (SSA value that is Phi'd into the induction variable every loop)
+//
 // Currently, we detect induction variables that match (Phi min nxt),
 // with nxt being (Add inc ind).
 // If it can't parse the induction variable correctly, it returns (nil, nil, nil).
@@ -66,19 +67,18 @@ func parseIndVar(ind *Value) (min, inc, nxt *Value) {
 //
 // Look for variables and blocks that satisfy the following
 //
-// loop:
-// ind = (Phi min nxt),
-// if ind < max
-// then goto enter_loop
-// else goto exit_loop
-//
-// enter_loop:
-// do something
-// nxt = inc + ind
-// goto loop
+//	loop:
+//	  ind = (Phi min nxt),
+//	  if ind < max
+//	    then goto enter_loop
+//	    else goto exit_loop
 //
-// exit_loop:
+//	enter_loop:
+//	  do something
+//	  nxt = inc + ind
+//	  goto loop
 //
+//	exit_loop:
 //
 // TODO: handle 32 bit operations
 func findIndVar(f *Func) []indVar {
diff --git a/src/cmd/compile/internal/ssa/looprotate.go b/src/cmd/compile/internal/ssa/looprotate.go
index 35010a78d8..2eefda1c8b 100644
--- a/src/cmd/compile/internal/ssa/looprotate.go
+++ b/src/cmd/compile/internal/ssa/looprotate.go
@@ -8,19 +8,19 @@ package ssa
 // to loops with a check-loop-condition-at-end.
 // This helps loops avoid extra unnecessary jumps.
 //
-// loop:
-// CMPQ ...
-// JGE exit
-// ...
-// JMP loop
-// exit:
+//	loop:
+//	  CMPQ ...
+//	  JGE exit
+//	  ...
+//	  JMP loop
+//	exit:
 //
-// JMP entry
-// loop:
-// ...
-// entry:
-// CMPQ ...
-// JLT loop
+//	  JMP entry
+//	loop:
+//	  ...
+//	entry:
+//	  CMPQ ...
+//	  JLT loop
 func loopRotate(f *Func) {
 	loopnest := f.loopnest()
 	if loopnest.hasIrreducible {
diff --git a/src/cmd/compile/internal/ssa/magic.go b/src/cmd/compile/internal/ssa/magic.go
index 93f8801bce..e903d92bb6 100644
--- a/src/cmd/compile/internal/ssa/magic.go
+++ b/src/cmd/compile/internal/ssa/magic.go
@@ -110,7 +110,8 @@ type umagicData struct {
 // umagic computes the constants needed to strength reduce unsigned n-bit divides by the constant uint64(c).
 // The return values satisfy for all 0 <= x < 2^n
-// floor(x / uint64(c)) = x * (m + 2^n) >> (n+s)
+//
+//	floor(x / uint64(c)) = x * (m + 2^n) >> (n+s)
 func umagic(n uint, c int64) umagicData {
 	// Convert from ConstX auxint values to the real uint64 constant they represent.
 	d := uint64(c) << (64 - n) >> (64 - n)
@@ -183,7 +184,8 @@ type smagicData struct {
 // magic computes the constants needed to strength reduce signed n-bit divides by the constant c.
 // Must have c>0.
 // The return values satisfy for all -2^(n-1) <= x < 2^(n-1)
-// trunc(x / c) = x * m >> (n+s) + (x < 0 ? 1 : 0)
+//
+//	trunc(x / c) = x * m >> (n+s) + (x < 0 ? 1 : 0)
 func smagic(n uint, c int64) smagicData {
 	C := new(big.Int).SetInt64(c)
 	s := C.BitLen() - 1
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index a1835dcd30..a3e8dcd2f6 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -391,9 +391,9 @@ const (
 // A Sym represents a symbolic offset from a base register.
 // Currently a Sym can be one of 3 things:
-// - a *gc.Node, for an offset from SP (the stack pointer)
-// - a *obj.LSym, for an offset from SB (the global pointer)
-// - nil, for no offset
+//   - a *gc.Node, for an offset from SP (the stack pointer)
+//   - a *obj.LSym, for an offset from SB (the global pointer)
+//   - nil, for no offset
 type Sym interface {
 	CanBeAnSSASym()
 	CanBeAnSSAAux()
@@ -479,12 +479,13 @@ const (
 )
 
 // boundsAPI determines which register arguments a bounds check call should use. For an [a:b:c] slice, we do:
-// CMPQ c, cap
-// JA fail1
-// CMPQ b, c
-// JA fail2
-// CMPQ a, b
-// JA fail3
+//
+//	CMPQ c, cap
+//	JA   fail1
+//	CMPQ b, c
+//	JA   fail2
+//	CMPQ a, b
+//	JA   fail3
 //
 // fail1: CALL panicSlice3Acap (c, cap)
 // fail2: CALL panicSlice3B (b, c)
diff --git a/src/cmd/compile/internal/ssa/phielim.go b/src/cmd/compile/internal/ssa/phielim.go
index 761cb7a392..4fc942375f 100644
--- a/src/cmd/compile/internal/ssa/phielim.go
+++ b/src/cmd/compile/internal/ssa/phielim.go
@@ -8,13 +8,19 @@ package ssa
 // A phi is redundant if its arguments are all equal. For
 // purposes of counting, ignore the phi itself. Both of
 // these phis are redundant:
-// v = phi(x,x,x)
-// v = phi(x,v,x,v)
+//
+//	v = phi(x,x,x)
+//	v = phi(x,v,x,v)
+//
 // We repeat this process to also catch situations like:
-// v = phi(x, phi(x, x), phi(x, v))
+//
+//	v = phi(x, phi(x, x), phi(x, v))
+//
 // TODO: Can we also simplify cases like:
-// v = phi(v, w, x)
-// w = phi(v, w, x)
+//
+//	v = phi(v, w, x)
+//	w = phi(v, w, x)
+//
 // and would that be useful?
 func phielim(f *Func) {
 	for {
diff --git a/src/cmd/compile/internal/ssa/phiopt.go b/src/cmd/compile/internal/ssa/phiopt.go
index 0357442ae9..037845eacf 100644
--- a/src/cmd/compile/internal/ssa/phiopt.go
+++ b/src/cmd/compile/internal/ssa/phiopt.go
@@ -7,20 +7,22 @@ package ssa
 // phiopt eliminates boolean Phis based on the previous if.
 //
 // Main use case is to transform:
-// x := false
-// if b {
-// x = true
-// }
+//
+//	x := false
+//	if b {
+//		x = true
+//	}
+//
 // into x = b.
 //
 // In SSA code this appears as
 //
-// b0
-// If b -> b1 b2
-// b1
-// Plain -> b2
-// b2
-// x = (OpPhi (ConstBool [true]) (ConstBool [false]))
+//	b0
+//	  If b -> b1 b2
+//	b1
+//	  Plain -> b2
+//	b2
+//	  x = (OpPhi (ConstBool [true]) (ConstBool [false]))
 //
 // In this case we can replace x with a copy of b.
 func phiopt(f *Func) {
diff --git a/src/cmd/compile/internal/ssa/poset.go b/src/cmd/compile/internal/ssa/poset.go
index 200106e66d..a3b4f0fea4 100644
--- a/src/cmd/compile/internal/ssa/poset.go
+++ b/src/cmd/compile/internal/ssa/poset.go
@@ -140,11 +140,11 @@ type posetNode struct {
 // to record that A<I, A<J, A<K (with no known relation between I,J,K), we create the
 // following DAG:
 //
-// A
-// / \
-// I extra
-// / \
-// J K
+//	  A
+//	 / \
+//	I   extra
+//	   / \
+//	  J   K
 type poset struct {
 	lastidx uint32 // last generated dense index
 	flags   uint8  // internal flags
diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go
index d0c9a190ad..8f86e16112 100644
--- a/src/cmd/compile/internal/ssa/prove.go
+++ b/src/cmd/compile/internal/ssa/prove.go
@@ -27,17 +27,17 @@ const (
 //
 // E.g.
 //
-// r := relation(...)
+//	r := relation(...)
 //
-// if v < w {
-// newR := r & lt
-// }
-// if v >= w {
-// newR := r & (eq|gt)
-// }
-// if v != w {
-// newR := r & (lt|gt)
-// }
+//	if v < w {
+//	  newR := r & lt
+//	}
+//	if v >= w {
+//	  newR := r & (eq|gt)
+//	}
+//	if v != w {
+//	  newR := r & (lt|gt)
+//	}
 type relation uint
 
 const (
@@ -746,19 +746,19 @@ func (ft *factsTable) cleanup(f *Func) {
 // By far, the most common redundant pair are generated by bounds checking.
 // For example for the code:
 //
-// a[i] = 4
-// foo(a[i])
+//	a[i] = 4
+//	foo(a[i])
 //
 // The compiler will generate the following code:
 //
-// if i >= len(a) {
-// panic("not in bounds")
-// }
-// a[i] = 4
-// if i >= len(a) {
-// panic("not in bounds")
-// }
-// foo(a[i])
+//	if i >= len(a) {
+//		panic("not in bounds")
+//	}
+//	a[i] = 4
+//	if i >= len(a) {
+//		panic("not in bounds")
+//	}
+//	foo(a[i])
 //
 // The second comparison i >= len(a) is clearly redundant because if the
 // else branch of the first comparison is executed, we already know that i < len(a).
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index eb8fa0c02a..248060d27d 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -962,8 +962,9 @@ found:
 // clobber invalidates values. Returns true.
 // clobber is used by rewrite rules to:
-// A) make sure the values are really dead and never used again.
-// B) decrement use counts of the values' args.
+//
+//	A) make sure the values are really dead and never used again.
+//	B) decrement use counts of the values' args.
 func clobber(vv ...*Value) bool {
 	for _, v := range vv {
 		v.reset(OpInvalid)
@@ -985,7 +986,9 @@ func clobberIfDead(v *Value) bool {
 // noteRule is an easy way to track if a rule is matched when writing
 // new ones. Make the rule of interest also conditional on
-// noteRule("note to self: rule of interest matched")
+//
+//	noteRule("note to self: rule of interest matched")
+//
 // and that message will print when the rule matches.
 func noteRule(s string) bool {
 	fmt.Println(s)
@@ -1789,9 +1792,11 @@ func sequentialAddresses(x, y *Value, n int64) bool {
 // We happen to match the semantics to those of arm/arm64.
 // Note that these semantics differ from x86: the carry flag has the opposite
 // sense on a subtraction!
-// On amd64, C=1 represents a borrow, e.g. SBB on amd64 does x - y - C.
-// On arm64, C=0 represents a borrow, e.g. SBC on arm64 does x - y - ^C.
-// (because it does x + ^y + C).
+//
+//	On amd64, C=1 represents a borrow, e.g. SBB on amd64 does x - y - C.
+//	On arm64, C=0 represents a borrow, e.g. SBC on arm64 does x - y - ^C.
+//	(because it does x + ^y + C).
+//
 // See https://en.wikipedia.org/wiki/Carry_flag#Vs._borrow_flag
 type flagConstant uint8
diff --git a/src/cmd/compile/internal/ssa/rewriteCond_test.go b/src/cmd/compile/internal/ssa/rewriteCond_test.go
index 2c26fdf142..ca74ed5947 100644
--- a/src/cmd/compile/internal/ssa/rewriteCond_test.go
+++ b/src/cmd/compile/internal/ssa/rewriteCond_test.go
@@ -68,8 +68,10 @@ func TestCondRewrite(t *testing.T) {
 }
 
 // Profile the aforementioned optimization from two angles:
-// SoloJump: generated branching code has one 'jump', for '<' and '>='
-// CombJump: generated branching code has two consecutive 'jump', for '<=' and '>'
+//
+//	SoloJump: generated branching code has one 'jump', for '<' and '>='
+//	CombJump: generated branching code has two consecutive 'jump', for '<=' and '>'
+//
 // We expect that 'CombJump' is generally on par with the non-optimized code, and
 // 'SoloJump' demonstrates some improvement.
 // It's for arm64 initially, please see https://github.com/golang/go/issues/38740
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index c5130b2ee5..170d8b7095 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -338,13 +338,15 @@ func schedule(f *Func) {
 // if v transitively depends on store s, v is ordered after s,
 // otherwise v is ordered before s.
 // Specifically, values are ordered like
-// store1
-// NilCheck that depends on store1
-// other values that depends on store1
-// store2
-// NilCheck that depends on store2
-// other values that depends on store2
-// ...
+//
+//	store1
+//	NilCheck that depends on store1
+//	other values that depends on store1
+//	store2
+//	NilCheck that depends on store2
+//	other values that depends on store2
+//	...
+//
 // The order of non-store and non-NilCheck values are undefined
 // (not necessarily dependency order). This should be cheaper
 // than a full scheduling as done above.
diff --git a/src/cmd/compile/internal/ssa/shift_test.go b/src/cmd/compile/internal/ssa/shift_test.go
index 3876d8df12..06c2f6720f 100644
--- a/src/cmd/compile/internal/ssa/shift_test.go
+++ b/src/cmd/compile/internal/ssa/shift_test.go
@@ -85,7 +85,7 @@ func TestShiftToExtensionAMD64(t *testing.T) {
 // makeShiftExtensionFunc generates a function containing:
 //
-// (rshift (lshift (Const64 [amount])) (Const64 [amount]))
+//	(rshift (lshift (Const64 [amount])) (Const64 [amount]))
 //
 // This may be equivalent to a sign or zero extension.
 func makeShiftExtensionFunc(c *Conf, amount int64, lshift, rshift Op, typ *types.Type) fun {
diff --git a/src/cmd/compile/internal/ssa/shortcircuit.go b/src/cmd/compile/internal/ssa/shortcircuit.go
index c0b9eacf41..5f1f892120 100644
--- a/src/cmd/compile/internal/ssa/shortcircuit.go
+++ b/src/cmd/compile/internal/ssa/shortcircuit.go
@@ -67,11 +67,11 @@ func shortcircuit(f *Func) {
 //
 // (1) Look for a CFG of the form
 //
-// p other pred(s)
-// \ /
-// b
-// / \
-// t other succ
+//	p   other pred(s)
+//	 \ /
+//	  b
+//	 / \
+//	t   other succ
 //
 // in which b is an If block containing a single phi value with a single use (b's Control),
 // which has a ConstBool arg.
@@ -80,21 +80,21 @@ func shortcircuit(f *Func) {
 //
 // Rewrite this into
 //
-// p other pred(s)
-// | /
-// | b
-// |/ \
-// t u
+//	p   other pred(s)
+//	|  /
+//	| b
+//	|/ \
+//	t   u
 //
 // and remove the appropriate phi arg(s).
 //
 // (2) Look for a CFG of the form
 //
-// p q
-// \ /
-// b
-// / \
-// t u
+//	p   q
+//	 \ /
+//	  b
+//	 / \
+//	t   u
 //
 // in which b is as described in (1).
 // However, b may also contain other phi values.
diff --git a/src/cmd/compile/internal/ssa/sparsetree.go b/src/cmd/compile/internal/ssa/sparsetree.go
index 732bb8e321..9f4e0007d3 100644
--- a/src/cmd/compile/internal/ssa/sparsetree.go
+++ b/src/cmd/compile/internal/ssa/sparsetree.go
@@ -210,6 +210,7 @@ func (t SparseTree) isAncestor(x, y *Block) bool {
 //  1. If domorder(x) > domorder(y) then x does not dominate y.
 //  2. If domorder(x) < domorder(y) and domorder(y) < domorder(z) and x does not dominate y,
 //     then x does not dominate z.
+//
 // Property (1) means that blocks sorted by domorder always have a maximal dominant block first.
 // Property (2) allows searches for dominated blocks to exit early.
 func (t SparseTree) domorder(x *Block) int32 {
diff --git a/src/cmd/compile/internal/ssa/trim.go b/src/cmd/compile/internal/ssa/trim.go
index c930a205c1..1fd7b33d5f 100644
--- a/src/cmd/compile/internal/ssa/trim.go
+++ b/src/cmd/compile/internal/ssa/trim.go
@@ -130,11 +130,11 @@ func emptyBlock(b *Block) bool {
 // trimmableBlock reports whether the block can be trimmed from the CFG,
 // subject to the following criteria:
-// - it should not be the first block
-// - it should be BlockPlain
-// - it should not loop back to itself
-// - it either is the single predecessor of the successor block or
-// contains no actual instructions
+//   - it should not be the first block
+//   - it should be BlockPlain
+//   - it should not loop back to itself
+//   - it either is the single predecessor of the successor block or
+//     contains no actual instructions
 func trimmableBlock(b *Block) bool {
 	if b.Kind != BlockPlain || b == b.Func.Entry {
 		return false
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 883772b341..bd0b925019 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -286,10 +286,10 @@ func dvarint(x *obj.LSym, off int, v int64) int {
 // for stack variables are specified as the number of bytes below varp (pointer to the
 // top of the local variables) for their starting address. The format is:
 //
-// - Offset of the deferBits variable
-// - Number of defers in the function
-// - Information about each defer call, in reverse order of appearance in the function:
-// - Offset of the closure value to call
+//   - Offset of the deferBits variable
+//   - Number of defers in the function
+//   - Information about each defer call, in reverse order of appearance in the function:
+//   - Offset of the closure value to call
 func (s *state) emitOpenDeferInfo() {
 	x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
 	x.Set(obj.AttrContentAddressable, true)
diff --git a/src/cmd/compile/internal/syntax/branches.go b/src/cmd/compile/internal/syntax/branches.go
index 56e97c71d8..6079097426 100644
--- a/src/cmd/compile/internal/syntax/branches.go
+++ b/src/cmd/compile/internal/syntax/branches.go
@@ -11,10 +11,10 @@ import "fmt"
 // checkBranches checks correct use of labels and branch
 // statements (break, continue, goto) in a function body.
 // It catches:
-// - misplaced breaks and continues
-// - bad labeled breaks and continues
-// - invalid, unused, duplicate, and missing labels
-// - gotos jumping over variable declarations and into blocks
+//   - misplaced breaks and continues
+//   - bad labeled breaks and continues
+//   - invalid, unused, duplicate, and missing labels
+//   - gotos jumping over variable declarations and into blocks
 func checkBranches(body *BlockStmt, errh ErrorHandler) {
 	if body == nil {
 		return
diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go
index 1422ab0031..a626c000be 100644
--- a/src/cmd/compile/internal/typecheck/const.go
+++ b/src/cmd/compile/internal/typecheck/const.go
@@ -620,7 +620,8 @@ func OrigInt(n ir.Node, v int64) ir.Node {
 // get the same type going out.
 // force means must assign concrete (non-ideal) type.
 // The results of defaultlit2 MUST be assigned back to l and r, e.g.
-// n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
+//
+//	n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
 func defaultlit2(l ir.Node, r ir.Node, force bool) (ir.Node, ir.Node) {
 	if l.Type() == nil || r.Type() == nil {
 		return l, r
diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go
index e6adc05a65..f0b7b74aed 100644
--- a/src/cmd/compile/internal/typecheck/expr.go
+++ b/src/cmd/compile/internal/typecheck/expr.go
@@ -76,8 +76,9 @@ func tcShift(n, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
 // tcArith typechecks operands of a binary arithmetic expression.
 // The result of tcArith MUST be assigned back to original operands,
 // t is the type of the expression, and should be set by the caller. e.g:
-// n.X, n.Y, t = tcArith(n, op, n.X, n.Y)
-// n.SetType(t)
+//
+//	n.X, n.Y, t = tcArith(n, op, n.X, n.Y)
+//	n.SetType(t)
 func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
 	l, r = defaultlit2(l, r, false)
 	if l.Type() == nil || r.Type() == nil {
@@ -194,7 +195,8 @@ func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type)
 }
 
 // The result of tcCompLit MUST be assigned back to n, e.g.
-// n.Left = tcCompLit(n.Left)
+//
+//	n.Left = tcCompLit(n.Left)
 func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
 	if base.EnableTrace && base.Flag.LowerT {
 		defer tracePrint("tcCompLit", n)(&res)
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
index 5d319eaca3..12159b71e1 100644
--- a/src/cmd/compile/internal/typecheck/iexport.go
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -258,7 +258,7 @@ import (
 // 1: added column details to Pos
 // 2: added information for generic function/types. The export of non-generic
 // functions/types remains largely backward-compatible. Breaking changes include:
-// - a 'kind' byte is added to constant values
+//   - a 'kind' byte is added to constant values
 const (
 	iexportVersionGo1_11 = 0
 	iexportVersionPosCol = 1
diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go
index 6c2e84680b..1f60f31851 100644
--- a/src/cmd/compile/internal/typecheck/syms.go
+++ b/src/cmd/compile/internal/typecheck/syms.go
@@ -24,7 +24,8 @@ func LookupRuntime(name string) *ir.Name {
 // successive occurrences of the "any" placeholder in the
 // type syntax expression n.Type.
 // The result of SubstArgTypes MUST be assigned back to old, e.g.
-// n.Left = SubstArgTypes(n.Left, t1, t2)
+//
+//	n.Left = SubstArgTypes(n.Left, t1, t2)
 func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
 	for _, t := range types_ {
 		types.CalcSize(t)
diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go
index 85de653a82..2eb9e6d718 100644
--- a/src/cmd/compile/internal/typecheck/typecheck.go
+++ b/src/cmd/compile/internal/typecheck/typecheck.go
@@ -240,7 +240,8 @@ func typecheckNtype(n ir.Ntype) ir.Ntype {
 // typecheck type checks node n.
 // The result of typecheck MUST be assigned back to n, e.g.
-// n.Left = typecheck(n.Left, top)
+//
+//	n.Left = typecheck(n.Left, top)
 func typecheck(n ir.Node, top int) (res ir.Node) {
 	// cannot type check until all the source has been parsed
 	if !TypecheckAllowed {
@@ -414,7 +415,8 @@ func typecheck(n ir.Node, top int) (res ir.Node) {
 // but also accepts untyped numeric values representable as
 // value of type int (see also checkmake for comparison).
 // The result of indexlit MUST be assigned back to n, e.g.
-// n.Left = indexlit(n.Left)
+//
+//	n.Left = indexlit(n.Left)
 func indexlit(n ir.Node) ir.Node {
 	if n != nil && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
 		return DefaultLit(n, types.Types[types.TINT])
@@ -961,7 +963,8 @@ func checksliceconst(lo ir.Node, hi ir.Node) bool {
 }
 
 // The result of implicitstar MUST be assigned back to n, e.g.
-// n.Left = implicitstar(n.Left)
+//
+//	n.Left = implicitstar(n.Left)
 func implicitstar(n ir.Node) ir.Node {
 	// insert implicit * if needed for fixed array
 	t := n.Type()
@@ -1607,7 +1610,8 @@ func checkassignto(src *types.Type, dst ir.Node) {
 }
 
 // The result of stringtoruneslit MUST be assigned back to n, e.g.
-// n.Left = stringtoruneslit(n.Left)
+//
+//	n.Left = stringtoruneslit(n.Left)
 func stringtoruneslit(n *ir.ConvExpr) ir.Node {
 	if n.X.Op() != ir.OLITERAL || n.X.Val().Kind() != constant.String {
 		base.Fatalf("stringtoarraylit %v", n)
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 147194c369..987352babc 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -492,9 +492,9 @@ type Slice struct {
 // A Field is a (Sym, Type) pairing along with some other information, and,
 // depending on the context, is used to represent:
-// - a field in a struct
-// - a method in an interface or associated with a named type
-// - a function parameter
+//   - a field in a struct
+//   - a method in an interface or associated with a named type
+//   - a function parameter
 type Field struct {
 	flags bitset8
@@ -1121,9 +1121,10 @@ func (t *Type) SimpleString() string {
 }
 
 // Cmp is a comparison between values a and b.
-// -1 if a < b
-// 0 if a == b
-// 1 if a > b
+//
+//	-1 if a < b
+//	 0 if a == b
+//	 1 if a > b
 type Cmp int8
 
 const (
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
index 34bb29cadc..54cddaee28 100644
--- a/src/cmd/compile/internal/types2/api.go
+++ b/src/cmd/compile/internal/types2/api.go
@@ -21,7 +21,6 @@
 // Type inference computes the type (Type) of every expression (syntax.Expr)
 // and checks for compliance with the language specification.
 // Use Info.Types[expr].Type for the results of type inference.
-//
 package types2
 
 import (
diff --git a/src/cmd/compile/internal/types2/check_test.go b/src/cmd/compile/internal/types2/check_test.go
index ec242c5e22..2e1ae0d2be 100644
--- a/src/cmd/compile/internal/types2/check_test.go
+++ b/src/cmd/compile/internal/types2/check_test.go
@@ -263,7 +263,7 @@ func testFiles(t *testing.T, filenames []string, colDelta uint, manual bool) {
 // (and a separating "--"). For instance, to test the package made
 // of the files foo.go and bar.go, use:
 //
-// go test -run Manual -- foo.go bar.go
+//	go test -run Manual -- foo.go bar.go
 //
 // If no source arguments are provided, the file testdata/manual.go
 // is used instead.
diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go
index 9f7e593eeb..9e77d67a7d 100644
--- a/src/cmd/compile/internal/types2/infer.go
+++ b/src/cmd/compile/internal/types2/infer.go
@@ -23,10 +23,10 @@ const useConstraintTypeInference = true
 //
 // Inference proceeds as follows. Starting with given type arguments:
 //
-// 1) apply FTI (function type inference) with typed arguments,
-// 2) apply CTI (constraint type inference),
-// 3) apply FTI with untyped function arguments,
-// 4) apply CTI.
+//  1. apply FTI (function type inference) with typed arguments,
+//  2. apply CTI (constraint type inference),
+//  3. apply FTI with untyped function arguments,
+//  4. apply CTI.
 //
 // The process stops as soon as all type arguments are known or an error occurs.
 func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand) (result []Type) {
diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go
index 93defd6618..684bbf7a8b 100644
--- a/src/cmd/compile/internal/types2/lookup.go
+++ b/src/cmd/compile/internal/types2/lookup.go
@@ -25,9 +25,9 @@ import (
 // The last index entry is the field or method index in the (possibly embedded)
 // type where the entry was found, either:
 //
-// 1) the list of declared methods of a named type; or
-// 2) the list of all methods (method set) of an interface type; or
-// 3) the list of fields of a struct type.
+//  1. the list of declared methods of a named type; or
+//  2. the list of all methods (method set) of an interface type; or
+//  3. the list of fields of a struct type.
 //
 // The earlier index entries are the indices of the embedded struct fields
 // traversed to get to the found entry, starting at depth 0.
@@ -35,12 +35,12 @@ import (
 // If no entry is found, a nil object is returned. In this case, the returned
 // index and indirect values have the following meaning:
 //
-// - If index != nil, the index sequence points to an ambiguous entry
-// (the same name appeared more than once at the same embedding level).
+//   - If index != nil, the index sequence points to an ambiguous entry
+//     (the same name appeared more than once at the same embedding level).
 //
-// - If indirect is set, a method with a pointer receiver type was found
-// but there was no pointer on the path from the actual receiver type to
-// the method's formal receiver base type, nor was the receiver addressable.
+//   - If indirect is set, a method with a pointer receiver type was found
+//     but there was no pointer on the path from the actual receiver type to
+//     the method's formal receiver base type, nor was the receiver addressable.
 func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
 	if T == nil {
 		panic("LookupFieldOrMethod on nil type")
diff --git a/src/cmd/compile/internal/types2/selection.go b/src/cmd/compile/internal/types2/selection.go
index ee63214407..c820a29fad 100644
--- a/src/cmd/compile/internal/types2/selection.go
+++ b/src/cmd/compile/internal/types2/selection.go
@@ -92,9 +92,9 @@ func (s *Selection) Type() Type {
 // The last index entry is the field or method index of the type declaring f;
 // either:
 //
-// 1) the list of declared methods of a named type; or
-// 2) the list of methods of an interface type; or
-// 3) the list of fields of a struct type.
+//  1. the list of declared methods of a named type; or
+//  2. the list of methods of an interface type; or
+//  3. the list of fields of a struct type.
 //
 // The earlier index entries are the indices of the embedded fields implicitly
 // traversed to get from (the type of) x to f, starting at embedding depth 0.
@@ -111,6 +111,7 @@ func (s *Selection) String() string { return SelectionString(s, nil) }
 // package-level objects, and may be nil.
 //
 // Examples:
+//
 //	"field (T) f int"
 //	"method (T) f(X) Y"
 //	"method expr (T) f(X) Y"
diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go
index 7a34b6474c..f530849a9d 100644
--- a/src/cmd/compile/internal/types2/sizes.go
+++ b/src/cmd/compile/internal/types2/sizes.go
@@ -24,19 +24,19 @@ type Sizes interface {
 // StdSizes is a convenience type for creating commonly used Sizes.
 // It makes the following simplifying assumptions:
 //
-// - The size of explicitly sized basic types (int16, etc.) is the
-// specified size.
-// - The size of strings and interfaces is 2*WordSize.
-// - The size of slices is 3*WordSize.
-// - The size of an array of n elements corresponds to the size of
-// a struct of n consecutive fields of the array's element type.
-// - The size of a struct is the offset of the last field plus that
-// field's size. As with all element types, if the struct is used
-// in an array its size must first be aligned to a multiple of the
-// struct's alignment.
-// - All other types have size WordSize.
-// - Arrays and structs are aligned per spec definition; all other
-// types are naturally aligned with a maximum alignment MaxAlign.
+//   - The size of explicitly sized basic types (int16, etc.) is the
+//     specified size.
+//   - The size of strings and interfaces is 2*WordSize.
+//   - The size of slices is 3*WordSize.
+//   - The size of an array of n elements corresponds to the size of
+//     a struct of n consecutive fields of the array's element type.
+//   - The size of a struct is the offset of the last field plus that
+//     field's size. As with all element types, if the struct is used
+//     in an array its size must first be aligned to a multiple of the
+//     struct's alignment.
+//   - All other types have size WordSize.
+//   - Arrays and structs are aligned per spec definition; all other
+//     types are naturally aligned with a maximum alignment MaxAlign.
 //
 // *StdSizes implements Sizes.
 type StdSizes struct {
diff --git a/src/cmd/compile/internal/types2/typeterm.go b/src/cmd/compile/internal/types2/typeterm.go
index 3d82a37ab8..97791324e1 100644
--- a/src/cmd/compile/internal/types2/typeterm.go
+++ b/src/cmd/compile/internal/types2/typeterm.go
@@ -6,10 +6,10 @@ package types2
 
 // A term describes elementary type sets:
 //
-// ∅: (*term)(nil) == ∅ // set of no types (empty set)
-// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
-// T: &term{false, T} == {T} // set of type T
-// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+//	 ∅:  (*term)(nil)     == ∅                      // set of no types (empty set)
+//	 𝓤:  &term{}          == 𝓤                      // set of all types (𝓤niverse)
+//	 T:  &term{false, T}  == {T}                    // set of type T
+//	~t:  &term{true, t}   == {t' | under(t') == t}  // set of types with underlying type t
 type term struct {
 	tilde bool // valid if typ != nil
 	typ   Type
diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go
index 9b09e097fa..c44d934f21 100644
--- a/src/cmd/compile/internal/walk/assign.go
+++ b/src/cmd/compile/internal/walk/assign.go
@@ -242,6 +242,7 @@ func walkReturn(n *ir.ReturnStmt) ir.Node {
 // check assign type list to
 // an expression list. called in
+//
 //	expr-list = func()
 func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
 	if len(nl) != nr.NumFields() {
@@ -273,6 +274,7 @@ func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
 // check assign expression list to
 // an expression list. called in
+//
 //	expr-list = expr-list
 func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node {
 	// cannot happen: should have been rejected during type checking
@@ -455,17 +457,18 @@ func readsMemory(n ir.Node) bool {
 }
 
 // expand append(l1, l2...) to
-// init {
-// s := l1
-// n := len(s) + len(l2)
-// // Compare as uint so growslice can panic on overflow.
-// if uint(n) > uint(cap(s)) {
-// s = growslice(s, n)
-// }
-// s = s[:n]
-// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
-// }
-// s
+//
+//	init {
+//	  s := l1
+//	  n := len(s) + len(l2)
+//	  // Compare as uint so growslice can panic on overflow.
+//	  if uint(n) > uint(cap(s)) {
+//	    s = growslice(s, n)
+//	  }
+//	  s = s[:n]
+//	  memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+//	}
+//	s
 //
 // l2 is allowed to be a string.
 func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
@@ -597,32 +600,33 @@ func isAppendOfMake(n ir.Node) bool {
 }
 
 // extendSlice rewrites append(l1, make([]T, l2)...) to
-// init {
-// if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true)
-// } else {
-// panicmakeslicelen()
-// }
-// s := l1
-// n := len(s) + l2
-// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
-// // cap is a positive int and n can become negative when len(s) + l2
-// // overflows int. Interpreting n when negative as uint makes it larger
-// // than cap(s). growslice will check the int n arg and panic if n is
-// // negative. This prevents the overflow from being undetected.
```
-// if uint(n) > uint(cap(s)) { -// s = growslice(T, s, n) -// } -// s = s[:n] -// lptr := &l1[0] -// sptr := &s[0] -// if lptr == sptr || !T.HasPointers() { -// // growslice did not clear the whole underlying array (or did not get called) -// hp := &s[len(l1)] -// hn := l2 * sizeof(T) -// memclr(hp, hn) -// } -// } -// s +// +// init { +// if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true) +// } else { +// panicmakeslicelen() +// } +// s := l1 +// n := len(s) + l2 +// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2. +// // cap is a positive int and n can become negative when len(s) + l2 +// // overflows int. Interpreting n when negative as uint makes it larger +// // than cap(s). growslice will check the int n arg and panic if n is +// // negative. This prevents the overflow from being undetected. +// if uint(n) > uint(cap(s)) { +// s = growslice(T, s, n) +// } +// s = s[:n] +// lptr := &l1[0] +// sptr := &s[0] +// if lptr == sptr || !T.HasPointers() { +// // growslice did not clear the whole underlying array (or did not get called) +// hp := &s[len(l1)] +// hn := l2 * sizeof(T) +// memclr(hp, hn) +// } +// } +// s func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node { // isAppendOfMake made sure all possible positive values of l2 fit into an uint. // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go index 7ec5494d99..d7b553ed0c 100644 --- a/src/cmd/compile/internal/walk/builtin.go +++ b/src/cmd/compile/internal/walk/builtin.go @@ -26,19 +26,19 @@ import ( // // For race detector, expand append(src, a [, b]* ) to // -// init { -// s := src -// const argc = len(args) - 1 -// if cap(s) - len(s) < argc { -// s = growslice(s, len(s)+argc) -// } -// n := len(s) -// s = s[:n+argc] -// s[n] = a -// s[n+1] = b -// ... -// } -// s +// init { +// s := src +// const argc = len(args) - 1 +// if cap(s) - len(s) < argc { +// s = growslice(s, len(s)+argc) +// } +// n := len(s) +// s = s[:n+argc] +// s[n] = a +// s[n+1] = b +// ... +// } +// s func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { if !ir.SameSafeExpr(dst, n.Args[0]) { n.Args[0] = safeExpr(n.Args[0], init) diff --git a/src/cmd/compile/internal/walk/compare.go b/src/cmd/compile/internal/walk/compare.go index 625e216050..993f1392aa 100644 --- a/src/cmd/compile/internal/walk/compare.go +++ b/src/cmd/compile/internal/walk/compare.go @@ -16,7 +16,8 @@ import ( ) // The result of walkCompare MUST be assigned back to n, e.g. -// n.Left = walkCompare(n.Left, init) +// +// n.Left = walkCompare(n.Left, init) func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { if n.X.Type().IsInterface() && n.Y.Type().IsInterface() && n.X.Op() != ir.ONIL && n.Y.Op() != ir.ONIL { return walkCompareInterface(n, init) @@ -404,7 +405,8 @@ func walkCompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { } // The result of finishCompare MUST be assigned back to n, e.g. 
-// n.Left = finishCompare(n.Left, x, r, init) +// +// n.Left = finishCompare(n.Left, x, r, init) func finishCompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node { r = typecheck.Expr(r) r = typecheck.Conv(r, n.Type()) diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 4c1e7adddd..26a23c4d09 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -20,7 +20,8 @@ import ( ) // The result of walkExpr MUST be assigned back to n, e.g. -// n.Left = walkExpr(n.Left, init) +// +// n.Left = walkExpr(n.Left, init) func walkExpr(n ir.Node, init *ir.Nodes) ir.Node { if n == nil { return n diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index cc37f95764..80806478be 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -237,7 +237,8 @@ func isaddrokay(n ir.Node) bool { // If the original argument n is not okay, addrTemp creates a tmp, emits // tmp = n, and then returns tmp. // The result of addrTemp MUST be assigned back to n, e.g. -// n.Left = o.addrTemp(n.Left) +// +// n.Left = o.addrTemp(n.Left) func (o *orderState) addrTemp(n ir.Node) ir.Node { if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL { // TODO: expand this to all static composite literal nodes? @@ -316,8 +317,10 @@ func (o *orderState) mapKeyTemp(t *types.Type, n ir.Node) ir.Node { // Returns a bool that signals if a modification was made. // // For: -// x = m[string(k)] -// x = m[T1{... Tn{..., string(k), ...}] +// +// x = m[string(k)] +// x = m[T1{... Tn{..., string(k), ...}] +// // where k is []byte, T1 to Tn is a nesting of struct and array literals, // the allocation of backing bytes for the string can be avoided // by reusing the []byte backing array. These are special cases @@ -400,9 +403,12 @@ func (o *orderState) stmtList(l ir.Nodes) { } // orderMakeSliceCopy matches the pattern: -// m = OMAKESLICE([]T, x); OCOPY(m, s) +// +// m = OMAKESLICE([]T, x); OCOPY(m, s) +// // and rewrites it to: -// m = OMAKESLICECOPY([]T, x, s); nil +// +// m = OMAKESLICECOPY([]T, x, s); nil func orderMakeSliceCopy(s []ir.Node) { if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting { return @@ -473,7 +479,8 @@ func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) { // exprInPlace orders the side effects in *np and // leaves them as the init list of the final *np. // The result of exprInPlace MUST be assigned back to n, e.g. -// n.Left = o.exprInPlace(n.Left) +// +// n.Left = o.exprInPlace(n.Left) func (o *orderState) exprInPlace(n ir.Node) ir.Node { var order orderState order.free = o.free @@ -489,7 +496,9 @@ func (o *orderState) exprInPlace(n ir.Node) ir.Node { // orderStmtInPlace orders the side effects of the single statement *np // and replaces it with the resulting statement list. // The result of orderStmtInPlace MUST be assigned back to n, e.g. -// n.Left = orderStmtInPlace(n.Left) +// +// n.Left = orderStmtInPlace(n.Left) +// // free is a map that can be used to obtain temporary variables by type. func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node { var order orderState @@ -1087,7 +1096,8 @@ func (o *orderState) exprNoLHS(n ir.Node) ir.Node { // Otherwise lhs == nil. (When lhs != nil it may be possible // to avoid copying the result of the expression to a temporary.) // The result of expr MUST be assigned back to n, e.g. 
-// n.Left = o.expr(n.Left, lhs) +// +// n.Left = o.expr(n.Left, lhs) func (o *orderState) expr(n, lhs ir.Node) ir.Node { if n == nil { return n @@ -1451,10 +1461,14 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node { // as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment. // The caller should order the right-hand side of the assignment before calling order.as2func. // It rewrites, +// // a, b, a = ... +// // as +// // tmp1, tmp2, tmp3 = ... // a, b, a = tmp1, tmp2, tmp3 +// // This is necessary to ensure left to right assignment order. func (o *orderState) as2func(n *ir.AssignListStmt) { results := n.Rhs[0].Type() diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go index f09e916546..4f38cb2c81 100644 --- a/src/cmd/compile/internal/walk/stmt.go +++ b/src/cmd/compile/internal/walk/stmt.go @@ -10,7 +10,8 @@ import ( ) // The result of walkStmt MUST be assigned back to n, e.g. -// n.Left = walkStmt(n.Left) +// +// n.Left = walkStmt(n.Left) func walkStmt(n ir.Node) ir.Node { if n == nil { return n diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go index 32e29f347b..12d9d0f365 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -106,7 +106,9 @@ func moveByType(t *types.Type) obj.As { } // opregreg emits instructions for -// dest := dest(To) op src(From) +// +// dest := dest(To) op src(From) +// // and also returns the created obj.Prog so it // may be further adjusted (offset, scale, etc). func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
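The hunks above all apply the same three mechanical rewrites: an inline example becomes an indented display set off by blank // lines, bullets written "* item" become "- item", and enumerations written "1) item" become "1. item". A minimal sketch of a doc comment already in the new form (a hypothetical file invented for illustration; it is not part of this CL):

package sketch

// Clamp limits v to the interval [lo, hi].
// The result of Clamp MUST be assigned back to v, e.g.
//
//	v = Clamp(v, lo, hi)
//
// It chooses the result as follows:
//  1. if v < lo, the result is lo;
//  2. if v > hi, the result is hi;
//  3. otherwise, the result is v itself.
//
// Callers must ensure that:
//   - lo <= hi
//   - none of the arguments is NaN
func Clamp(v, lo, hi float64) float64 {
	if v < lo {
		return lo
	}
	if v > hi {
		return hi
	}
	return v
}

The blank // lines and the indentation are what the updated formatter normalizes throughout the hunks above, so the boundary between prose and a code display is explicit rather than inferred.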

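One of the reformatted comments, the type-set table in typeterm.go, is dense enough that a runnable reading of it may help. The sketch below mirrors its four cases with a plain string standing in for a types2 type, so it is illustrative only, not the compiler's representation:

package main

import "fmt"

// term mirrors the four cases in the typeterm.go comment above:
// a nil *term is ∅ (no types), &term{} is 𝓤 (all types),
// &term{false, T} is the single type T, and &term{true, t} is
// every type whose underlying type is t.
type term struct {
	tilde bool
	typ   string // stand-in for a types2.Type; "" plays the role of a nil type
}

func (x *term) String() string {
	switch {
	case x == nil:
		return "∅"
	case x.typ == "":
		return "𝓤"
	case x.tilde:
		return "~" + x.typ
	default:
		return x.typ
	}
}

func main() {
	for _, x := range []*term{nil, {}, {typ: "int"}, {tilde: true, typ: "int"}} {
		fmt.Println(x)
	}
}

As the field comment in the hunk says, tilde is only meaningful when a type is set; the loop prints the four terms as ∅, 𝓤, int, and ~int.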