author     Robert Griesemer <gri@golang.org>  2021-02-02 11:24:42 -0800
committer  Robert Griesemer <gri@golang.org>  2021-02-02 11:24:43 -0800
commit     0d2d6c74644c4e09655225894e6eca2a06aeeef4 (patch)
tree       7f583f291f01335cb3173fc3bf39794d318de545 /src/cmd
parent     3d5c715bf299fb662104d70d612f3f0303e542d9 (diff)
parent     23b0c1f76e647a615fd0911df26f2ddf241607a1 (diff)
download   go-0d2d6c74644c4e09655225894e6eca2a06aeeef4.tar.xz
[dev.typeparams] all: merge dev.regabi (23b0c1f) into dev.typeparams
Merge List:

+ 2021-02-02 23b0c1f76e [dev.regabi] all: merge master (fca94ab) into dev.regabi
+ 2021-02-02 fca94ab3ab spec: improve the example in Type assertions section
+ 2021-02-02 98f8454a73 cmd/link: don't decode type symbol in shared library in deadcode
+ 2021-02-02 1426a571b7 cmd/link: fix off-by-1 error in findShlibSection
+ 2021-02-01 32e789f4fb test: fix incorrectly laid out instructions in issue11656.go
+ 2021-02-01 ca6999e27c [dev.regabi] test: add a test for inlining closures
+ 2021-02-01 0b6cfea634 doc/go1.16: document that on OpenBSD syscalls are now made through libc
+ 2021-02-01 26e29aa15a cmd/link: disable TestPIESize if CGO isn't enabled
+ 2021-02-01 6ac91e460c doc/go1.16: minor markup fixes
+ 2021-01-29 44361140c0 embed: update docs for proposal tweaks
+ 2021-01-29 68058edc39 runtime: document pointer write atomicity for memclrNoHeapPointers
+ 2021-01-28 c8bd8010ff syscall: generate readlen/writelen for openbsd libc
+ 2021-01-28 41bb49b878 cmd/go: revert TestScript/build_trimpath to use ioutil.ReadFile
+ 2021-01-28 725a642c2d runtime: correct syscall10/syscall10X on openbsd/amd64
+ 2021-01-28 4b068cafb5 doc/go1.16: document go/build/constraint package
+ 2021-01-28 376518d77f runtime,syscall: convert syscall on openbsd/arm64 to libc
+ 2021-01-27 aca22bddf2 [dev.regabi] cmd/compile: remove nested functions from expands_calls.go
+ 2021-01-27 667e08ba8c [dev.regabi] cmd/go: Use GOMAXPROCS to limit default build, compile parallelism
+ 2021-01-27 00f2ff5c94 api/go1.16: add go/build/constraint APIs
+ 2021-01-27 35334caf18 crypto/x509: remove leftover CertificateRequest field
+ 2021-01-27 a5a5e2c968 runtime: make sure to remove open-coded defer entries in all cases after a recover
+ 2021-01-27 8cfa01943a runtime: block console ctrlhandler when the signal is handled
+ 2021-01-27 ff9e8364c6 cmd/go: skip issue33139 when the 'cc' script command is unavailable
+ 2021-01-27 cd176b3615 runtime: switch runtime to libc for openbsd/arm64
+ 2021-01-27 6c8fbfbdcf runtime: convert openbsd/arm64 locking to libc
+ 2021-01-27 5cdf0da1bf syscall: clean up mkasm related changes
+ 2021-01-27 210f70e298 doc/go1.16: fix closing brace in .Export format
+ 2021-01-27 0f797f168d math: fix typo in sqrt.go code comment
+ 2021-01-26 9b636feafe [dev.regabi] cmd/compile: missing last patch set for cl286013
+ 2021-01-26 f7dad5eae4 [dev.regabi] cmd/compile: remove leftover code form late call lowering work
+ 2021-01-26 8634a234df runtime,syscall: convert syscall on openbsd/amd64 to libc
+ 2021-01-26 1d5e14632e os: further document limitations around naked file descriptors
+ 2021-01-26 cf263e9f77 os: correct names in CreateTemp and MkdirTemp doc comments
+ 2021-01-26 ce8b318624 net/http/fcgi: remove locking added to prevent a test-only race

Change-Id: Ibd38d559c8a5b0aa32dd0d3a8cdf6876368a3aeb
Diffstat (limited to 'src/cmd')
-rw-r--r--  src/cmd/compile/internal/ssa/compile.go              |    1
-rw-r--r--  src/cmd/compile/internal/ssa/config.go               |    8
-rw-r--r--  src/cmd/compile/internal/ssa/decompose.go            |    4
-rw-r--r--  src/cmd/compile/internal/ssa/expand_calls.go         | 1202
-rw-r--r--  src/cmd/compile/internal/ssa/gen/dec64.rules         |    8
-rw-r--r--  src/cmd/compile/internal/ssa/gen/decArgs.rules       |   58
-rw-r--r--  src/cmd/compile/internal/ssa/gen/decArgsOps.go       |   20
-rw-r--r--  src/cmd/compile/internal/ssa/rewritedec64.go         |   16
-rw-r--r--  src/cmd/compile/internal/ssa/rewritedecArgs.go       |  247
-rw-r--r--  src/cmd/compile/internal/ssagen/ssa.go               |  217
-rw-r--r--  src/cmd/go/alldocs.go                                |    2
-rw-r--r--  src/cmd/go/internal/cfg/cfg.go                       |   24
-rw-r--r--  src/cmd/go/internal/work/build.go                    |    2
-rw-r--r--  src/cmd/go/internal/work/gc.go                       |   17
-rw-r--r--  src/cmd/go/testdata/script/build_trimpath.txt        |    3
-rw-r--r--  src/cmd/go/testdata/script/link_syso_issue33139.txt  |    2
-rw-r--r--  src/cmd/link/elf_test.go                             |    6
-rw-r--r--  src/cmd/link/internal/ld/deadcode.go                 |   16
-rw-r--r--  src/cmd/link/internal/ld/decodesym.go                |    2
19 files changed, 711 insertions, 1144 deletions
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index 63994d1778..c267274366 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -431,7 +431,6 @@ var passes = [...]pass{
{name: "early copyelim", fn: copyelim},
{name: "early deadcode", fn: deadcode}, // remove generated dead code to avoid doing pointless work during opt
{name: "short circuit", fn: shortcircuit},
- {name: "decompose args", fn: decomposeArgs, required: !go116lateCallExpansion, disabled: go116lateCallExpansion}, // handled by late call lowering
{name: "decompose user", fn: decomposeUser, required: true},
{name: "pre-opt deadcode", fn: deadcode},
{name: "opt", fn: opt, required: true}, // NB: some generic rules know the name of the opt pass. TODO: split required rules and optimizing rules
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 8dc2ee8213..e952c73d9b 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -179,14 +179,6 @@ type Frontend interface {
MyImportPath() string
}
-const go116lateCallExpansion = true
-
-// LateCallExpansionEnabledWithin returns true if late call expansion should be tested
-// within compilation of a function/method.
-func LateCallExpansionEnabledWithin(f *Func) bool {
- return go116lateCallExpansion
-}
-
// NewConfig returns a new configuration object for the given architecture.
func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config {
c := &Config{arch: arch, Types: types}
diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go
index bf7f1e826b..ea988e44f6 100644
--- a/src/cmd/compile/internal/ssa/decompose.go
+++ b/src/cmd/compile/internal/ssa/decompose.go
@@ -219,10 +219,6 @@ func decomposeInterfacePhi(v *Value) {
v.AddArg(data)
}
-func decomposeArgs(f *Func) {
- applyRewrite(f, rewriteBlockdecArgs, rewriteValuedecArgs, removeDeadValues)
-}
-
func decomposeUser(f *Func) {
for _, b := range f.Blocks {
for _, v := range b.Values {
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
index af994d4b5b..579818e4f3 100644
--- a/src/cmd/compile/internal/ssa/expand_calls.go
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -28,658 +28,666 @@ func isBlockMultiValueExit(b *Block) bool {
return (b.Kind == BlockRet || b.Kind == BlockRetJmp) && len(b.Controls) > 0 && b.Controls[0].Op == OpMakeResult
}
-// expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form
-// that is more oriented to a platform's ABI. The SelectN operations that extract results are rewritten into
-// more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are
-// reached. On the callee side, OpArg nodes are not decomposed until this phase is run.
-// TODO results should not be lowered until this phase.
-func expandCalls(f *Func) {
- // Calls that need lowering have some number of inputs, including a memory input,
- // and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
+// removeTrivialWrapperTypes unwraps layers of
+// struct { singleField SomeType } and [1]SomeType
+// until a non-wrapper type is reached. This is useful
+// for working with assignments to/from interface data
+// fields (either second operand to OpIMake or OpIData)
+// where the wrapping or type conversion can be elided
+// because of type conversions/assertions in source code
+// that do not appear in SSA.
+func removeTrivialWrapperTypes(t *types.Type) *types.Type {
+ for {
+ if t.IsStruct() && t.NumFields() == 1 {
+ t = t.Field(0).Type
+ continue
+ }
+ if t.IsArray() && t.NumElem() == 1 {
+ t = t.Elem()
+ continue
+ }
+ break
+ }
+ return t
+}
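
A standalone illustration of the unwrapping loop above, using a toy type model rather than cmd/compile's *types.Type:

package main

import "fmt"

// toyType stands in for *types.Type: "struct1" is a one-field struct,
// "array1" a one-element array; both are trivial wrappers.
type toyType struct {
	kind string // "struct1", "array1", or "leaf"
	elem *toyType
	name string
}

func removeTrivialWrapperTypes(t *toyType) *toyType {
	for t.kind == "struct1" || t.kind == "array1" {
		t = t.elem // peel one wrapper layer per iteration
	}
	return t
}

func main() {
	leaf := &toyType{kind: "leaf", name: "int64"}
	wrapped := &toyType{kind: "struct1", elem: &toyType{kind: "array1", elem: leaf}}
	fmt.Println(removeTrivialWrapperTypes(wrapped).name) // int64
}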
- // With the current ABI those inputs need to be converted into stores to memory,
- // rethreading the call's memory input to the first, and the new call now receiving the last.
+type expandState struct {
+ f *Func
+ debug bool
+ canSSAType func(*types.Type) bool
+ regSize int64
+ sp *Value
+ typs *Types
+ ptrSize int64
+ hiOffset int64
+ lowOffset int64
+ namedSelects map[*Value][]namedVal
+ sdom SparseTree
+ common map[selKey]*Value
+ offsets map[offsetKey]*Value
+}
- // With the current ABI, the outputs need to be converted to loads, which will all use the call's
- // memory output as their input.
- if !LateCallExpansionEnabledWithin(f) {
- return
+// intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
+// that has no 64-bit integer registers.
+func (x *expandState) intPairTypes(et types.Kind) (tHi, tLo *types.Type) {
+ tHi = x.typs.UInt32
+ if et == types.TINT64 {
+ tHi = x.typs.Int32
}
- debug := f.pass.debug > 0
+ tLo = x.typs.UInt32
+ return
+}
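
intPairTypes pairs with the hiOffset/lowOffset fields set up in expandCalls below: on a 32-bit target a 64-bit integer is stored as two 32-bit halves whose offsets depend on byte order. A self-contained sketch of that split (assuming the 4-byte offsets used in this file):

package main

import "fmt"

func main() {
	bigEndian := false
	var hiOffset, lowOffset int64
	if bigEndian {
		lowOffset = 4 // big-endian: the low half sits 4 bytes in
	} else {
		hiOffset = 4 // little-endian: the high half sits 4 bytes in
	}
	v := uint64(0x1122334455667788)
	hi, lo := uint32(v>>32), uint32(v)
	fmt.Printf("hi=%#x at +%d, lo=%#x at +%d\n", hi, hiOffset, lo, lowOffset)
}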
- if debug {
- fmt.Printf("\nexpandsCalls(%s)\n", f.Name)
+// isAlreadyExpandedAggregateType returns whether a type is an SSA-able "aggregate" (multiple register) type
+// that was expanded in an earlier phase (currently, expand_calls is intended to run after decomposeBuiltin,
+// so this is all aggregate types -- small struct and array, complex, interface, string, slice, and 64-bit
+// integer on 32-bit).
+func (x *expandState) isAlreadyExpandedAggregateType(t *types.Type) bool {
+ if !x.canSSAType(t) {
+ return false
}
+ return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() ||
+ t.Size() > x.regSize && t.IsInteger()
+}
- canSSAType := f.fe.CanSSA
- regSize := f.Config.RegSize
- sp, _ := f.spSb()
- typ := &f.Config.Types
- ptrSize := f.Config.PtrSize
+// offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
+// TODO should also optimize offsets from SB?
+func (x *expandState) offsetFrom(from *Value, offset int64, pt *types.Type) *Value {
+ if offset == 0 && from.Type == pt { // this is not actually likely
+ return from
+ }
+ // Simplify, canonicalize
+ for from.Op == OpOffPtr {
+ offset += from.AuxInt
+ from = from.Args[0]
+ }
+ if from == x.sp {
+ return x.f.ConstOffPtrSP(pt, offset, x.sp)
+ }
+ key := offsetKey{from, offset, pt}
+ v := x.offsets[key]
+ if v != nil {
+ return v
+ }
+ v = from.Block.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from)
+ x.offsets[key] = v
+ return v
+}
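
The two tricks in offsetFrom — collapsing chains of OpOffPtr into a single displacement, and memoizing results so equal offsets share one value — can be sketched with a toy pointer model (not the SSA package):

package main

import "fmt"

// node models a pointer value; a node with a base is an offset from it,
// like OpOffPtr.
type node struct {
	base *node
	off  int64
}

type key struct {
	base *node
	off  int64
}

var memo = map[key]*node{}

func offsetFrom(from *node, offset int64) *node {
	for from.base != nil { // simplify, canonicalize: fold chained offsets
		offset += from.off
		from = from.base
	}
	k := key{from, offset}
	if v, ok := memo[k]; ok {
		return v // reuse, as x.offsets does
	}
	v := &node{base: from, off: offset}
	memo[k] = v
	return v
}

func main() {
	p := &node{}
	a := offsetFrom(offsetFrom(p, 8), 16)
	b := offsetFrom(p, 24)
	fmt.Println(a == b) // true: +8 then +16 canonicalizes to +24 and is shared
}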
- // For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness.
- var hiOffset, lowOffset int64
- if f.Config.BigEndian {
- lowOffset = 4
- } else {
- hiOffset = 4
+// splitSlots splits one "field" (specified by sfx, offset, and ty) out of the LocalSlots in ls and returns the new LocalSlots this generates.
+func (x *expandState) splitSlots(ls []LocalSlot, sfx string, offset int64, ty *types.Type) []LocalSlot {
+ var locs []LocalSlot
+ for i := range ls {
+ locs = append(locs, x.f.fe.SplitSlot(&ls[i], sfx, offset, ty))
}
+ return locs
+}
- namedSelects := make(map[*Value][]namedVal)
+// Calls that need lowering have some number of inputs, including a memory input,
+// and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
- sdom := f.Sdom()
+// With the current ABI those inputs need to be converted into stores to memory,
+// rethreading the call's memory input to the first, and the new call now receiving the last.
- common := make(map[selKey]*Value)
+// With the current ABI, the outputs need to be converted to loads, which will all use the call's
+// memory output as their input.
- // intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
- // that has no 64-bit integer registers.
- intPairTypes := func(et types.Kind) (tHi, tLo *types.Type) {
- tHi = typ.UInt32
- if et == types.TINT64 {
- tHi = typ.Int32
- }
- tLo = typ.UInt32
- return
+// rewriteSelect recursively walks from leaf selector to a root (OpSelectN, OpLoad, OpArg)
+// through a chain of Struct/Array/builtin Select operations. If the chain of selectors does not
+// end in an expected root, it does nothing (this can happen depending on compiler phase ordering).
+// The "leaf" provides the type, the root supplies the container, and the leaf-to-root path
+// accumulates the offset.
+// It emits the code necessary to implement the leaf select operation that leads to the root.
+//
+// TODO when registers really arrive, must also decompose anything split across two registers or registers and memory.
+func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64) []LocalSlot {
+ if x.debug {
+ fmt.Printf("rewriteSelect(%s, %s, %d)\n", leaf.LongString(), selector.LongString(), offset)
}
-
- // isAlreadyExpandedAggregateType returns whether a type is an SSA-able "aggregate" (multiple register) type
- // that was expanded in an earlier phase (currently, expand_calls is intended to run after decomposeBuiltin,
- // so this is all aggregate types -- small struct and array, complex, interface, string, slice, and 64-bit
- // integer on 32-bit).
- isAlreadyExpandedAggregateType := func(t *types.Type) bool {
- if !canSSAType(t) {
- return false
+ var locs []LocalSlot
+ leafType := leaf.Type
+ if len(selector.Args) > 0 {
+ w := selector.Args[0]
+ if w.Op == OpCopy {
+ for w.Op == OpCopy {
+ w = w.Args[0]
+ }
+ selector.SetArg(0, w)
}
- return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() ||
- t.Size() > regSize && t.IsInteger()
}
-
- offsets := make(map[offsetKey]*Value)
-
- // offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
- // TODO should also optimize offsets from SB?
- offsetFrom := func(from *Value, offset int64, pt *types.Type) *Value {
- if offset == 0 && from.Type == pt { // this is not actually likely
- return from
+ switch selector.Op {
+ case OpArg:
+ if !x.isAlreadyExpandedAggregateType(selector.Type) {
+ if leafType == selector.Type { // OpIData leads us here, sometimes.
+ leaf.copyOf(selector)
+ } else {
+ x.f.Fatalf("Unexpected OpArg type, selector=%s, leaf=%s\n", selector.LongString(), leaf.LongString())
+ }
+ if x.debug {
+ fmt.Printf("\tOpArg, break\n")
+ }
+ break
}
- // Simplify, canonicalize
- for from.Op == OpOffPtr {
- offset += from.AuxInt
- from = from.Args[0]
+ switch leaf.Op {
+ case OpIData, OpStructSelect, OpArraySelect:
+ leafType = removeTrivialWrapperTypes(leaf.Type)
}
- if from == sp {
- return f.ConstOffPtrSP(pt, offset, sp)
+ aux := selector.Aux
+ auxInt := selector.AuxInt + offset
+ if leaf.Block == selector.Block {
+ leaf.reset(OpArg)
+ leaf.Aux = aux
+ leaf.AuxInt = auxInt
+ leaf.Type = leafType
+ } else {
+ w := selector.Block.NewValue0IA(leaf.Pos, OpArg, leafType, auxInt, aux)
+ leaf.copyOf(w)
+ if x.debug {
+ fmt.Printf("\tnew %s\n", w.LongString())
+ }
}
- key := offsetKey{from, offset, pt}
- v := offsets[key]
- if v != nil {
- return v
+ for _, s := range x.namedSelects[selector] {
+ locs = append(locs, x.f.Names[s.locIndex])
}
- v = from.Block.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from)
- offsets[key] = v
- return v
- }
- // splitSlots splits one "field" (specified by sfx, offset, and ty) out of the LocalSlots in ls and returns the new LocalSlots this generates.
- splitSlots := func(ls []LocalSlot, sfx string, offset int64, ty *types.Type) []LocalSlot {
- var locs []LocalSlot
- for i := range ls {
- locs = append(locs, f.fe.SplitSlot(&ls[i], sfx, offset, ty))
+ case OpLoad: // We end up here because of IData of immediate structures.
+ // Failure case:
+ // (note the failure case is very rare; w/o this case, make.bash and run.bash both pass, as well as
+ // the hard cases of building {syscall,math,math/cmplx,math/bits,go/constant} on ppc64le and mips-softfloat).
+ //
+ // GOSSAFUNC='(*dumper).dump' go build -gcflags=-l -tags=math_big_pure_go cmd/compile/internal/gc
+ // cmd/compile/internal/gc/dump.go:136:14: internal compiler error: '(*dumper).dump': not lowered: v827, StructSelect PTR PTR
+ // b2: ← b1
+ // v20 (+142) = StaticLECall <interface {},mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v8 v1
+ // v21 (142) = SelectN <mem> [1] v20
+ // v22 (142) = SelectN <interface {}> [0] v20
+ // b15: ← b8
+ // v71 (+143) = IData <Nodes> v22 (v[Nodes])
+ // v73 (+146) = StaticLECall <[]*Node,mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v71 v21
+ //
+ // translates (w/o the "case OpLoad:" above) to:
+ //
+ // b2: ← b1
+ // v20 (+142) = StaticCall <mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v715
+ // v23 (142) = Load <*uintptr> v19 v20
+ // v823 (142) = IsNonNil <bool> v23
+ // v67 (+143) = Load <*[]*Node> v880 v20
+ // b15: ← b8
+ // v827 (146) = StructSelect <*[]*Node> [0] v67
+ // v846 (146) = Store <mem> {*[]*Node} v769 v827 v20
+ // v73 (+146) = StaticCall <mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v846
+ // i.e., the struct select is generated and remains in because it is not applied to an actual structure.
+ // The OpLoad was created to load the single field of the IData
+ // This case removes that StructSelect.
+ if leafType != selector.Type {
+ x.f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString())
}
- return locs
- }
-
- // removeTrivialWrapperTypes unwraps layers of
- // struct { singleField SomeType } and [1]SomeType
- // until a non-wrapper type is reached. This is useful
- // for working with assignments to/from interface data
- // fields (either second operand to OpIMake or OpIData)
- // where the wrapping or type conversion can be elided
- // because of type conversions/assertions in source code
- // that do not appear in SSA.
- removeTrivialWrapperTypes := func(t *types.Type) *types.Type {
- for {
- if t.IsStruct() && t.NumFields() == 1 {
- t = t.Field(0).Type
- continue
- }
- if t.IsArray() && t.NumElem() == 1 {
- t = t.Elem()
- continue
- }
- break
+ leaf.copyOf(selector)
+ for _, s := range x.namedSelects[selector] {
+ locs = append(locs, x.f.Names[s.locIndex])
}
- return t
- }
-
- // Calls that need lowering have some number of inputs, including a memory input,
- // and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
-
- // With the current ABI those inputs need to be converted into stores to memory,
- // rethreading the call's memory input to the first, and the new call now receiving the last.
-
- // With the current ABI, the outputs need to be converted to loads, which will all use the call's
- // memory output as their input.
- // rewriteSelect recursively walks from leaf selector to a root (OpSelectN, OpLoad, OpArg)
- // through a chain of Struct/Array/builtin Select operations. If the chain of selectors does not
- // end in an expected root, it does nothing (this can happen depending on compiler phase ordering).
- // The "leaf" provides the type, the root supplies the container, and the leaf-to-root path
- // accumulates the offset.
- // It emits the code necessary to implement the leaf select operation that leads to the root.
- //
- // TODO when registers really arrive, must also decompose anything split across two registers or registers and memory.
- var rewriteSelect func(leaf *Value, selector *Value, offset int64) []LocalSlot
- rewriteSelect = func(leaf *Value, selector *Value, offset int64) []LocalSlot {
- if debug {
- fmt.Printf("rewriteSelect(%s, %s, %d)\n", leaf.LongString(), selector.LongString(), offset)
- }
- var locs []LocalSlot
- leafType := leaf.Type
- if len(selector.Args) > 0 {
- w := selector.Args[0]
- if w.Op == OpCopy {
- for w.Op == OpCopy {
- w = w.Args[0]
- }
- selector.SetArg(0, w)
- }
- }
- switch selector.Op {
- case OpArg:
- if !isAlreadyExpandedAggregateType(selector.Type) {
- if leafType == selector.Type { // OpIData leads us here, sometimes.
- leaf.copyOf(selector)
+ case OpSelectN:
+ // TODO these may be duplicated. Should memoize. Intermediate selectors will go dead, no worries there.
+ call := selector.Args[0]
+ aux := call.Aux.(*AuxCall)
+ which := selector.AuxInt
+ if which == aux.NResults() { // mem is after the results.
+ // rewrite v as a Copy of call -- the replacement call will produce a mem.
+ leaf.copyOf(call)
+ } else {
+ leafType := removeTrivialWrapperTypes(leaf.Type)
+ if x.canSSAType(leafType) {
+ pt := types.NewPtr(leafType)
+ off := x.offsetFrom(x.sp, offset+aux.OffsetOfResult(which), pt)
+ // Any selection right out of the arg area/registers has to be same Block as call, use call as mem input.
+ if leaf.Block == call.Block {
+ leaf.reset(OpLoad)
+ leaf.SetArgs2(off, call)
+ leaf.Type = leafType
} else {
- f.Fatalf("Unexpected OpArg type, selector=%s, leaf=%s\n", selector.LongString(), leaf.LongString())
+ w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call)
+ leaf.copyOf(w)
+ if x.debug {
+ fmt.Printf("\tnew %s\n", w.LongString())
+ }
}
- if debug {
- fmt.Printf("\tOpArg, break\n")
+ for _, s := range x.namedSelects[selector] {
+ locs = append(locs, x.f.Names[s.locIndex])
}
- break
- }
- switch leaf.Op {
- case OpIData, OpStructSelect, OpArraySelect:
- leafType = removeTrivialWrapperTypes(leaf.Type)
- }
- aux := selector.Aux
- auxInt := selector.AuxInt + offset
- if leaf.Block == selector.Block {
- leaf.reset(OpArg)
- leaf.Aux = aux
- leaf.AuxInt = auxInt
- leaf.Type = leafType
- } else {
- w := selector.Block.NewValue0IA(leaf.Pos, OpArg, leafType, auxInt, aux)
- leaf.copyOf(w)
- if debug {
- fmt.Printf("\tnew %s\n", w.LongString())
- }
- }
- for _, s := range namedSelects[selector] {
- locs = append(locs, f.Names[s.locIndex])
- }
-
- case OpLoad: // We end up here because of IData of immediate structures.
- // Failure case:
- // (note the failure case is very rare; w/o this case, make.bash and run.bash both pass, as well as
- // the hard cases of building {syscall,math,math/cmplx,math/bits,go/constant} on ppc64le and mips-softfloat).
- //
- // GOSSAFUNC='(*dumper).dump' go build -gcflags=-l -tags=math_big_pure_go cmd/compile/internal/gc
- // cmd/compile/internal/gc/dump.go:136:14: internal compiler error: '(*dumper).dump': not lowered: v827, StructSelect PTR PTR
- // b2: ← b1
- // v20 (+142) = StaticLECall <interface {},mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v8 v1
- // v21 (142) = SelectN <mem> [1] v20
- // v22 (142) = SelectN <interface {}> [0] v20
- // b15: ← b8
- // v71 (+143) = IData <Nodes> v22 (v[Nodes])
- // v73 (+146) = StaticLECall <[]*Node,mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v71 v21
- //
- // translates (w/o the "case OpLoad:" above) to:
- //
- // b2: ← b1
- // v20 (+142) = StaticCall <mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v715
- // v23 (142) = Load <*uintptr> v19 v20
- // v823 (142) = IsNonNil <bool> v23
- // v67 (+143) = Load <*[]*Node> v880 v20
- // b15: ← b8
- // v827 (146) = StructSelect <*[]*Node> [0] v67
- // v846 (146) = Store <mem> {*[]*Node} v769 v827 v20
- // v73 (+146) = StaticCall <mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v846
- // i.e., the struct select is generated and remains in because it is not applied to an actual structure.
- // The OpLoad was created to load the single field of the IData
- // This case removes that StructSelect.
- if leafType != selector.Type {
- f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString())
- }
- leaf.copyOf(selector)
- for _, s := range namedSelects[selector] {
- locs = append(locs, f.Names[s.locIndex])
- }
-
- case OpSelectN:
- // TODO these may be duplicated. Should memoize. Intermediate selectors will go dead, no worries there.
- call := selector.Args[0]
- aux := call.Aux.(*AuxCall)
- which := selector.AuxInt
- if which == aux.NResults() { // mem is after the results.
- // rewrite v as a Copy of call -- the replacement call will produce a mem.
- leaf.copyOf(call)
} else {
- leafType := removeTrivialWrapperTypes(leaf.Type)
- if canSSAType(leafType) {
- pt := types.NewPtr(leafType)
- off := offsetFrom(sp, offset+aux.OffsetOfResult(which), pt)
- // Any selection right out of the arg area/registers has to be same Block as call, use call as mem input.
- if leaf.Block == call.Block {
- leaf.reset(OpLoad)
- leaf.SetArgs2(off, call)
- leaf.Type = leafType
- } else {
- w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call)
- leaf.copyOf(w)
- if debug {
- fmt.Printf("\tnew %s\n", w.LongString())
- }
- }
- for _, s := range namedSelects[selector] {
- locs = append(locs, f.Names[s.locIndex])
- }
- } else {
- f.Fatalf("Should not have non-SSA-able OpSelectN, selector=%s", selector.LongString())
- }
+ x.f.Fatalf("Should not have non-SSA-able OpSelectN, selector=%s", selector.LongString())
}
+ }
- case OpStructSelect:
- w := selector.Args[0]
- var ls []LocalSlot
- if w.Type.Kind() != types.TSTRUCT { // IData artifact
- ls = rewriteSelect(leaf, w, offset)
- } else {
- ls = rewriteSelect(leaf, w, offset+w.Type.FieldOff(int(selector.AuxInt)))
- if w.Op != OpIData {
- for _, l := range ls {
- locs = append(locs, f.fe.SplitStruct(l, int(selector.AuxInt)))
- }
+ case OpStructSelect:
+ w := selector.Args[0]
+ var ls []LocalSlot
+ if w.Type.Kind() != types.TSTRUCT { // IData artifact
+ ls = x.rewriteSelect(leaf, w, offset)
+ } else {
+ ls = x.rewriteSelect(leaf, w, offset+w.Type.FieldOff(int(selector.AuxInt)))
+ if w.Op != OpIData {
+ for _, l := range ls {
+ locs = append(locs, x.f.fe.SplitStruct(l, int(selector.AuxInt)))
}
}
+ }
- case OpArraySelect:
- w := selector.Args[0]
- rewriteSelect(leaf, w, offset+selector.Type.Size()*selector.AuxInt)
-
- case OpInt64Hi:
- w := selector.Args[0]
- ls := rewriteSelect(leaf, w, offset+hiOffset)
- locs = splitSlots(ls, ".hi", hiOffset, leafType)
+ case OpArraySelect:
+ w := selector.Args[0]
+ x.rewriteSelect(leaf, w, offset+selector.Type.Size()*selector.AuxInt)
- case OpInt64Lo:
- w := selector.Args[0]
- ls := rewriteSelect(leaf, w, offset+lowOffset)
- locs = splitSlots(ls, ".lo", lowOffset, leafType)
+ case OpInt64Hi:
+ w := selector.Args[0]
+ ls := x.rewriteSelect(leaf, w, offset+x.hiOffset)
+ locs = x.splitSlots(ls, ".hi", x.hiOffset, leafType)
- case OpStringPtr:
- ls := rewriteSelect(leaf, selector.Args[0], offset)
- locs = splitSlots(ls, ".ptr", 0, typ.BytePtr)
+ case OpInt64Lo:
+ w := selector.Args[0]
+ ls := x.rewriteSelect(leaf, w, offset+x.lowOffset)
+ locs = x.splitSlots(ls, ".lo", x.lowOffset, leafType)
- case OpSlicePtr:
- w := selector.Args[0]
- ls := rewriteSelect(leaf, w, offset)
- locs = splitSlots(ls, ".ptr", 0, types.NewPtr(w.Type.Elem()))
+ case OpStringPtr:
+ ls := x.rewriteSelect(leaf, selector.Args[0], offset)
+ locs = x.splitSlots(ls, ".ptr", 0, x.typs.BytePtr)
- case OpITab:
- w := selector.Args[0]
- ls := rewriteSelect(leaf, w, offset)
- sfx := ".itab"
- if w.Type.IsEmptyInterface() {
- sfx = ".type"
- }
- locs = splitSlots(ls, sfx, 0, typ.Uintptr)
+ case OpSlicePtr:
+ w := selector.Args[0]
+ ls := x.rewriteSelect(leaf, w, offset)
+ locs = x.splitSlots(ls, ".ptr", 0, types.NewPtr(w.Type.Elem()))
- case OpComplexReal:
- ls := rewriteSelect(leaf, selector.Args[0], offset)
- locs = splitSlots(ls, ".real", 0, leafType)
+ case OpITab:
+ w := selector.Args[0]
+ ls := x.rewriteSelect(leaf, w, offset)
+ sfx := ".itab"
+ if w.Type.IsEmptyInterface() {
+ sfx = ".type"
+ }
+ locs = x.splitSlots(ls, sfx, 0, x.typs.Uintptr)
- case OpComplexImag:
- ls := rewriteSelect(leaf, selector.Args[0], offset+leafType.Width) // result is FloatNN, width of result is offset of imaginary part.
- locs = splitSlots(ls, ".imag", leafType.Width, leafType)
+ case OpComplexReal:
+ ls := x.rewriteSelect(leaf, selector.Args[0], offset)
+ locs = x.splitSlots(ls, ".real", 0, leafType)
- case OpStringLen, OpSliceLen:
- ls := rewriteSelect(leaf, selector.Args[0], offset+ptrSize)
- locs = splitSlots(ls, ".len", ptrSize, leafType)
+ case OpComplexImag:
+ ls := x.rewriteSelect(leaf, selector.Args[0], offset+leafType.Width) // result is FloatNN, width of result is offset of imaginary part.
+ locs = x.splitSlots(ls, ".imag", leafType.Width, leafType)
- case OpIData:
- ls := rewriteSelect(leaf, selector.Args[0], offset+ptrSize)
- locs = splitSlots(ls, ".data", ptrSize, leafType)
+ case OpStringLen, OpSliceLen:
+ ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize)
+ locs = x.splitSlots(ls, ".len", x.ptrSize, leafType)
- case OpSliceCap:
- ls := rewriteSelect(leaf, selector.Args[0], offset+2*ptrSize)
- locs = splitSlots(ls, ".cap", 2*ptrSize, leafType)
+ case OpIData:
+ ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize)
+ locs = x.splitSlots(ls, ".data", x.ptrSize, leafType)
- case OpCopy: // If it's an intermediate result, recurse
- locs = rewriteSelect(leaf, selector.Args[0], offset)
- for _, s := range namedSelects[selector] {
- // this copy may have had its own name, preserve that, too.
- locs = append(locs, f.Names[s.locIndex])
- }
+ case OpSliceCap:
+ ls := x.rewriteSelect(leaf, selector.Args[0], offset+2*x.ptrSize)
+ locs = x.splitSlots(ls, ".cap", 2*x.ptrSize, leafType)
- default:
- // Ignore dead ends. These can occur if this phase is run before decompose builtin (which is not intended, but allowed).
+ case OpCopy: // If it's an intermediate result, recurse
+ locs = x.rewriteSelect(leaf, selector.Args[0], offset)
+ for _, s := range x.namedSelects[selector] {
+ // this copy may have had its own name, preserve that, too.
+ locs = append(locs, x.f.Names[s.locIndex])
}
- return locs
+ default:
+ // Ignore dead ends. These can occur if this phase is run before decompose builtin (which is not intended, but allowed).
}
- // storeArgOrLoad converts stores of SSA-able aggregate arguments (passed to a call) into a series of primitive-typed
- // stores of non-aggregate types. It recursively walks up a chain of selectors until it reaches a Load or an Arg.
- // If it does not reach a Load or an Arg, nothing happens; this allows a little freedom in phase ordering.
- var storeArgOrLoad func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64) *Value
+ return locs
+}
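
The essential bookkeeping in rewriteSelect is offset accumulation: each selector on the leaf-to-root path contributes its displacement within its container. In miniature (toy selector chain, assumed field offsets):

package main

import "fmt"

// sel models a chain of field selections; fieldOff is the field's offset
// within its parent, as w.Type.FieldOff(...) supplies above.
type sel struct {
	parent   *sel
	fieldOff int64
}

func rootOffset(leaf *sel) (root *sel, off int64) {
	for leaf.parent != nil {
		off += leaf.fieldOff
		leaf = leaf.parent
	}
	return leaf, off
}

func main() {
	root := &sel{}
	mid := &sel{parent: root, fieldOff: 8}  // field at +8 in the root container
	leaf := &sel{parent: mid, fieldOff: 16} // field at +16 within that field
	_, off := rootOffset(leaf)
	fmt.Println(off) // 24: total displacement used to address the leaf
}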
- // decomposeArgOrLoad is a helper for storeArgOrLoad.
- // It decomposes a Load or an Arg into smaller parts, parameterized by the decomposeOne and decomposeTwo functions
- // passed to it, and returns the new mem. If the type does not match one of the expected aggregate types, it returns nil instead.
- decomposeArgOrLoad := func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64,
- decomposeOne func(pos src.XPos, b *Block, base, source, mem *Value, t1 *types.Type, offArg, offStore int64) *Value,
- decomposeTwo func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value) *Value {
- u := source.Type
- switch u.Kind() {
- case types.TARRAY:
- elem := u.Elem()
- for i := int64(0); i < u.NumElem(); i++ {
- elemOff := i * elem.Size()
- mem = decomposeOne(pos, b, base, source, mem, elem, source.AuxInt+elemOff, offset+elemOff)
- pos = pos.WithNotStmt()
- }
- return mem
- case types.TSTRUCT:
- for i := 0; i < u.NumFields(); i++ {
- fld := u.Field(i)
- mem = decomposeOne(pos, b, base, source, mem, fld.Type, source.AuxInt+fld.Offset, offset+fld.Offset)
- pos = pos.WithNotStmt()
- }
- return mem
- case types.TINT64, types.TUINT64:
- if t.Width == regSize {
- break
- }
- tHi, tLo := intPairTypes(t.Kind())
- mem = decomposeOne(pos, b, base, source, mem, tHi, source.AuxInt+hiOffset, offset+hiOffset)
- pos = pos.WithNotStmt()
- return decomposeOne(pos, b, base, source, mem, tLo, source.AuxInt+lowOffset, offset+lowOffset)
- case types.TINTER:
- return decomposeTwo(pos, b, base, source, mem, typ.Uintptr, typ.BytePtr, source.AuxInt, offset)
- case types.TSTRING:
- return decomposeTwo(pos, b, base, source, mem, typ.BytePtr, typ.Int, source.AuxInt, offset)
- case types.TCOMPLEX64:
- return decomposeTwo(pos, b, base, source, mem, typ.Float32, typ.Float32, source.AuxInt, offset)
- case types.TCOMPLEX128:
- return decomposeTwo(pos, b, base, source, mem, typ.Float64, typ.Float64, source.AuxInt, offset)
- case types.TSLICE:
- mem = decomposeTwo(pos, b, base, source, mem, typ.BytePtr, typ.Int, source.AuxInt, offset)
- return decomposeOne(pos, b, base, source, mem, typ.Int, source.AuxInt+2*ptrSize, offset+2*ptrSize)
- }
- return nil
+func (x *expandState) rewriteDereference(b *Block, base, a, mem *Value, offset, size int64, typ *types.Type, pos src.XPos) *Value {
+ source := a.Args[0]
+ dst := x.offsetFrom(base, offset, source.Type)
+ if a.Uses == 1 && a.Block == b {
+ a.reset(OpMove)
+ a.Pos = pos
+ a.Type = types.TypeMem
+ a.Aux = typ
+ a.AuxInt = size
+ a.SetArgs3(dst, source, mem)
+ mem = a
+ } else {
+ mem = b.NewValue3A(pos, OpMove, types.TypeMem, typ, dst, source, mem)
+ mem.AuxInt = size
}
+ return mem
+}
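
rewriteDereference reuses the OpDereference value in place only when that is safe; otherwise it materializes a fresh OpMove. A sketch of that reuse-or-copy decision (toy values, not SSA):

package main

import "fmt"

type val struct {
	uses  int
	block int
	op    string
}

// rewriteDeref mutates a in place only if a has a single use and lives in
// the current block (cheap, and no other users to disturb); otherwise it
// allocates a new value, like b.NewValue3A.
func rewriteDeref(a *val, curBlock int) *val {
	if a.uses == 1 && a.block == curBlock {
		a.op = "Move" // analogue of a.reset(OpMove)
		return a
	}
	return &val{uses: 1, block: curBlock, op: "Move"}
}

func main() {
	shared := &val{uses: 2, block: 1, op: "Dereference"}
	fmt.Println(rewriteDeref(shared, 1) == shared) // false: copied
	solo := &val{uses: 1, block: 1, op: "Dereference"}
	fmt.Println(rewriteDeref(solo, 1) == solo) // true: reset in place
}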
- // storeOneArg creates a decomposed (one step) arg that is then stored.
- // pos and b locate the store instruction, base is the base of the store target, source is the "base" of the value input,
- // mem is the input mem, t is the type in question, and offArg and offStore are the offsets from the respective bases.
- storeOneArg := func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value {
- w := common[selKey{source, offArg, t.Width, t}]
- if w == nil {
- w = source.Block.NewValue0IA(source.Pos, OpArg, t, offArg, source.Aux)
- common[selKey{source, offArg, t.Width, t}] = w
+// decomposeArgOrLoad is a helper for storeArgOrLoad.
+// It decomposes a Load or an Arg into smaller parts, parameterized by the decomposeOne and decomposeTwo functions
+// passed to it, and returns the new mem. If the type does not match one of the expected aggregate types, it returns nil instead.
+func (x *expandState) decomposeArgOrLoad(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64,
+ decomposeOne func(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1 *types.Type, offArg, offStore int64) *Value,
+ decomposeTwo func(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value) *Value {
+ u := source.Type
+ switch u.Kind() {
+ case types.TARRAY:
+ elem := u.Elem()
+ for i := int64(0); i < u.NumElem(); i++ {
+ elemOff := i * elem.Size()
+ mem = decomposeOne(x, pos, b, base, source, mem, elem, source.AuxInt+elemOff, offset+elemOff)
+ pos = pos.WithNotStmt()
}
- return storeArgOrLoad(pos, b, base, w, mem, t, offStore)
- }
-
- // storeOneLoad creates a decomposed (one step) load that is then stored.
- storeOneLoad := func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value {
- from := offsetFrom(source.Args[0], offArg, types.NewPtr(t))
- w := source.Block.NewValue2(source.Pos, OpLoad, t, from, mem)
- return storeArgOrLoad(pos, b, base, w, mem, t, offStore)
- }
-
- storeTwoArg := func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value {
- mem = storeOneArg(pos, b, base, source, mem, t1, offArg, offStore)
+ return mem
+ case types.TSTRUCT:
+ for i := 0; i < u.NumFields(); i++ {
+ fld := u.Field(i)
+ mem = decomposeOne(x, pos, b, base, source, mem, fld.Type, source.AuxInt+fld.Offset, offset+fld.Offset)
+ pos = pos.WithNotStmt()
+ }
+ return mem
+ case types.TINT64, types.TUINT64:
+ if t.Width == x.regSize {
+ break
+ }
+ tHi, tLo := x.intPairTypes(t.Kind())
+ mem = decomposeOne(x, pos, b, base, source, mem, tHi, source.AuxInt+x.hiOffset, offset+x.hiOffset)
pos = pos.WithNotStmt()
- t1Size := t1.Size()
- return storeOneArg(pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size)
+ return decomposeOne(x, pos, b, base, source, mem, tLo, source.AuxInt+x.lowOffset, offset+x.lowOffset)
+ case types.TINTER:
+ return decomposeTwo(x, pos, b, base, source, mem, x.typs.Uintptr, x.typs.BytePtr, source.AuxInt, offset)
+ case types.TSTRING:
+ return decomposeTwo(x, pos, b, base, source, mem, x.typs.BytePtr, x.typs.Int, source.AuxInt, offset)
+ case types.TCOMPLEX64:
+ return decomposeTwo(x, pos, b, base, source, mem, x.typs.Float32, x.typs.Float32, source.AuxInt, offset)
+ case types.TCOMPLEX128:
+ return decomposeTwo(x, pos, b, base, source, mem, x.typs.Float64, x.typs.Float64, source.AuxInt, offset)
+ case types.TSLICE:
+ mem = decomposeTwo(x, pos, b, base, source, mem, x.typs.BytePtr, x.typs.Int, source.AuxInt, offset)
+ return decomposeOne(x, pos, b, base, source, mem, x.typs.Int, source.AuxInt+2*x.ptrSize, offset+2*x.ptrSize)
}
+ return nil
+}
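
The TSTRING and TSLICE cases above follow a fixed layout: pointer first, length one pointer-size later, and for slices a capacity at twice the pointer size. A hedged, standalone sketch of that decomposition (64-bit offsets assumed, prints in place of stores):

package main

import "fmt"

// storePart stands in for one primitive-typed store at base+offset.
func storePart(name string, offset int64) {
	fmt.Printf("store %s at +%d\n", name, offset)
}

// decomposeString mirrors the TSTRING case: byte pointer, then length.
func decomposeString(offset, ptrSize int64) {
	storePart("ptr", offset)
	storePart("len", offset+ptrSize)
}

// decomposeSlice mirrors TSLICE: ptr and len as for strings, then cap.
func decomposeSlice(offset, ptrSize int64) {
	decomposeString(offset, ptrSize)
	storePart("cap", offset+2*ptrSize)
}

func main() {
	decomposeSlice(0, 8) // ptr at +0, len at +8, cap at +16
}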
- storeTwoLoad := func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value {
- mem = storeOneLoad(pos, b, base, source, mem, t1, offArg, offStore)
- pos = pos.WithNotStmt()
- t1Size := t1.Size()
- return storeOneLoad(pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size)
+// storeOneArg creates a decomposed (one step) arg that is then stored.
+// pos and b locate the store instruction, base is the base of the store target, source is the "base" of the value input,
+// mem is the input mem, t is the type in question, and offArg and offStore are the offsets from the respective bases.
+func storeOneArg(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value {
+ w := x.common[selKey{source, offArg, t.Width, t}]
+ if w == nil {
+ w = source.Block.NewValue0IA(source.Pos, OpArg, t, offArg, source.Aux)
+ x.common[selKey{source, offArg, t.Width, t}] = w
}
+ return x.storeArgOrLoad(pos, b, base, w, mem, t, offStore)
+}
- storeArgOrLoad = func(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64) *Value {
- if debug {
- fmt.Printf("\tstoreArgOrLoad(%s; %s; %s; %s; %d)\n", base.LongString(), source.LongString(), mem.String(), t.String(), offset)
- }
+// storeOneLoad creates a decomposed (one step) load that is then stored.
+func storeOneLoad(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offArg, offStore int64) *Value {
+ from := x.offsetFrom(source.Args[0], offArg, types.NewPtr(t))
+ w := source.Block.NewValue2(source.Pos, OpLoad, t, from, mem)
+ return x.storeArgOrLoad(pos, b, base, w, mem, t, offStore)
+}
- switch source.Op {
- case OpCopy:
- return storeArgOrLoad(pos, b, base, source.Args[0], mem, t, offset)
+func storeTwoArg(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value {
+ mem = storeOneArg(x, pos, b, base, source, mem, t1, offArg, offStore)
+ pos = pos.WithNotStmt()
+ t1Size := t1.Size()
+ return storeOneArg(x, pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size)
+}
- case OpLoad:
- ret := decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneLoad, storeTwoLoad)
- if ret != nil {
- return ret
- }
+func storeTwoLoad(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value {
+ mem = storeOneLoad(x, pos, b, base, source, mem, t1, offArg, offStore)
+ pos = pos.WithNotStmt()
+ t1Size := t1.Size()
+ return storeOneLoad(x, pos, b, base, source, mem, t2, offArg+t1Size, offStore+t1Size)
+}
- case OpArg:
- ret := decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneArg, storeTwoArg)
- if ret != nil {
- return ret
- }
+// storeArgOrLoad converts stores of SSA-able aggregate arguments (passed to a call) into a series of primitive-typed
+// stores of non-aggregate types. It recursively walks up a chain of selectors until it reaches a Load or an Arg.
+// If it does not reach a Load or an Arg, nothing happens; this allows a little freedom in phase ordering.
+func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64) *Value {
+ if x.debug {
+ fmt.Printf("\tstoreArgOrLoad(%s; %s; %s; %s; %d)\n", base.LongString(), source.LongString(), mem.String(), t.String(), offset)
+ }
- case OpArrayMake0, OpStructMake0:
- return mem
+ switch source.Op {
+ case OpCopy:
+ return x.storeArgOrLoad(pos, b, base, source.Args[0], mem, t, offset)
- case OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4:
- for i := 0; i < t.NumFields(); i++ {
- fld := t.Field(i)
- mem = storeArgOrLoad(pos, b, base, source.Args[i], mem, fld.Type, offset+fld.Offset)
- pos = pos.WithNotStmt()
- }
- return mem
+ case OpLoad:
+ ret := x.decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneLoad, storeTwoLoad)
+ if ret != nil {
+ return ret
+ }
- case OpArrayMake1:
- return storeArgOrLoad(pos, b, base, source.Args[0], mem, t.Elem(), offset)
+ case OpArg:
+ ret := x.decomposeArgOrLoad(pos, b, base, source, mem, t, offset, storeOneArg, storeTwoArg)
+ if ret != nil {
+ return ret
+ }
- case OpInt64Make:
- tHi, tLo := intPairTypes(t.Kind())
- mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, tHi, offset+hiOffset)
- pos = pos.WithNotStmt()
- return storeArgOrLoad(pos, b, base, source.Args[1], mem, tLo, offset+lowOffset)
+ case OpArrayMake0, OpStructMake0:
+ return mem
- case OpComplexMake:
- tPart := typ.Float32
- wPart := t.Width / 2
- if wPart == 8 {
- tPart = typ.Float64
- }
- mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, tPart, offset)
+ case OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4:
+ for i := 0; i < t.NumFields(); i++ {
+ fld := t.Field(i)
+ mem = x.storeArgOrLoad(pos, b, base, source.Args[i], mem, fld.Type, offset+fld.Offset)
pos = pos.WithNotStmt()
- return storeArgOrLoad(pos, b, base, source.Args[1], mem, tPart, offset+wPart)
+ }
+ return mem
- case OpIMake:
- mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, typ.Uintptr, offset)
- pos = pos.WithNotStmt()
- return storeArgOrLoad(pos, b, base, source.Args[1], mem, typ.BytePtr, offset+ptrSize)
+ case OpArrayMake1:
+ return x.storeArgOrLoad(pos, b, base, source.Args[0], mem, t.Elem(), offset)
- case OpStringMake:
- mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, typ.BytePtr, offset)
- pos = pos.WithNotStmt()
- return storeArgOrLoad(pos, b, base, source.Args[1], mem, typ.Int, offset+ptrSize)
+ case OpInt64Make:
+ tHi, tLo := x.intPairTypes(t.Kind())
+ mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, tHi, offset+x.hiOffset)
+ pos = pos.WithNotStmt()
+ return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, tLo, offset+x.lowOffset)
- case OpSliceMake:
- mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, typ.BytePtr, offset)
- pos = pos.WithNotStmt()
- mem = storeArgOrLoad(pos, b, base, source.Args[1], mem, typ.Int, offset+ptrSize)
- return storeArgOrLoad(pos, b, base, source.Args[2], mem, typ.Int, offset+2*ptrSize)
+ case OpComplexMake:
+ tPart := x.typs.Float32
+ wPart := t.Width / 2
+ if wPart == 8 {
+ tPart = x.typs.Float64
}
+ mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, tPart, offset)
+ pos = pos.WithNotStmt()
+ return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, tPart, offset+wPart)
- // For nodes that cannot be taken apart -- OpSelectN, other structure selectors.
- switch t.Kind() {
- case types.TARRAY:
- elt := t.Elem()
- if source.Type != t && t.NumElem() == 1 && elt.Width == t.Width && t.Width == regSize {
- t = removeTrivialWrapperTypes(t)
- // it could be a leaf type, but the "leaf" could be complex64 (for example)
- return storeArgOrLoad(pos, b, base, source, mem, t, offset)
- }
- for i := int64(0); i < t.NumElem(); i++ {
- sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source)
- mem = storeArgOrLoad(pos, b, base, sel, mem, elt, offset+i*elt.Width)
- pos = pos.WithNotStmt()
- }
- return mem
+ case OpIMake:
+ mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, x.typs.Uintptr, offset)
+ pos = pos.WithNotStmt()
+ return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, x.typs.BytePtr, offset+x.ptrSize)
- case types.TSTRUCT:
- if source.Type != t && t.NumFields() == 1 && t.Field(0).Type.Width == t.Width && t.Width == regSize {
- // This peculiar test deals with accesses to immediate interface data.
- // It works okay because everything is the same size.
- // Example code that triggers this can be found in go/constant/value.go, function ToComplex
- // v119 (+881) = IData <intVal> v6
- // v121 (+882) = StaticLECall <floatVal,mem> {AuxCall{"".itof([intVal,0])[floatVal,8]}} [16] v119 v1
- // This corresponds to the generic rewrite rule "(StructSelect [0] (IData x)) => (IData x)"
- // Guard against "struct{struct{*foo}}"
- // Other rewriting phases create minor glitches when they transform IData, for instance the
- // interface-typed Arg "x" of ToFloat in go/constant/value.go
- // v6 (858) = Arg <Value> {x} (x[Value], x[Value])
- // is rewritten by decomposeArgs into
- // v141 (858) = Arg <uintptr> {x}
- // v139 (858) = Arg <*uint8> {x} [8]
- // because of a type case clause on line 862 of go/constant/value.go
- // case intVal:
- // return itof(x)
- // v139 is later stored as an intVal == struct{val *big.Int} which naively requires the fields of
- // of a *uint8, which does not succeed.
- t = removeTrivialWrapperTypes(t)
- // it could be a leaf type, but the "leaf" could be complex64 (for example)
- return storeArgOrLoad(pos, b, base, source, mem, t, offset)
- }
+ case OpStringMake:
+ mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, x.typs.BytePtr, offset)
+ pos = pos.WithNotStmt()
+ return x.storeArgOrLoad(pos, b, base, source.Args[1], mem, x.typs.Int, offset+x.ptrSize)
- for i := 0; i < t.NumFields(); i++ {
- fld := t.Field(i)
- sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
- mem = storeArgOrLoad(pos, b, base, sel, mem, fld.Type, offset+fld.Offset)
- pos = pos.WithNotStmt()
- }
- return mem
+ case OpSliceMake:
+ mem = x.storeArgOrLoad(pos, b, base, source.Args[0], mem, x.typs.BytePtr, offset)
+ pos = pos.WithNotStmt()
+ mem = x.storeArgOrLoad(pos, b, base, source.Args[1], mem, x.typs.Int, offset+x.ptrSize)
+ return x.storeArgOrLoad(pos, b, base, source.Args[2], mem, x.typs.Int, offset+2*x.ptrSize)
+ }
- case types.TINT64, types.TUINT64:
- if t.Width == regSize {
- break
- }
- tHi, tLo := intPairTypes(t.Kind())
- sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source)
- mem = storeArgOrLoad(pos, b, base, sel, mem, tHi, offset+hiOffset)
+ // For nodes that cannot be taken apart -- OpSelectN, other structure selectors.
+ switch t.Kind() {
+ case types.TARRAY:
+ elt := t.Elem()
+ if source.Type != t && t.NumElem() == 1 && elt.Width == t.Width && t.Width == x.regSize {
+ t = removeTrivialWrapperTypes(t)
+ // it could be a leaf type, but the "leaf" could be complex64 (for example)
+ return x.storeArgOrLoad(pos, b, base, source, mem, t, offset)
+ }
+ for i := int64(0); i < t.NumElem(); i++ {
+ sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source)
+ mem = x.storeArgOrLoad(pos, b, base, sel, mem, elt, offset+i*elt.Width)
pos = pos.WithNotStmt()
- sel = source.Block.NewValue1(pos, OpInt64Lo, tLo, source)
- return storeArgOrLoad(pos, b, base, sel, mem, tLo, offset+lowOffset)
+ }
+ return mem
- case types.TINTER:
- sel := source.Block.NewValue1(pos, OpITab, typ.BytePtr, source)
- mem = storeArgOrLoad(pos, b, base, sel, mem, typ.BytePtr, offset)
- pos = pos.WithNotStmt()
- sel = source.Block.NewValue1(pos, OpIData, typ.BytePtr, source)
- return storeArgOrLoad(pos, b, base, sel, mem, typ.BytePtr, offset+ptrSize)
+ case types.TSTRUCT:
+ if source.Type != t && t.NumFields() == 1 && t.Field(0).Type.Width == t.Width && t.Width == x.regSize {
+ // This peculiar test deals with accesses to immediate interface data.
+ // It works okay because everything is the same size.
+ // Example code that triggers this can be found in go/constant/value.go, function ToComplex
+ // v119 (+881) = IData <intVal> v6
+ // v121 (+882) = StaticLECall <floatVal,mem> {AuxCall{"".itof([intVal,0])[floatVal,8]}} [16] v119 v1
+ // This corresponds to the generic rewrite rule "(StructSelect [0] (IData x)) => (IData x)"
+ // Guard against "struct{struct{*foo}}"
+ // Other rewriting phases create minor glitches when they transform IData, for instance the
+ // interface-typed Arg "x" of ToFloat in go/constant/value.go
+ // v6 (858) = Arg <Value> {x} (x[Value], x[Value])
+ // is rewritten by decomposeArgs into
+ // v141 (858) = Arg <uintptr> {x}
+ // v139 (858) = Arg <*uint8> {x} [8]
+ // because of a type case clause on line 862 of go/constant/value.go
+ // case intVal:
+ // return itof(x)
+ // v139 is later stored as an intVal == struct{val *big.Int} which naively requires the fields of
+ // of a *uint8, which does not succeed.
+ t = removeTrivialWrapperTypes(t)
+ // it could be a leaf type, but the "leaf" could be complex64 (for example)
+ return x.storeArgOrLoad(pos, b, base, source, mem, t, offset)
+ }
- case types.TSTRING:
- sel := source.Block.NewValue1(pos, OpStringPtr, typ.BytePtr, source)
- mem = storeArgOrLoad(pos, b, base, sel, mem, typ.BytePtr, offset)
+ for i := 0; i < t.NumFields(); i++ {
+ fld := t.Field(i)
+ sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
+ mem = x.storeArgOrLoad(pos, b, base, sel, mem, fld.Type, offset+fld.Offset)
pos = pos.WithNotStmt()
- sel = source.Block.NewValue1(pos, OpStringLen, typ.Int, source)
- return storeArgOrLoad(pos, b, base, sel, mem, typ.Int, offset+ptrSize)
+ }
+ return mem
- case types.TSLICE:
- et := types.NewPtr(t.Elem())
- sel := source.Block.NewValue1(pos, OpSlicePtr, et, source)
- mem = storeArgOrLoad(pos, b, base, sel, mem, et, offset)
- pos = pos.WithNotStmt()
- sel = source.Block.NewValue1(pos, OpSliceLen, typ.Int, source)
- mem = storeArgOrLoad(pos, b, base, sel, mem, typ.Int, offset+ptrSize)
- sel = source.Block.NewValue1(pos, OpSliceCap, typ.Int, source)
- return storeArgOrLoad(pos, b, base, sel, mem, typ.Int, offset+2*ptrSize)
+ case types.TINT64, types.TUINT64:
+ if t.Width == x.regSize {
+ break
+ }
+ tHi, tLo := x.intPairTypes(t.Kind())
+ sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source)
+ mem = x.storeArgOrLoad(pos, b, base, sel, mem, tHi, offset+x.hiOffset)
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpInt64Lo, tLo, source)
+ return x.storeArgOrLoad(pos, b, base, sel, mem, tLo, offset+x.lowOffset)
- case types.TCOMPLEX64:
- sel := source.Block.NewValue1(pos, OpComplexReal, typ.Float32, source)
- mem = storeArgOrLoad(pos, b, base, sel, mem, typ.Float32, offset)
- pos = pos.WithNotStmt()
- sel = source.Block.NewValue1(pos, OpComplexImag, typ.Float32, source)
- return storeArgOrLoad(pos, b, base, sel, mem, typ.Float32, offset+4)
+ case types.TINTER:
+ sel := source.Block.NewValue1(pos, OpITab, x.typs.BytePtr, source)
+ mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.BytePtr, offset)
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpIData, x.typs.BytePtr, source)
+ return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.BytePtr, offset+x.ptrSize)
- case types.TCOMPLEX128:
- sel := source.Block.NewValue1(pos, OpComplexReal, typ.Float64, source)
- mem = storeArgOrLoad(pos, b, base, sel, mem, typ.Float64, offset)
- pos = pos.WithNotStmt()
- sel = source.Block.NewValue1(pos, OpComplexImag, typ.Float64, source)
- return storeArgOrLoad(pos, b, base, sel, mem, typ.Float64, offset+8)
- }
+ case types.TSTRING:
+ sel := source.Block.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source)
+ mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.BytePtr, offset)
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpStringLen, x.typs.Int, source)
+ return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Int, offset+x.ptrSize)
- dst := offsetFrom(base, offset, types.NewPtr(t))
- x := b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, source, mem)
- if debug {
- fmt.Printf("\t\tstoreArg returns %s\n", x.LongString())
- }
- return x
+ case types.TSLICE:
+ et := types.NewPtr(t.Elem())
+ sel := source.Block.NewValue1(pos, OpSlicePtr, et, source)
+ mem = x.storeArgOrLoad(pos, b, base, sel, mem, et, offset)
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpSliceLen, x.typs.Int, source)
+ mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Int, offset+x.ptrSize)
+ sel = source.Block.NewValue1(pos, OpSliceCap, x.typs.Int, source)
+ return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Int, offset+2*x.ptrSize)
+
+ case types.TCOMPLEX64:
+ sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float32, source)
+ mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float32, offset)
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float32, source)
+ return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float32, offset+4)
+
+ case types.TCOMPLEX128:
+ sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float64, source)
+ mem = x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float64, offset)
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float64, source)
+ return x.storeArgOrLoad(pos, b, base, sel, mem, x.typs.Float64, offset+8)
}
- rewriteDereference := func(b *Block, base, a, mem *Value, offset, size int64, typ *types.Type, pos src.XPos) *Value {
- source := a.Args[0]
- dst := offsetFrom(base, offset, source.Type)
- if a.Uses == 1 && a.Block == b {
- a.reset(OpMove)
- a.Pos = pos
- a.Type = types.TypeMem
- a.Aux = typ
- a.AuxInt = size
- a.SetArgs3(dst, source, mem)
- mem = a
- } else {
- mem = b.NewValue3A(pos, OpMove, types.TypeMem, typ, dst, source, mem)
- mem.AuxInt = size
- }
- return mem
+ dst := x.offsetFrom(base, offset, types.NewPtr(t))
+ s := b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, source, mem)
+ if x.debug {
+ fmt.Printf("\t\tstoreArg returns %s\n", s.LongString())
}
+ return s
+}
- // rewriteArgs removes all the Args from a call and converts the call args into appropriate
- // stores (or later, register movement). Extra args for interface and closure calls are ignored,
- // but removed.
- rewriteArgs := func(v *Value, firstArg int) *Value {
- // Thread the stores on the memory arg
- aux := v.Aux.(*AuxCall)
- pos := v.Pos.WithNotStmt()
- m0 := v.MemoryArg()
- mem := m0
- for i, a := range v.Args {
- if i < firstArg {
- continue
- }
- if a == m0 { // mem is last.
- break
+// rewriteArgs removes all the Args from a call and converts the call args into appropriate
+// stores (or later, register movement). Extra args for interface and closure calls are ignored,
+// but removed.
+func (x *expandState) rewriteArgs(v *Value, firstArg int) *Value {
+ // Thread the stores on the memory arg
+ aux := v.Aux.(*AuxCall)
+ pos := v.Pos.WithNotStmt()
+ m0 := v.MemoryArg()
+ mem := m0
+ for i, a := range v.Args {
+ if i < firstArg {
+ continue
+ }
+ if a == m0 { // mem is last.
+ break
+ }
+ auxI := int64(i - firstArg)
+ if a.Op == OpDereference {
+ if a.MemoryArg() != m0 {
+ x.f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString())
}
- auxI := int64(i - firstArg)
- if a.Op == OpDereference {
- if a.MemoryArg() != m0 {
- f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString())
- }
- // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
- // TODO this will be more complicated with registers in the picture.
- mem = rewriteDereference(v.Block, sp, a, mem, aux.OffsetOfArg(auxI), aux.SizeOfArg(auxI), aux.TypeOfArg(auxI), pos)
- } else {
- if debug {
- fmt.Printf("storeArg %s, %v, %d\n", a.LongString(), aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI))
- }
- mem = storeArgOrLoad(pos, v.Block, sp, a, mem, aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI))
+ // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
+ // TODO this will be more complicated with registers in the picture.
+ mem = x.rewriteDereference(v.Block, x.sp, a, mem, aux.OffsetOfArg(auxI), aux.SizeOfArg(auxI), aux.TypeOfArg(auxI), pos)
+ } else {
+ if x.debug {
+ fmt.Printf("storeArg %s, %v, %d\n", a.LongString(), aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI))
}
+ mem = x.storeArgOrLoad(pos, v.Block, x.sp, a, mem, aux.TypeOfArg(auxI), aux.OffsetOfArg(auxI))
}
- v.resetArgs()
- return mem
+ }
+ v.resetArgs()
+ return mem
+}
+
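
rewriteArgs threads a single memory value through the argument stores: each store consumes the previous mem and yields the next, and the call ends up taking only the final mem. The idiom in a standalone sketch:

package main

import "fmt"

// mem models an SSA memory state; every store produces a new one.
type mem struct{ id int }

func store(m *mem, what string) *mem {
	fmt.Printf("store %s after m%d\n", what, m.id)
	return &mem{id: m.id + 1}
}

func main() {
	m := &mem{id: 0} // the call's original memory arg, m0
	for _, a := range []string{"arg0", "arg1", "arg2"} {
		m = store(m, a) // rethread: the next store uses the new mem
	}
	fmt.Printf("the call now takes m%d as its memory input\n", m.id)
}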
+// expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form
+// that is more oriented to a platform's ABI. The SelectN operations that extract results are rewritten into
+// more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are
+// reached. On the callee side, OpArg nodes are not decomposed until this phase is run.
+// TODO results should not be lowered until this phase.
+func expandCalls(f *Func) {
+ // Calls that need lowering have some number of inputs, including a memory input,
+ // and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
+
+	// With the current ABI those inputs need to be converted into stores to memory,
+	// threading the call's old memory input through the first store, so that the call
+	// now receives the memory output of the last store.
+
+ // With the current ABI, the outputs need to be converted to loads, which will all use the call's
+ // memory output as their input.
+ sp, _ := f.spSb()
+ x := &expandState{
+ f: f,
+ debug: f.pass.debug > 0,
+ canSSAType: f.fe.CanSSA,
+ regSize: f.Config.RegSize,
+ sp: sp,
+ typs: &f.Config.Types,
+ ptrSize: f.Config.PtrSize,
+ namedSelects: make(map[*Value][]namedVal),
+ sdom: f.Sdom(),
+ common: make(map[selKey]*Value),
+ offsets: make(map[offsetKey]*Value),
+ }
+
+ // For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness.
+ if f.Config.BigEndian {
+ x.lowOffset = 4
+ } else {
+ x.hiOffset = 4
+ }
+
+ if x.debug {
+		fmt.Printf("\nexpandCalls(%s)\n", f.Name)
}
	// TODO if too slow, whole program iteration can be replaced with slices of appropriate values, accumulated in the first loop here.
@@ -689,16 +697,16 @@ func expandCalls(f *Func) {
for _, v := range b.Values {
switch v.Op {
case OpStaticLECall:
- mem := rewriteArgs(v, 0)
+ mem := x.rewriteArgs(v, 0)
v.SetArgs1(mem)
case OpClosureLECall:
code := v.Args[0]
context := v.Args[1]
- mem := rewriteArgs(v, 2)
+ mem := x.rewriteArgs(v, 2)
v.SetArgs3(code, context, mem)
case OpInterLECall:
code := v.Args[0]
- mem := rewriteArgs(v, 1)
+ mem := x.rewriteArgs(v, 1)
v.SetArgs2(code, mem)
}
}
@@ -715,7 +723,7 @@ func expandCalls(f *Func) {
break
}
auxType := aux.TypeOfResult(i)
- auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.results[i].Name, sp, mem)
+ auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.results[i].Name, x.sp, mem)
auxOffset := int64(0)
auxSize := aux.SizeOfResult(i)
if a.Op == OpDereference {
@@ -727,7 +735,7 @@ func expandCalls(f *Func) {
}
continue
}
- mem = rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, pos)
+ mem = x.rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, pos)
} else {
if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr {
addr := a.Args[0]
@@ -735,7 +743,7 @@ func expandCalls(f *Func) {
continue
}
}
- mem = storeArgOrLoad(v.Pos, b, auxBase, a, mem, aux.TypeOfResult(i), auxOffset)
+ mem = x.storeArgOrLoad(v.Pos, b, auxBase, a, mem, aux.TypeOfResult(i), auxOffset)
}
}
b.SetControl(mem)
@@ -745,11 +753,11 @@ func expandCalls(f *Func) {
for i, name := range f.Names {
t := name.Type
- if isAlreadyExpandedAggregateType(t) {
+ if x.isAlreadyExpandedAggregateType(t) {
for j, v := range f.NamedValues[name] {
- if v.Op == OpSelectN || v.Op == OpArg && isAlreadyExpandedAggregateType(v.Type) {
- ns := namedSelects[v]
- namedSelects[v] = append(ns, namedVal{locIndex: i, valIndex: j})
+			if v.Op == OpSelectN || (v.Op == OpArg && x.isAlreadyExpandedAggregateType(v.Type)) {
+ ns := x.namedSelects[v]
+ x.namedSelects[v] = append(ns, namedVal{locIndex: i, valIndex: j})
}
}
}
@@ -763,22 +771,22 @@ func expandCalls(f *Func) {
t := v.Aux.(*types.Type)
source := v.Args[1]
tSrc := source.Type
- iAEATt := isAlreadyExpandedAggregateType(t)
+ iAEATt := x.isAlreadyExpandedAggregateType(t)
if !iAEATt {
			// guarding against storing an immediate struct into an interface data field -- the store type is *uint8
// TODO can this happen recursively?
- iAEATt = isAlreadyExpandedAggregateType(tSrc)
+ iAEATt = x.isAlreadyExpandedAggregateType(tSrc)
if iAEATt {
t = tSrc
}
}
if iAEATt {
- if debug {
+ if x.debug {
fmt.Printf("Splitting store %s\n", v.LongString())
}
dst, mem := v.Args[0], v.Args[2]
- mem = storeArgOrLoad(v.Pos, b, dst, source, mem, t, 0)
+ mem = x.storeArgOrLoad(v.Pos, b, dst, source, mem, t, 0)
v.copyOf(mem)
}
}
@@ -807,7 +815,7 @@ func expandCalls(f *Func) {
switch w.Op {
case OpStructSelect, OpArraySelect, OpSelectN, OpArg:
val2Preds[w] += 1
- if debug {
+ if x.debug {
fmt.Printf("v2p[%s] = %d\n", w.LongString(), val2Preds[w])
}
}
@@ -816,18 +824,18 @@ func expandCalls(f *Func) {
case OpSelectN:
if _, ok := val2Preds[v]; !ok {
val2Preds[v] = 0
- if debug {
+ if x.debug {
fmt.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
}
}
case OpArg:
- if !isAlreadyExpandedAggregateType(v.Type) {
+ if !x.isAlreadyExpandedAggregateType(v.Type) {
continue
}
if _, ok := val2Preds[v]; !ok {
val2Preds[v] = 0
- if debug {
+ if x.debug {
fmt.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
}
}
@@ -838,7 +846,7 @@ func expandCalls(f *Func) {
which := v.AuxInt
aux := call.Aux.(*AuxCall)
pt := v.Type
- off := offsetFrom(sp, aux.OffsetOfResult(which), pt)
+ off := x.offsetFrom(x.sp, aux.OffsetOfResult(which), pt)
v.copyOf(off)
}
}
@@ -860,7 +868,7 @@ func expandCalls(f *Func) {
if bi == bj {
return vi.ID < vj.ID
}
- return sdom.domorder(bi) > sdom.domorder(bj) // reverse the order to put dominators last.
+ return x.sdom.domorder(bi) > x.sdom.domorder(bj) // reverse the order to put dominators last.
}
// Accumulate order in allOrdered
@@ -894,7 +902,7 @@ func expandCalls(f *Func) {
}
}
- common = make(map[selKey]*Value)
+ x.common = make(map[selKey]*Value)
// Rewrite duplicate selectors as copies where possible.
for i := len(allOrdered) - 1; i >= 0; i-- {
v := allOrdered[i]
@@ -926,26 +934,26 @@ func expandCalls(f *Func) {
case OpSelectN:
offset = w.Aux.(*AuxCall).OffsetOfResult(v.AuxInt)
case OpInt64Hi:
- offset = hiOffset
+ offset = x.hiOffset
case OpInt64Lo:
- offset = lowOffset
+ offset = x.lowOffset
case OpStringLen, OpSliceLen, OpIData:
- offset = ptrSize
+ offset = x.ptrSize
case OpSliceCap:
- offset = 2 * ptrSize
+ offset = 2 * x.ptrSize
case OpComplexImag:
offset = size
}
sk := selKey{from: w, size: size, offset: offset, typ: typ}
- dupe := common[sk]
+ dupe := x.common[sk]
if dupe == nil {
- common[sk] = v
- } else if sdom.IsAncestorEq(dupe.Block, v.Block) {
+ x.common[sk] = v
+ } else if x.sdom.IsAncestorEq(dupe.Block, v.Block) {
v.copyOf(dupe)
} else {
			// Because values are processed in dominator order, the old common[sk] will never dominate after a miss is seen.
// Installing the new value might match some future values.
- common[sk] = v
+ x.common[sk] = v
}
}
@@ -954,7 +962,7 @@ func expandCalls(f *Func) {
// Rewrite selectors.
for i, v := range allOrdered {
- if debug {
+ if x.debug {
b := v.Block
fmt.Printf("allOrdered[%d] = b%d, %s, uses=%d\n", i, b.ID, v.LongString(), v.Uses)
}
@@ -965,13 +973,13 @@ func expandCalls(f *Func) {
if v.Op == OpCopy {
continue
}
- locs := rewriteSelect(v, v, 0)
+ locs := x.rewriteSelect(v, v, 0)
// Install new names.
if v.Type.IsMemory() {
continue
}
// Leaf types may have debug locations
- if !isAlreadyExpandedAggregateType(v.Type) {
+ if !x.isAlreadyExpandedAggregateType(v.Type) {
for _, l := range locs {
f.NamedValues[l] = append(f.NamedValues[l], v)
}
@@ -979,7 +987,7 @@ func expandCalls(f *Func) {
continue
}
// Not-leaf types that had debug locations need to lose them.
- if ns, ok := namedSelects[v]; ok {
+ if ns, ok := x.namedSelects[v]; ok {
toDelete = append(toDelete, ns...)
}
}
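The refactor above threads every argument store through the call's memory value: each new OpStore/OpMove consumes the memory produced by the previous one, and the rewritten call keeps only the final memory as its remaining argument. A minimal sketch of that threading pattern, using hypothetical toy types in place of the compiler's *ssa.Value:

package main

import "fmt"

// Toy stand-in for an SSA value; everything here is hypothetical.
type value struct {
	op   string
	args []*value
}

// storeArg emits a store of arg at the given offset, threaded through mem,
// and returns the new memory value -- mirroring how rewriteArgs chains
// stores so that each one observes the previous one.
func storeArg(mem, arg *value, offset int64) *value {
	return &value{op: fmt.Sprintf("Store[off=%d]", offset), args: []*value{arg, mem}}
}

func main() {
	mem := &value{op: "InitMem"}
	args := []*value{{op: "Arg0"}, {op: "Arg1"}, {op: "Arg2"}}
	offsets := []int64{0, 8, 16}
	// Thread the stores on the memory arg: the chain fixes their order.
	for i, a := range args {
		mem = storeArg(mem, a, offsets[i])
	}
	// The rewritten call takes only the final memory value, as in v.SetArgs1(mem).
	call := &value{op: "StaticLECall", args: []*value{mem}}
	for m := call; ; {
		fmt.Println(m.op) // StaticLECall, Store[off=16], Store[off=8], Store[off=0], InitMem
		if len(m.args) == 0 {
			break
		}
		m = m.args[len(m.args)-1]
	}
}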
diff --git a/src/cmd/compile/internal/ssa/gen/dec64.rules b/src/cmd/compile/internal/ssa/gen/dec64.rules
index 9297ed8d2e..b0f10d0a0f 100644
--- a/src/cmd/compile/internal/ssa/gen/dec64.rules
+++ b/src/cmd/compile/internal/ssa/gen/dec64.rules
@@ -42,20 +42,20 @@
(Store {hi.Type} dst hi mem))
// These are not enabled during decomposeBuiltin when late call expansion is in use, but they are always enabled for softFloat
-(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
+(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
(Int64Make
(Arg <typ.Int32> {n} [off+4])
(Arg <typ.UInt32> {n} [off]))
-(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
+(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
(Int64Make
(Arg <typ.UInt32> {n} [off+4])
(Arg <typ.UInt32> {n} [off]))
-(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
+(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
(Int64Make
(Arg <typ.Int32> {n} [off])
(Arg <typ.UInt32> {n} [off+4]))
-(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin") =>
+(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
(Int64Make
(Arg <typ.UInt32> {n} [off])
(Arg <typ.UInt32> {n} [off+4]))
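The four rules above place the two 32-bit halves of a 64-bit Arg at off and off+4, with the order chosen by endianness (the same choice the hiOffset/lowOffset fields set in expandCalls encode). A small illustrative sketch, in plain Go rather than rule syntax:

package main

import "fmt"

// halfOffsets returns the byte offsets of the high and low 32-bit words of
// a 64-bit value at base offset off. On big-endian targets the high word
// comes first; on little-endian targets the low word comes first.
func halfOffsets(off int64, bigEndian bool) (hi, lo int64) {
	if bigEndian {
		return off, off + 4
	}
	return off + 4, off
}

func main() {
	for _, be := range []bool{false, true} {
		hi, lo := halfOffsets(0, be)
		fmt.Printf("bigEndian=%v: hi at +%d, lo at +%d\n", be, hi, lo)
	}
}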
diff --git a/src/cmd/compile/internal/ssa/gen/decArgs.rules b/src/cmd/compile/internal/ssa/gen/decArgs.rules
deleted file mode 100644
index 1c9a0bb23d..0000000000
--- a/src/cmd/compile/internal/ssa/gen/decArgs.rules
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Decompose compound argument values
-// Do this early to simplify tracking names for debugging.
-
-(Arg {n} [off]) && v.Type.IsString() =>
- (StringMake
- (Arg <typ.BytePtr> {n} [off])
- (Arg <typ.Int> {n} [off+int32(config.PtrSize)]))
-
-(Arg {n} [off]) && v.Type.IsSlice() =>
- (SliceMake
- (Arg <v.Type.Elem().PtrTo()> {n} [off])
- (Arg <typ.Int> {n} [off+int32(config.PtrSize)])
- (Arg <typ.Int> {n} [off+2*int32(config.PtrSize)]))
-
-(Arg {n} [off]) && v.Type.IsInterface() =>
- (IMake
- (Arg <typ.Uintptr> {n} [off])
- (Arg <typ.BytePtr> {n} [off+int32(config.PtrSize)]))
-
-(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 16 =>
- (ComplexMake
- (Arg <typ.Float64> {n} [off])
- (Arg <typ.Float64> {n} [off+8]))
-
-(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 8 =>
- (ComplexMake
- (Arg <typ.Float32> {n} [off])
- (Arg <typ.Float32> {n} [off+4]))
-
-(Arg <t>) && t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) =>
- (StructMake0)
-(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) =>
- (StructMake1
- (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]))
-(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) =>
- (StructMake2
- (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))])
- (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))]))
-(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) =>
- (StructMake3
- (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))])
- (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))])
- (Arg <t.FieldType(2)> {n} [off+int32(t.FieldOff(2))]))
-(Arg <t> {n} [off]) && t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) =>
- (StructMake4
- (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))])
- (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))])
- (Arg <t.FieldType(2)> {n} [off+int32(t.FieldOff(2))])
- (Arg <t.FieldType(3)> {n} [off+int32(t.FieldOff(3))]))
-
-(Arg <t>) && t.IsArray() && t.NumElem() == 0 =>
- (ArrayMake0)
-(Arg <t> {n} [off]) && t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) =>
- (ArrayMake1 (Arg <t.Elem()> {n} [off]))
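The deleted rules decomposed compound Args by their header layout: strings as (ptr, len), slices as (ptr, len, cap), interfaces as (type word, data word), with consecutive words one pointer size apart. A sketch of those offsets, assuming an 8-byte pointer size (the real rules use config.PtrSize):

package main

import "fmt"

const ptrSize = 8 // assumed; the deleted rules read this from config.PtrSize

func main() {
	// Offsets mirror the deleted decArgs rules: each header word lives
	// one pointer size after the previous one.
	fmt.Println("string:     ptr@0, len@", ptrSize)
	fmt.Println("slice:      ptr@0, len@", ptrSize, ", cap@", 2*ptrSize)
	fmt.Println("interface:  type@0, data@", ptrSize)
	// complex128 splits at +8 into two float64s; complex64 at +4 into two float32s.
	fmt.Println("complex128: real@0, imag@8")
}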
diff --git a/src/cmd/compile/internal/ssa/gen/decArgsOps.go b/src/cmd/compile/internal/ssa/gen/decArgsOps.go
deleted file mode 100644
index b73d9d3976..0000000000
--- a/src/cmd/compile/internal/ssa/gen/decArgsOps.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-var decArgsOps = []opData{}
-
-var decArgsBlocks = []blockData{}
-
-func init() {
- archs = append(archs, arch{
- name: "decArgs",
- ops: decArgsOps,
- blocks: decArgsBlocks,
- generic: true,
- })
-}
diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go
index c49bc8043e..60b727f45f 100644
--- a/src/cmd/compile/internal/ssa/rewritedec64.go
+++ b/src/cmd/compile/internal/ssa/rewritedec64.go
@@ -184,12 +184,12 @@ func rewriteValuedec64_OpArg(v *Value) bool {
config := b.Func.Config
typ := &b.Func.Config.Types
// match: (Arg {n} [off])
- // cond: is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")
+ // cond: is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
// result: (Int64Make (Arg <typ.Int32> {n} [off+4]) (Arg <typ.UInt32> {n} [off]))
for {
off := auxIntToInt32(v.AuxInt)
n := auxToSym(v.Aux)
- if !(is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) {
+ if !(is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
break
}
v.reset(OpInt64Make)
@@ -203,12 +203,12 @@ func rewriteValuedec64_OpArg(v *Value) bool {
return true
}
// match: (Arg {n} [off])
- // cond: is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")
+ // cond: is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
// result: (Int64Make (Arg <typ.UInt32> {n} [off+4]) (Arg <typ.UInt32> {n} [off]))
for {
off := auxIntToInt32(v.AuxInt)
n := auxToSym(v.Aux)
- if !(is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) {
+ if !(is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
break
}
v.reset(OpInt64Make)
@@ -222,12 +222,12 @@ func rewriteValuedec64_OpArg(v *Value) bool {
return true
}
// match: (Arg {n} [off])
- // cond: is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")
+ // cond: is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
// result: (Int64Make (Arg <typ.Int32> {n} [off]) (Arg <typ.UInt32> {n} [off+4]))
for {
off := auxIntToInt32(v.AuxInt)
n := auxToSym(v.Aux)
- if !(is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) {
+ if !(is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
break
}
v.reset(OpInt64Make)
@@ -241,12 +241,12 @@ func rewriteValuedec64_OpArg(v *Value) bool {
return true
}
// match: (Arg {n} [off])
- // cond: is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")
+ // cond: is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
// result: (Int64Make (Arg <typ.UInt32> {n} [off]) (Arg <typ.UInt32> {n} [off+4]))
for {
off := auxIntToInt32(v.AuxInt)
n := auxToSym(v.Aux)
- if !(is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(go116lateCallExpansion && b.Func.pass.name == "decompose builtin")) {
+ if !(is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
break
}
v.reset(OpInt64Make)
diff --git a/src/cmd/compile/internal/ssa/rewritedecArgs.go b/src/cmd/compile/internal/ssa/rewritedecArgs.go
deleted file mode 100644
index 23ff417eee..0000000000
--- a/src/cmd/compile/internal/ssa/rewritedecArgs.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Code generated from gen/decArgs.rules; DO NOT EDIT.
-// generated with: cd gen; go run *.go
-
-package ssa
-
-func rewriteValuedecArgs(v *Value) bool {
- switch v.Op {
- case OpArg:
- return rewriteValuedecArgs_OpArg(v)
- }
- return false
-}
-func rewriteValuedecArgs_OpArg(v *Value) bool {
- b := v.Block
- config := b.Func.Config
- fe := b.Func.fe
- typ := &b.Func.Config.Types
- // match: (Arg {n} [off])
- // cond: v.Type.IsString()
- // result: (StringMake (Arg <typ.BytePtr> {n} [off]) (Arg <typ.Int> {n} [off+int32(config.PtrSize)]))
- for {
- off := auxIntToInt32(v.AuxInt)
- n := auxToSym(v.Aux)
- if !(v.Type.IsString()) {
- break
- }
- v.reset(OpStringMake)
- v0 := b.NewValue0(v.Pos, OpArg, typ.BytePtr)
- v0.AuxInt = int32ToAuxInt(off)
- v0.Aux = symToAux(n)
- v1 := b.NewValue0(v.Pos, OpArg, typ.Int)
- v1.AuxInt = int32ToAuxInt(off + int32(config.PtrSize))
- v1.Aux = symToAux(n)
- v.AddArg2(v0, v1)
- return true
- }
- // match: (Arg {n} [off])
- // cond: v.Type.IsSlice()
- // result: (SliceMake (Arg <v.Type.Elem().PtrTo()> {n} [off]) (Arg <typ.Int> {n} [off+int32(config.PtrSize)]) (Arg <typ.Int> {n} [off+2*int32(config.PtrSize)]))
- for {
- off := auxIntToInt32(v.AuxInt)
- n := auxToSym(v.Aux)
- if !(v.Type.IsSlice()) {
- break
- }
- v.reset(OpSliceMake)
- v0 := b.NewValue0(v.Pos, OpArg, v.Type.Elem().PtrTo())
- v0.AuxInt = int32ToAuxInt(off)
- v0.Aux = symToAux(n)
- v1 := b.NewValue0(v.Pos, OpArg, typ.Int)
- v1.AuxInt = int32ToAuxInt(off + int32(config.PtrSize))
- v1.Aux = symToAux(n)
- v2 := b.NewValue0(v.Pos, OpArg, typ.Int)
- v2.AuxInt = int32ToAuxInt(off + 2*int32(config.PtrSize))
- v2.Aux = symToAux(n)
- v.AddArg3(v0, v1, v2)
- return true
- }
- // match: (Arg {n} [off])
- // cond: v.Type.IsInterface()
- // result: (IMake (Arg <typ.Uintptr> {n} [off]) (Arg <typ.BytePtr> {n} [off+int32(config.PtrSize)]))
- for {
- off := auxIntToInt32(v.AuxInt)
- n := auxToSym(v.Aux)
- if !(v.Type.IsInterface()) {
- break
- }
- v.reset(OpIMake)
- v0 := b.NewValue0(v.Pos, OpArg, typ.Uintptr)
- v0.AuxInt = int32ToAuxInt(off)
- v0.Aux = symToAux(n)
- v1 := b.NewValue0(v.Pos, OpArg, typ.BytePtr)
- v1.AuxInt = int32ToAuxInt(off + int32(config.PtrSize))
- v1.Aux = symToAux(n)
- v.AddArg2(v0, v1)
- return true
- }
- // match: (Arg {n} [off])
- // cond: v.Type.IsComplex() && v.Type.Size() == 16
- // result: (ComplexMake (Arg <typ.Float64> {n} [off]) (Arg <typ.Float64> {n} [off+8]))
- for {
- off := auxIntToInt32(v.AuxInt)
- n := auxToSym(v.Aux)
- if !(v.Type.IsComplex() && v.Type.Size() == 16) {
- break
- }
- v.reset(OpComplexMake)
- v0 := b.NewValue0(v.Pos, OpArg, typ.Float64)
- v0.AuxInt = int32ToAuxInt(off)
- v0.Aux = symToAux(n)
- v1 := b.NewValue0(v.Pos, OpArg, typ.Float64)
- v1.AuxInt = int32ToAuxInt(off + 8)
- v1.Aux = symToAux(n)
- v.AddArg2(v0, v1)
- return true
- }
- // match: (Arg {n} [off])
- // cond: v.Type.IsComplex() && v.Type.Size() == 8
- // result: (ComplexMake (Arg <typ.Float32> {n} [off]) (Arg <typ.Float32> {n} [off+4]))
- for {
- off := auxIntToInt32(v.AuxInt)
- n := auxToSym(v.Aux)
- if !(v.Type.IsComplex() && v.Type.Size() == 8) {
- break
- }
- v.reset(OpComplexMake)
- v0 := b.NewValue0(v.Pos, OpArg, typ.Float32)
- v0.AuxInt = int32ToAuxInt(off)
- v0.Aux = symToAux(n)
- v1 := b.NewValue0(v.Pos, OpArg, typ.Float32)
- v1.AuxInt = int32ToAuxInt(off + 4)
- v1.Aux = symToAux(n)
- v.AddArg2(v0, v1)
- return true
- }
- // match: (Arg <t>)
- // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)
- // result: (StructMake0)
- for {
- t := v.Type
- if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) {
- break
- }
- v.reset(OpStructMake0)
- return true
- }
- // match: (Arg <t> {n} [off])
- // cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)
- // result: (StructMake1 (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]))
- for {
- t := v.Type
- off := auxIntToInt32(v.AuxInt)
- n := auxToSym(v.Aux)
- if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) {
- break
- }
- v.reset(OpStructMake1)
- v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
- v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0)))
- v0.Aux = symToAux(n)
- v.AddArg(v0)
- return true
- }
- // match: (Arg <t> {n} [off])
- // cond: t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)
- // result: (StructMake2 (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]) (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))]))
- for {
- t := v.Type
- off := auxIntToInt32(v.AuxInt)
- n := auxToSym(v.Aux)
- if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) {
- break
- }
- v.reset(OpStructMake2)
- v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
- v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0)))
- v0.Aux = symToAux(n)
- v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1))
- v1.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(1)))
- v1.Aux = symToAux(n)
- v.AddArg2(v0, v1)
- return true
- }
- // match: (Arg <t> {n} [off])
- // cond: t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)
- // result: (StructMake3 (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]) (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))]) (Arg <t.FieldType(2)> {n} [off+int32(t.FieldOff(2))]))
- for {
- t := v.Type
- off := auxIntToInt32(v.AuxInt)
- n := auxToSym(v.Aux)
- if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) {
- break
- }
- v.reset(OpStructMake3)
- v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
- v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0)))
- v0.Aux = symToAux(n)
- v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1))
- v1.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(1)))
- v1.Aux = symToAux(n)
- v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2))
- v2.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(2)))
- v2.Aux = symToAux(n)
- v.AddArg3(v0, v1, v2)
- return true
- }
- // match: (Arg <t> {n} [off])
- // cond: t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)
- // result: (StructMake4 (Arg <t.FieldType(0)> {n} [off+int32(t.FieldOff(0))]) (Arg <t.FieldType(1)> {n} [off+int32(t.FieldOff(1))]) (Arg <t.FieldType(2)> {n} [off+int32(t.FieldOff(2))]) (Arg <t.FieldType(3)> {n} [off+int32(t.FieldOff(3))]))
- for {
- t := v.Type
- off := auxIntToInt32(v.AuxInt)
- n := auxToSym(v.Aux)
- if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) {
- break
- }
- v.reset(OpStructMake4)
- v0 := b.NewValue0(v.Pos, OpArg, t.FieldType(0))
- v0.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(0)))
- v0.Aux = symToAux(n)
- v1 := b.NewValue0(v.Pos, OpArg, t.FieldType(1))
- v1.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(1)))
- v1.Aux = symToAux(n)
- v2 := b.NewValue0(v.Pos, OpArg, t.FieldType(2))
- v2.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(2)))
- v2.Aux = symToAux(n)
- v3 := b.NewValue0(v.Pos, OpArg, t.FieldType(3))
- v3.AuxInt = int32ToAuxInt(off + int32(t.FieldOff(3)))
- v3.Aux = symToAux(n)
- v.AddArg4(v0, v1, v2, v3)
- return true
- }
- // match: (Arg <t>)
- // cond: t.IsArray() && t.NumElem() == 0
- // result: (ArrayMake0)
- for {
- t := v.Type
- if !(t.IsArray() && t.NumElem() == 0) {
- break
- }
- v.reset(OpArrayMake0)
- return true
- }
- // match: (Arg <t> {n} [off])
- // cond: t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)
- // result: (ArrayMake1 (Arg <t.Elem()> {n} [off]))
- for {
- t := v.Type
- off := auxIntToInt32(v.AuxInt)
- n := auxToSym(v.Aux)
- if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) {
- break
- }
- v.reset(OpArrayMake1)
- v0 := b.NewValue0(v.Pos, OpArg, t.Elem())
- v0.AuxInt = int32ToAuxInt(off)
- v0.Aux = symToAux(n)
- v.AddArg(v0)
- return true
- }
- return false
-}
-func rewriteBlockdecArgs(b *Block) bool {
- switch b.Kind {
- }
- return false
-}
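The deleted generated code follows the usual shape of a rewrite function: match on v.Op, check the condition, then reset the value to the result op and rebuild its arguments. A toy version of that shape, with hypothetical ops rather than real SSA ones:

package main

import "fmt"

// Hypothetical miniature of a generated rewrite function.
type value struct {
	op   string
	args []*value
}

// reset clears the value and gives it a new op, like ssa.Value.reset.
func (v *value) reset(op string) {
	v.op = op
	v.args = v.args[:0]
}

// rewriteArgPair models a rule like (Arg) => (PairMake (ArgLo) (ArgHi)):
// on a match, reset the value to the result op, attach freshly built
// argument values, and report success.
func rewriteArgPair(v *value) bool {
	if v.op != "Arg" {
		return false
	}
	v.reset("PairMake")
	v.args = append(v.args, &value{op: "ArgLo"}, &value{op: "ArgHi"})
	return true
}

func main() {
	v := &value{op: "Arg"}
	fmt.Println(rewriteArgPair(v), v.op, len(v.args)) // true PairMake 2
}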
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index e49a9716fe..b042c132d5 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -1803,7 +1803,7 @@ const shareDeferExits = false
// It returns a BlockRet block that ends the control flow. Its control value
// will be set to the final memory state.
func (s *state) exit() *ssa.Block {
- lateResultLowering := s.f.DebugTest && ssa.LateCallExpansionEnabledWithin(s.f)
+ lateResultLowering := s.f.DebugTest
if s.hasdefer {
if s.hasOpenDefers {
if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
@@ -4628,7 +4628,6 @@ func (s *state) openDeferExit() {
s.lastDeferExit = deferExit
s.lastDeferCount = len(s.openDefers)
zeroval := s.constInt8(types.Types[types.TUINT8], 0)
- testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
// Test for and run defers in reverse order
for i := len(s.openDefers) - 1; i >= 0; i-- {
r := s.openDefers[i]
@@ -4670,35 +4669,19 @@ func (s *state) openDeferExit() {
if r.rcvr != nil {
// rcvr in case of OCALLINTER
v := s.load(r.rcvr.Type.Elem(), r.rcvr)
- addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart)})
- if testLateExpansion {
- callArgs = append(callArgs, v)
- } else {
- s.store(types.Types[types.TUINTPTR], addr, v)
- }
+ callArgs = append(callArgs, v)
}
for j, argAddrVal := range r.argVals {
f := getParam(r.n, j)
- pt := types.NewPtr(f.Type)
ACArgs = append(ACArgs, ssa.Param{Type: f.Type, Offset: int32(argStart + f.Offset)})
- if testLateExpansion {
- var a *ssa.Value
- if !TypeOK(f.Type) {
- a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
- } else {
- a = s.load(f.Type, argAddrVal)
- }
- callArgs = append(callArgs, a)
+ var a *ssa.Value
+ if !TypeOK(f.Type) {
+ a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
} else {
- addr := s.constOffPtrSP(pt, argStart+f.Offset)
- if !TypeOK(f.Type) {
- s.move(f.Type, addr, argAddrVal)
- } else {
- argVal := s.load(f.Type, argAddrVal)
- s.storeType(f.Type, addr, argVal, 0, false)
- }
+ a = s.load(f.Type, argAddrVal)
}
+ callArgs = append(callArgs, a)
}
var call *ssa.Value
if r.closure != nil {
@@ -4706,30 +4689,15 @@ func (s *state) openDeferExit() {
s.maybeNilCheckClosure(v, callDefer)
codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
aux := ssa.ClosureAuxCall(ACArgs, ACResults)
- if testLateExpansion {
- callArgs = append(callArgs, s.mem())
- call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
- call.AddArgs(callArgs...)
- } else {
- call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, aux, codeptr, v, s.mem())
- }
+ call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
} else {
aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), ACArgs, ACResults)
- if testLateExpansion {
- callArgs = append(callArgs, s.mem())
- call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
- call.AddArgs(callArgs...)
- } else {
- // Do a static call if the original call was a static function or method
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
- }
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
}
+ callArgs = append(callArgs, s.mem())
+ call.AddArgs(callArgs...)
call.AuxInt = stksize
- if testLateExpansion {
- s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
- } else {
- s.vars[memVar] = call
- }
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
// Make sure that the stack slots with pointers are kept live
// through the call (which is a pre-emption point). Also, we will
// use the first call of the last defer exit to compute liveness
@@ -4782,12 +4750,10 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
}
}
- testLateExpansion := false
inRegisters := false
switch n.Op() {
case ir.OCALLFUNC:
- testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
if k == callNormal && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
fn := fn.(*ir.Name)
callee = fn
@@ -4813,7 +4779,6 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
}
fn := fn.(*ir.SelectorExpr)
- testLateExpansion = k != callDeferStack && ssa.LateCallExpansionEnabledWithin(s.f)
var iclosure *ssa.Value
iclosure, rcvr = s.getClosureAndRcvr(fn)
if k == callNormal {
@@ -4827,7 +4792,6 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
var call *ssa.Value
if k == callDeferStack {
- testLateExpansion = ssa.LateCallExpansionEnabledWithin(s.f)
// Make a defer struct d on the stack.
t := deferstruct(stksize)
d := typecheck.TempAt(n.Pos(), s.curfn, t)
@@ -4878,15 +4842,9 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// Call runtime.deferprocStack with pointer to _defer record.
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(base.Ctxt.FixedFrameSize())})
aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, ACArgs, ACResults)
- if testLateExpansion {
- callArgs = append(callArgs, addr, s.mem())
- call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
- call.AddArgs(callArgs...)
- } else {
- arg0 := s.constOffPtrSP(types.Types[types.TUINTPTR], base.Ctxt.FixedFrameSize())
- s.store(types.Types[types.TUINTPTR], arg0, addr)
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
- }
+ callArgs = append(callArgs, addr, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
if stksize < int64(types.PtrSize) {
// We need room for both the call to deferprocStack and the call to
// the deferred function.
@@ -4903,32 +4861,17 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// Write argsize and closure (args to newproc/deferproc).
argsize := s.constInt32(types.Types[types.TUINT32], int32(stksize))
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINT32], Offset: int32(argStart)})
- if testLateExpansion {
- callArgs = append(callArgs, argsize)
- } else {
- addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
- s.store(types.Types[types.TUINT32], addr, argsize)
- }
+ callArgs = append(callArgs, argsize)
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart) + int32(types.PtrSize)})
- if testLateExpansion {
- callArgs = append(callArgs, closure)
- } else {
- addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(types.PtrSize))
- s.store(types.Types[types.TUINTPTR], addr, closure)
- }
+ callArgs = append(callArgs, closure)
stksize += 2 * int64(types.PtrSize)
argStart += 2 * int64(types.PtrSize)
}
// Set receiver (for interface calls).
if rcvr != nil {
- addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(argStart)})
- if testLateExpansion {
- callArgs = append(callArgs, rcvr)
- } else {
- s.store(types.Types[types.TUINTPTR], addr, rcvr)
- }
+ callArgs = append(callArgs, rcvr)
}
// Write args.
@@ -4939,7 +4882,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
}
for i, n := range args {
f := t.Params().Field(i)
- ACArg, arg := s.putArg(n, f.Type, argStart+f.Offset, testLateExpansion)
+ ACArg, arg := s.putArg(n, f.Type, argStart+f.Offset)
ACArgs = append(ACArgs, ACArg)
callArgs = append(callArgs, arg)
}
@@ -4950,20 +4893,10 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
switch {
case k == callDefer:
aux := ssa.StaticAuxCall(ir.Syms.Deferproc, ACArgs, ACResults)
- if testLateExpansion {
- call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
- call.AddArgs(callArgs...)
- } else {
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
- }
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
case k == callGo:
aux := ssa.StaticAuxCall(ir.Syms.Newproc, ACArgs, ACResults)
- if testLateExpansion {
- call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
- call.AddArgs(callArgs...)
- } else {
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
- }
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
case closure != nil:
// rawLoad because loading the code pointer from a
// closure is always safe, but IsSanitizerSafeAddr
@@ -4971,40 +4904,22 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// critical that we not clobber any arguments already
// stored onto the stack.
codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
- if testLateExpansion {
- aux := ssa.ClosureAuxCall(ACArgs, ACResults)
- call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
- call.AddArgs(callArgs...)
- } else {
- call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, closure, s.mem())
- }
+ aux := ssa.ClosureAuxCall(ACArgs, ACResults)
+ call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
case codeptr != nil:
- if testLateExpansion {
- aux := ssa.InterfaceAuxCall(ACArgs, ACResults)
- call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
- call.AddArgs(callArgs...)
- } else {
- call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem())
- }
+ aux := ssa.InterfaceAuxCall(ACArgs, ACResults)
+ call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
case callee != nil:
- if testLateExpansion {
- aux := ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults)
- call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
- call.AddArgs(callArgs...)
- } else {
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults), s.mem())
- }
+ aux := ssa.StaticAuxCall(callTargetLSym(callee, s.curfn.LSym), ACArgs, ACResults)
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
default:
s.Fatalf("bad call type %v %v", n.Op(), n)
}
+ call.AddArgs(callArgs...)
call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
}
- if testLateExpansion {
- s.prevCall = call
- s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
- } else {
- s.vars[memVar] = call
- }
+ s.prevCall = call
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
// Insert OVARLIVE nodes
for _, name := range n.KeepAlive {
s.stmt(ir.NewUnaryExpr(n.Pos(), ir.OVARLIVE, name))
@@ -5033,16 +4948,10 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
fp := res.Field(0)
if returnResultAddr {
pt := types.NewPtr(fp.Type)
- if testLateExpansion {
- return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call)
- }
- return s.constOffPtrSP(pt, fp.Offset+base.Ctxt.FixedFrameSize())
+ return s.newValue1I(ssa.OpSelectNAddr, pt, 0, call)
}
- if testLateExpansion {
- return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
- }
- return s.load(n.Type(), s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+base.Ctxt.FixedFrameSize()))
+ return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
}
// maybeNilCheckClosure checks if a nil check of a closure is needed in some
@@ -5458,7 +5367,6 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
s.prevCall = nil
// Write args to the stack
off := base.Ctxt.FixedFrameSize()
- testLateExpansion := ssa.LateCallExpansionEnabledWithin(s.f)
var ACArgs []ssa.Param
var ACResults []ssa.Param
var callArgs []*ssa.Value
@@ -5468,12 +5376,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
off = types.Rnd(off, t.Alignment())
size := t.Size()
ACArgs = append(ACArgs, ssa.Param{Type: t, Offset: int32(off)})
- if testLateExpansion {
- callArgs = append(callArgs, arg)
- } else {
- ptr := s.constOffPtrSP(t.PtrTo(), off)
- s.store(t, ptr, arg)
- }
+ callArgs = append(callArgs, arg)
off += size
}
off = types.Rnd(off, int64(types.RegSize))
@@ -5489,15 +5392,10 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
// Issue call
var call *ssa.Value
aux := ssa.StaticAuxCall(fn, ACArgs, ACResults)
- if testLateExpansion {
- callArgs = append(callArgs, s.mem())
- call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
- call.AddArgs(callArgs...)
- s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
- } else {
- call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, aux, s.mem())
- s.vars[memVar] = call
- }
+ callArgs = append(callArgs, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
if !returns {
// Finish block
@@ -5513,24 +5411,15 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
// Load results
res := make([]*ssa.Value, len(results))
- if testLateExpansion {
- for i, t := range results {
- off = types.Rnd(off, t.Alignment())
- if TypeOK(t) {
- res[i] = s.newValue1I(ssa.OpSelectN, t, int64(i), call)
- } else {
- addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), int64(i), call)
- res[i] = s.rawLoad(t, addr)
- }
- off += t.Size()
- }
- } else {
- for i, t := range results {
- off = types.Rnd(off, t.Alignment())
- ptr := s.constOffPtrSP(types.NewPtr(t), off)
- res[i] = s.load(t, ptr)
- off += t.Size()
+ for i, t := range results {
+ off = types.Rnd(off, t.Alignment())
+ if TypeOK(t) {
+ res[i] = s.newValue1I(ssa.OpSelectN, t, int64(i), call)
+ } else {
+ addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), int64(i), call)
+ res[i] = s.rawLoad(t, addr)
}
+ off += t.Size()
}
off = types.Rnd(off, int64(types.PtrSize))
@@ -5650,19 +5539,13 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
}
}
-// putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param for the call.
-// If forLateExpandedCall is true, it returns the argument value to pass to the call operation.
-// If forLateExpandedCall is false, then the value is stored at the specified stack offset, and the returned value is nil.
-func (s *state) putArg(n ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
+// putArg evaluates n for the purpose of passing it as an argument to a function and returns the corresponding Param and value for the call.
+func (s *state) putArg(n ir.Node, t *types.Type, off int64) (ssa.Param, *ssa.Value) {
var a *ssa.Value
- if forLateExpandedCall {
- if !TypeOK(t) {
- a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
- } else {
- a = s.expr(n)
- }
+ if !TypeOK(t) {
+ a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
} else {
- s.storeArgWithBase(n, t, s.sp, off)
+ a = s.expr(n)
}
return ssa.Param{Type: t, Offset: int32(off)}, a
}
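With the late-expansion path now unconditional, a call is built by appending the value arguments and then the memory state to the Op...LECall value, and each result (including the new memory) is a SelectN projection of the call's tuple. A toy model of that shape, using hypothetical types in place of *ssa.Value:

package main

import "fmt"

// Toy stand-in for an SSA value.
type value struct {
	op   string
	args []*value
}

// makeLECall models the pattern above: collect the value args and append
// the current memory last, as in callArgs = append(callArgs, s.mem()).
func makeLECall(mem *value, args ...*value) *value {
	call := &value{op: "StaticLECall"}
	call.args = append(call.args, args...)
	call.args = append(call.args, mem)
	return call
}

// selectN models OpSelectN: projection i of the call's (r0, ..., mem) tuple.
func selectN(call *value, i int) *value {
	return &value{op: fmt.Sprintf("SelectN[%d]", i), args: []*value{call}}
}

func main() {
	mem := &value{op: "InitMem"}
	call := makeLECall(mem, &value{op: "x"}, &value{op: "y"})
	res := selectN(call, 0)    // first result
	newMem := selectN(call, 1) // assuming one result, memory is index len(results) = 1
	fmt.Println(call.op, len(call.args), res.op, newMem.op)
}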
diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go
index 49d390297c..da06e831ae 100644
--- a/src/cmd/go/alldocs.go
+++ b/src/cmd/go/alldocs.go
@@ -111,7 +111,7 @@
// -p n
// the number of programs, such as build commands or
// test binaries, that can be run in parallel.
-// The default is the number of CPUs available.
+// The default is GOMAXPROCS, normally the number of CPUs available.
// -race
// enable data race detection.
// Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64,
diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go
index c48904eacc..322247962f 100644
--- a/src/cmd/go/internal/cfg/cfg.go
+++ b/src/cmd/go/internal/cfg/cfg.go
@@ -28,18 +28,18 @@ var (
BuildA bool // -a flag
BuildBuildmode string // -buildmode flag
BuildContext = defaultContext()
- BuildMod string // -mod flag
- BuildModExplicit bool // whether -mod was set explicitly
- BuildModReason string // reason -mod was set, if set by default
- BuildI bool // -i flag
- BuildLinkshared bool // -linkshared flag
- BuildMSan bool // -msan flag
- BuildN bool // -n flag
- BuildO string // -o flag
- BuildP = runtime.NumCPU() // -p flag
- BuildPkgdir string // -pkgdir flag
- BuildRace bool // -race flag
- BuildToolexec []string // -toolexec flag
+ BuildMod string // -mod flag
+ BuildModExplicit bool // whether -mod was set explicitly
+ BuildModReason string // reason -mod was set, if set by default
+ BuildI bool // -i flag
+ BuildLinkshared bool // -linkshared flag
+ BuildMSan bool // -msan flag
+ BuildN bool // -n flag
+ BuildO string // -o flag
+ BuildP = runtime.GOMAXPROCS(0) // -p flag
+ BuildPkgdir string // -pkgdir flag
+ BuildRace bool // -race flag
+ BuildToolexec []string // -toolexec flag
BuildToolchainName string
BuildToolchainCompiler func() string
BuildToolchainLinker func() string
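runtime.GOMAXPROCS(0) queries the current setting without changing it, so BuildP now honors the GOMAXPROCS environment variable (and any earlier call) instead of always using the raw CPU count. For example:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Passing 0 (or any n < 1) leaves the setting unchanged and just
	// reports it; by default it equals runtime.NumCPU, but it reflects
	// the GOMAXPROCS environment variable and prior calls.
	fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
	fmt.Println("NumCPU:    ", runtime.NumCPU())
}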
diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go
index 780d639c5d..0e7af6d33f 100644
--- a/src/cmd/go/internal/work/build.go
+++ b/src/cmd/go/internal/work/build.go
@@ -71,7 +71,7 @@ and test commands:
-p n
the number of programs, such as build commands or
test binaries, that can be run in parallel.
- The default is the number of CPUs available.
+ The default is GOMAXPROCS, normally the number of CPUs available.
-race
enable data race detection.
Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64,
diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go
index 3205fcbffc..2087855b3c 100644
--- a/src/cmd/go/internal/work/gc.go
+++ b/src/cmd/go/internal/work/gc.go
@@ -239,16 +239,19 @@ CheckFlags:
// - it has no successor packages to compile (usually package main)
// - all paths through the build graph pass through it
// - critical path scheduling says it is high priority
- // and in such a case, set c to runtime.NumCPU.
+ // and in such a case, set c to runtime.GOMAXPROCS(0).
+ // By default this is the same as runtime.NumCPU.
// We do this now when p==1.
+	// To limit parallelism, set GOMAXPROCS below the number of CPUs; this may be useful
+ // on a low-memory builder, or if a deterministic build order is required.
+ c := runtime.GOMAXPROCS(0)
if cfg.BuildP == 1 {
- // No process parallelism. Max out c.
- return runtime.NumCPU()
+		// No process parallelism; do not cap compiler parallelism.
+ return c
}
- // Some process parallelism. Set c to min(4, numcpu).
- c := 4
- if ncpu := runtime.NumCPU(); ncpu < c {
- c = ncpu
+ // Some process parallelism. Set c to min(4, maxprocs).
+ if c > 4 {
+ c = 4
}
return c
}
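The resulting policy: with -p=1 the backend may use up to GOMAXPROCS goroutines, while with process-level parallelism it is capped at min(4, GOMAXPROCS). A standalone sketch of that decision, with buildP standing in for cfg.BuildP:

package main

import (
	"fmt"
	"runtime"
)

// backendConcurrency mirrors the capping logic above; buildP stands in
// for cfg.BuildP (the -p flag).
func backendConcurrency(buildP int) int {
	c := runtime.GOMAXPROCS(0)
	if buildP == 1 {
		// No process parallelism; do not cap compiler parallelism.
		return c
	}
	// Some process parallelism. Cap c at 4.
	if c > 4 {
		c = 4
	}
	return c
}

func main() {
	fmt.Println("p=1:", backendConcurrency(1))
	fmt.Println("p=4:", backendConcurrency(4))
}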
diff --git a/src/cmd/go/testdata/script/build_trimpath.txt b/src/cmd/go/testdata/script/build_trimpath.txt
index e1ea0a48b2..2c3bee8fdc 100644
--- a/src/cmd/go/testdata/script/build_trimpath.txt
+++ b/src/cmd/go/testdata/script/build_trimpath.txt
@@ -121,6 +121,7 @@ package main
import (
"bytes"
"fmt"
+ "io/ioutil"
"log"
"os"
"os/exec"
@@ -130,7 +131,7 @@ import (
func main() {
exe := os.Args[1]
- data, err := os.ReadFile(exe)
+ data, err := ioutil.ReadFile(exe)
if err != nil {
log.Fatal(err)
}
diff --git a/src/cmd/go/testdata/script/link_syso_issue33139.txt b/src/cmd/go/testdata/script/link_syso_issue33139.txt
index 26034c9626..8a8cb4aa8c 100644
--- a/src/cmd/go/testdata/script/link_syso_issue33139.txt
+++ b/src/cmd/go/testdata/script/link_syso_issue33139.txt
@@ -2,7 +2,7 @@
# embedded in a package, that is referenced by a Go assembly function.
# See issue 33139.
[!gc] skip
-[!exec:cc] skip
+[!cgo] skip
# External linking is not supported on linux/ppc64.
# See: https://github.com/golang/go/issues/8912
diff --git a/src/cmd/link/elf_test.go b/src/cmd/link/elf_test.go
index 334f050e88..20754d09f5 100644
--- a/src/cmd/link/elf_test.go
+++ b/src/cmd/link/elf_test.go
@@ -226,6 +226,12 @@ func main() {
func TestPIESize(t *testing.T) {
testenv.MustHaveGoBuild(t)
+
+ // We don't want to test -linkmode=external if cgo is not supported.
+ // On some systems -buildmode=pie implies -linkmode=external, so just
+ // always skip the test if cgo is not supported.
+ testenv.MustHaveCGO(t)
+
if !sys.BuildModeSupported(runtime.Compiler, "pie", runtime.GOOS, runtime.GOARCH) {
t.Skip("-buildmode=pie not supported")
}
diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go
index d8813fa936..245076a83a 100644
--- a/src/cmd/link/internal/ld/deadcode.go
+++ b/src/cmd/link/internal/ld/deadcode.go
@@ -165,13 +165,17 @@ func (d *deadcodePass) flood() {
// R_USEIFACEMETHOD is a marker relocation that marks an interface
// method as used.
rs := r.Sym()
- if d.ldr.SymType(rs) != sym.SDYNIMPORT { // don't decode DYNIMPORT symbol (we'll mark all exported methods anyway)
- m := d.decodeIfaceMethod(d.ldr, d.ctxt.Arch, rs, r.Add())
- if d.ctxt.Debugvlog > 1 {
- d.ctxt.Logf("reached iface method: %v\n", m)
- }
- d.ifaceMethod[m] = true
+ if d.ctxt.linkShared && (d.ldr.SymType(rs) == sym.SDYNIMPORT || d.ldr.SymType(rs) == sym.Sxxx) {
+ // Don't decode symbol from shared library (we'll mark all exported methods anyway).
+ // We check for both SDYNIMPORT and Sxxx because name-mangled symbols haven't
+ // been resolved at this point.
+ continue
+ }
+ m := d.decodeIfaceMethod(d.ldr, d.ctxt.Arch, rs, r.Add())
+ if d.ctxt.Debugvlog > 1 {
+ d.ctxt.Logf("reached iface method: %v\n", m)
}
+ d.ifaceMethod[m] = true
continue
}
rs := r.Sym()
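A condensed sketch of the guard added above, with toy types standing in for the linker's sym package; the point is that when linking shared, both SDYNIMPORT and still-unresolved (Sxxx) symbols must be skipped rather than decoded:

package main

import "fmt"

// SymKind is a toy stand-in for the linker's sym.SymKind.
type SymKind int

const (
	Sxxx       SymKind = iota // not yet resolved (e.g. still name-mangled)
	SDYNIMPORT                // defined in a shared library
	STEXT                     // an ordinary defined symbol
)

// skipDecode mirrors the guard: when linking against shared libraries,
// symbols that live in (or may resolve into) a shared library cannot be
// decoded here; their exported methods get marked elsewhere.
func skipDecode(linkShared bool, k SymKind) bool {
	return linkShared && (k == SDYNIMPORT || k == Sxxx)
}

func main() {
	fmt.Println(skipDecode(true, SDYNIMPORT)) // true
	fmt.Println(skipDecode(true, Sxxx))       // true
	fmt.Println(skipDecode(false, STEXT))     // false: not linking shared
}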
diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go
index c6e2d8ca7f..fc179fc6e4 100644
--- a/src/cmd/link/internal/ld/decodesym.go
+++ b/src/cmd/link/internal/ld/decodesym.go
@@ -279,7 +279,7 @@ func findShlibSection(ctxt *Link, path string, addr uint64) *elf.Section {
for _, shlib := range ctxt.Shlibs {
if shlib.Path == path {
for _, sect := range shlib.File.Sections[1:] { // skip the NULL section
- if sect.Addr <= addr && addr <= sect.Addr+sect.Size {
+ if sect.Addr <= addr && addr < sect.Addr+sect.Size {
return sect
}
}
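The fix turns the section lookup into a half-open interval test, [Addr, Addr+Size): an address equal to a section's end belongs to the next section, not this one. A minimal illustration:

package main

import "fmt"

// contains reports whether addr falls inside a section starting at start
// with the given size, using the half-open test from the fix above.
func contains(addr, start, size uint64) bool {
	return start <= addr && addr < start+size
}

func main() {
	// Two adjacent sections: [0x1000, 0x2000) and [0x2000, 0x3000).
	fmt.Println(contains(0x1fff, 0x1000, 0x1000)) // true: last byte of the first section
	fmt.Println(contains(0x2000, 0x1000, 0x1000)) // false: 0x2000 starts the second section
	fmt.Println(contains(0x2000, 0x2000, 0x1000)) // true
}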