From cca4ddb497a2d56654b38991566e45be1ef18f4d Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 5 Apr 2016 12:48:49 -0700 Subject: cmd/compile: add comments explaining how declarations/scopes work Change-Id: I301760b015eb69ff12eee53473fdbf5e9f168413 Reviewed-on: https://go-review.googlesource.com/21542 Reviewed-by: Brad Fitzpatrick Reviewed-by: Robert Griesemer --- src/cmd/compile/internal/gc/dcl.go | 43 +++++++++++++++++++++++++++----------- src/cmd/compile/internal/gc/go.go | 10 +++++++-- 2 files changed, 39 insertions(+), 14 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index bd5a1f6f07..8553e2f1e8 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -11,6 +11,8 @@ import ( "strings" ) +// Declaration stack & operations + func dflag() bool { if Debug['d'] == 0 { return false @@ -24,8 +26,21 @@ func dflag() bool { return true } -// declaration stack & operations -func dcopy(a *Sym, b *Sym) { +// dclstack maintains a stack of shadowed symbol declarations so that +// popdcl can restore their declarations when a block scope ends. +// The stack is maintained as a linked list, using Sym's Link field. +// +// In practice, the "stack" actually ends up forming a tree: goto and label +// statements record the current state of dclstack so that checkgoto can +// validate that a goto statement does not jump over any declarations or +// into a new block scope. +// +// Finally, the Syms in this list are not "real" Syms as they don't actually +// represent object names. Sym is just a convenient type for saving shadowed +// Sym definitions, and only a subset of its fields are actually used. 
+var dclstack *Sym + +func dcopy(a, b *Sym) { a.Pkg = b.Pkg a.Name = b.Name a.Def = b.Def @@ -41,6 +56,8 @@ func push() *Sym { return d } +// pushdcl pushes the current declaration for symbol s (if any) so that +// it can be shadowed by a new declaration within a nested block scope. func pushdcl(s *Sym) *Sym { d := push() dcopy(d, s) @@ -50,6 +67,8 @@ func pushdcl(s *Sym) *Sym { return d } +// popdcl pops the innermost block scope and restores all symbol declarations +// to their previous state. func popdcl() { d := dclstack for ; d != nil && d.Name != ""; d = d.Link { @@ -70,6 +89,7 @@ func popdcl() { block = d.Block } +// markdcl records the start of a new block scope for declarations. func markdcl() { d := push() d.Name = "" // used as a mark in fifo @@ -104,6 +124,7 @@ func testdclstack() { } } +// redeclare emits a diagnostic about symbol s being redeclared somewhere. func redeclare(s *Sym, where string) { if s.Lastlineno == 0 { var tmp string @@ -137,6 +158,8 @@ var vargen int var declare_typegen int +// declare records that Node n declares symbol n.Sym in the specified +// declaration context. func declare(n *Node, ctxt Class) { if ctxt == PDISCARD { return @@ -318,8 +341,7 @@ func constiter(vl []*Node, t *Node, cl []*Node) []*Node { return vv } -// this generates a new name node, -// typically for labels or other one-off names. +// newname returns a new ONAME Node associated with symbol s. func newname(s *Sym) *Node { if s == nil { Fatalf("newname nil") @@ -364,17 +386,14 @@ func typenod(t *Type) *Node { return t.Nod } -// this will return an old name -// that has already been pushed on the -// declaration list. a diagnostic is -// generated if no name has been defined. +// oldname returns the Node that declares symbol s in the current scope. +// If no such Node currently exists, an ONONAME Node is returned instead. func oldname(s *Sym) *Node { n := s.Def if n == nil { - // maybe a top-level name will come along - // to give this a definition later. 
- // walkdef will check s->def again once - // all the input source has been processed. + // Maybe a top-level declaration will come along later to + // define s. resolve will check s.Def again once all input + // source has been processed. n = newname(s) n.Op = ONONAME n.Name.Iota = iota_ // save current iota value in const declarations diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index fdea1f2fba..f4b3dc9326 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -66,6 +66,14 @@ type Pkg struct { Syms map[string]*Sym } +// Sym represents an object name. Most commonly, this is a Go identifier naming +// an object declared within a package, but Syms are also used to name internal +// synthesized objects. +// +// As a special exception, field and method names that are exported use the Sym +// associated with localpkg instead of the package that declared them. This +// allows using Sym pointer equality to test for Go identifier uniqueness when +// handling selector expressions. type Sym struct { Flags SymFlags Link *Sym @@ -111,8 +119,6 @@ const ( SymAlgGen ) -var dclstack *Sym - // Ctype describes the constant kind of an "ideal" (untyped) constant. type Ctype int8 -- cgit v1.3 From 5ba797bd189b460854a0aea877381abcaef8105b Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 5 Apr 2016 14:20:04 -0700 Subject: cmd/compile: move a lot of declarations outside of go.go go.go is currently a grab bag of various unrelated type and variable declarations. Move a bunch of them into other more relevant source files. There are still more that can be moved, but these were the low hanging fruit with obvious homes. No code/comment changes. Just shuffling stuff around. 
Change-Id: I43dbe1a5b8b707709c1a3a034c693d38b8465063 Reviewed-on: https://go-review.googlesource.com/21561 Run-TryBot: Matthew Dempsky Reviewed-by: Brad Fitzpatrick --- src/cmd/compile/internal/gc/const.go | 53 ++++++++++++ src/cmd/compile/internal/gc/dcl.go | 6 ++ src/cmd/compile/internal/gc/go.go | 137 ------------------------------- src/cmd/compile/internal/gc/lex.go | 12 +++ src/cmd/compile/internal/gc/popt.go | 28 +++++++ src/cmd/compile/internal/gc/reflect.go | 10 +++ src/cmd/compile/internal/gc/type.go | 14 ++++ src/cmd/compile/internal/gc/typecheck.go | 14 ++++ 8 files changed, 137 insertions(+), 137 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 5c9a67c8b5..c7fb4d97e5 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -10,6 +10,59 @@ import ( "strings" ) +// Ctype describes the constant kind of an "ideal" (untyped) constant. +type Ctype int8 + +const ( + CTxxx Ctype = iota + + CTINT + CTRUNE + CTFLT + CTCPLX + CTSTR + CTBOOL + CTNIL +) + +type Val struct { + // U contains one of: + // bool bool when n.ValCtype() == CTBOOL + // *Mpint int when n.ValCtype() == CTINT, rune when n.ValCtype() == CTRUNE + // *Mpflt float when n.ValCtype() == CTFLT + // *Mpcplx pair of floats when n.ValCtype() == CTCPLX + // string string when n.ValCtype() == CTSTR + // *Nilval when n.ValCtype() == CTNIL + U interface{} +} + +func (v Val) Ctype() Ctype { + switch x := v.U.(type) { + default: + Fatalf("unexpected Ctype for %T", v.U) + panic("not reached") + case nil: + return 0 + case *NilVal: + return CTNIL + case bool: + return CTBOOL + case *Mpint: + if x.Rune { + return CTRUNE + } + return CTINT + case *Mpflt: + return CTFLT + case *Mpcplx: + return CTCPLX + case string: + return CTSTR + } +} + +type NilVal struct{} + // IntLiteral returns the Node's literal value as an integer. 
func (n *Node) IntLiteral() (x int64, ok bool) { switch { diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 8553e2f1e8..fb81545a46 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -26,6 +26,12 @@ func dflag() bool { return true } +var externdcl []*Node + +var blockgen int32 // max block number + +var block int32 // current block number + // dclstack maintains a stack of shadowed symbol declarations so that // popdcl can restore their declarations when a block scope ends. // The stack is maintained as a linked list, using Sym's Link field. diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index f4b3dc9326..4cb985b1be 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -5,7 +5,6 @@ package gc import ( - "bytes" "cmd/compile/internal/ssa" "cmd/internal/obj" ) @@ -16,44 +15,6 @@ const ( MaxStackVarSize = 10 * 1024 * 1024 ) -type Val struct { - // U contains one of: - // bool bool when n.ValCtype() == CTBOOL - // *Mpint int when n.ValCtype() == CTINT, rune when n.ValCtype() == CTRUNE - // *Mpflt float when n.ValCtype() == CTFLT - // *Mpcplx pair of floats when n.ValCtype() == CTCPLX - // string string when n.ValCtype() == CTSTR - // *Nilval when n.ValCtype() == CTNIL - U interface{} -} - -type NilVal struct{} - -func (v Val) Ctype() Ctype { - switch x := v.U.(type) { - default: - Fatalf("unexpected Ctype for %T", v.U) - panic("not reached") - case nil: - return 0 - case *NilVal: - return CTNIL - case bool: - return CTBOOL - case *Mpint: - if x.Rune { - return CTRUNE - } - return CTINT - case *Mpflt: - return CTFLT - case *Mpcplx: - return CTCPLX - case string: - return CTSTR - } -} - type Pkg struct { Name string // package name, e.g. "sys" Path string // string literal used in import statement, e.g. 
"runtime/internal/sys" @@ -119,35 +80,6 @@ const ( SymAlgGen ) -// Ctype describes the constant kind of an "ideal" (untyped) constant. -type Ctype int8 - -const ( - CTxxx Ctype = iota - - CTINT - CTRUNE - CTFLT - CTCPLX - CTSTR - CTBOOL - CTNIL -) - -// ChanDir is whether a channel can send, receive, or both. -type ChanDir uint8 - -func (c ChanDir) CanRecv() bool { return c&Crecv != 0 } -func (c ChanDir) CanSend() bool { return c&Csend != 0 } - -const ( - // types of channel - // must match ../../../../reflect/type.go:/ChanDir - Crecv ChanDir = 1 << 0 - Csend ChanDir = 1 << 1 - Cboth ChanDir = Crecv | Csend -) - // The Class of a variable/function describes the "storage class" // of a variable or function. During parsing, storage classes are // called declaration contexts. @@ -167,30 +99,6 @@ const ( PHEAP = 1 << 7 // an extra bit to identify an escaped variable ) -const ( - Etop = 1 << 1 // evaluated at statement level - Erv = 1 << 2 // evaluated in value context - Etype = 1 << 3 - Ecall = 1 << 4 // call-only expressions are ok - Efnstruct = 1 << 5 // multivalue function returns are ok - Eiota = 1 << 6 // iota is ok - Easgn = 1 << 7 // assigning to expression - Eindir = 1 << 8 // indirecting through expression - Eaddr = 1 << 9 // taking address of expression - Eproc = 1 << 10 // inside a go statement - Ecomplit = 1 << 11 // type in composite literal -) - -type Sig struct { - name string - pkg *Pkg - isym *Sym - tsym *Sym - type_ *Type - mtype *Type - offset int32 -} - // note this is the runtime representation // of the compilers arrays. // @@ -218,13 +126,6 @@ var sizeof_Array int // runtime sizeof(Array) // } String; var sizeof_String int // runtime sizeof(String) -// lexlineno is the line number _after_ the most recently read rune. -// In particular, it's advanced (or rewound) as newlines are read (or unread). -var lexlineno int32 - -// lineno is the line number at the start of the most recently lexed token. 
-var lineno int32 - var pragcgobuf string var infile string @@ -245,10 +146,6 @@ var safemode int var nolocalimports int -var lexbuf bytes.Buffer -var strbuf bytes.Buffer -var litbuf string // LLITERAL value for use in syntax error messages - var Debug [256]int var debugstr string @@ -324,8 +221,6 @@ var maxfltval [NTYPE]*Mpflt var xtop []*Node -var externdcl []*Node - var exportlist []*Node var importlist []*Node // imported functions and methods with inlinable bodies @@ -350,10 +245,6 @@ var Stksize int64 // stack size for current frame var stkptrsize int64 // prefix of stack containing pointers -var blockgen int32 // max block number - -var block int32 // current block number - var hasdefer bool // flag that curfn has defer statement var Curfn *Node @@ -410,34 +301,6 @@ var nodfp *Node var Disable_checknil int -type Flow struct { - Prog *obj.Prog // actual instruction - P1 *Flow // predecessors of this instruction: p1, - P2 *Flow // and then p2 linked though p2link. - P2link *Flow - S1 *Flow // successors of this instruction (at most two: s1 and s2). - S2 *Flow - Link *Flow // next instruction in function code - - Active int32 // usable by client - - Id int32 // sequence number in flow graph - Rpo int32 // reverse post ordering - Loop uint16 // x5 for every loop - Refset bool // diagnostic generated - - Data interface{} // for use by client -} - -type Graph struct { - Start *Flow - Num int - - // After calling flowrpo, rpo lists the flow nodes in reverse postorder, - // and each non-dead Flow node f has g->rpo[f->rpo] == f. 
- Rpo []*Flow -} - // interface to back end const ( diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go index 6f1331ca89..2dbbd9276b 100644 --- a/src/cmd/compile/internal/gc/lex.go +++ b/src/cmd/compile/internal/gc/lex.go @@ -6,6 +6,7 @@ package gc import ( "bufio" + "bytes" "cmd/internal/obj" "fmt" "io" @@ -20,6 +21,17 @@ const ( BOM = 0xFEFF ) +// lexlineno is the line number _after_ the most recently read rune. +// In particular, it's advanced (or rewound) as newlines are read (or unread). +var lexlineno int32 + +// lineno is the line number at the start of the most recently lexed token. +var lineno int32 + +var lexbuf bytes.Buffer +var strbuf bytes.Buffer +var litbuf string // LLITERAL value for use in syntax error messages + func isSpace(c rune) bool { return c == ' ' || c == '\t' || c == '\n' || c == '\r' } diff --git a/src/cmd/compile/internal/gc/popt.go b/src/cmd/compile/internal/gc/popt.go index 41f8ba9fcc..001a715f7b 100644 --- a/src/cmd/compile/internal/gc/popt.go +++ b/src/cmd/compile/internal/gc/popt.go @@ -235,6 +235,34 @@ func fixjmp(firstp *obj.Prog) { // for every f.Data field, for use by the client. // If newData is nil, f.Data will be nil. +type Graph struct { + Start *Flow + Num int + + // After calling flowrpo, rpo lists the flow nodes in reverse postorder, + // and each non-dead Flow node f has g->rpo[f->rpo] == f. + Rpo []*Flow +} + +type Flow struct { + Prog *obj.Prog // actual instruction + P1 *Flow // predecessors of this instruction: p1, + P2 *Flow // and then p2 linked though p2link. + P2link *Flow + S1 *Flow // successors of this instruction (at most two: s1 and s2). 
+ S2 *Flow + Link *Flow // next instruction in function code + + Active int32 // usable by client + + Id int32 // sequence number in flow graph + Rpo int32 // reverse post ordering + Loop uint16 // x5 for every loop + Refset bool // diagnostic generated + + Data interface{} // for use by client +} + var flowmark int // MaxFlowProg is the maximum size program (counted in instructions) diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 11bcd4cdc6..c069b35787 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -22,6 +22,16 @@ type itabEntry struct { var signatlist []*Node var itabs []itabEntry +type Sig struct { + name string + pkg *Pkg + isym *Sym + tsym *Sym + type_ *Type + mtype *Type + offset int32 +} + // byMethodNameAndPackagePath sorts method signatures by name, then package path. type byMethodNameAndPackagePath []*Sig diff --git a/src/cmd/compile/internal/gc/type.go b/src/cmd/compile/internal/gc/type.go index b89c5dbf22..05e30df271 100644 --- a/src/cmd/compile/internal/gc/type.go +++ b/src/cmd/compile/internal/gc/type.go @@ -75,6 +75,20 @@ const ( dddBound = -100 // arrays declared as [...]T start life with Bound=dddBound ) +// ChanDir is whether a channel can send, receive, or both. +type ChanDir uint8 + +func (c ChanDir) CanRecv() bool { return c&Crecv != 0 } +func (c ChanDir) CanSend() bool { return c&Csend != 0 } + +const ( + // types of channel + // must match ../../../../reflect/type.go:/ChanDir + Crecv ChanDir = 1 << 0 + Csend ChanDir = 1 << 1 + Cboth ChanDir = Crecv | Csend +) + // Types stores pointers to predeclared named types. 
// // It also stores pointers to several special types: diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 688936e926..d21552d180 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -11,6 +11,20 @@ import ( "strings" ) +const ( + Etop = 1 << 1 // evaluated at statement level + Erv = 1 << 2 // evaluated in value context + Etype = 1 << 3 + Ecall = 1 << 4 // call-only expressions are ok + Efnstruct = 1 << 5 // multivalue function returns are ok + Eiota = 1 << 6 // iota is ok + Easgn = 1 << 7 // assigning to expression + Eindir = 1 << 8 // indirecting through expression + Eaddr = 1 << 9 // taking address of expression + Eproc = 1 << 10 // inside a go statement + Ecomplit = 1 << 11 // type in composite literal +) + // type check the whole tree of an expression. // calculates expression types. // evaluates compile time constants. -- cgit v1.3 From fda831ed3f904c659fe41f253f75fe76528a28ee Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Tue, 5 Apr 2016 16:44:07 -0700 Subject: cmd/compile: encapsulate reads of gc.Type.Funarg Changes generated with eg and then manually checked and in some cases simplified. Passes toolstash -cmp. 
Change-Id: I2119f37f003368ce1884d2863b406d6ffbfe38c7 Reviewed-on: https://go-review.googlesource.com/21563 Reviewed-by: Brad Fitzpatrick Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/align.go | 4 ++-- src/cmd/compile/internal/gc/bexport.go | 2 +- src/cmd/compile/internal/gc/esc.go | 2 +- src/cmd/compile/internal/gc/export.go | 2 +- src/cmd/compile/internal/gc/fmt.go | 2 +- src/cmd/compile/internal/gc/gsubr.go | 2 +- src/cmd/compile/internal/gc/order.go | 2 +- src/cmd/compile/internal/gc/type.go | 7 ++++++- src/cmd/compile/internal/gc/typecheck.go | 8 ++++---- src/cmd/compile/internal/gc/walk.go | 2 +- 10 files changed, 19 insertions(+), 14 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index b7ed9f19b9..9d5c3a550c 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -263,7 +263,7 @@ func dowidth(t *Type) { } case TSTRUCT: - if t.Funarg { + if t.IsFuncArgStruct() { Fatalf("dowidth fn struct %v", t) } w = widstruct(t, t, 0, 1) @@ -335,7 +335,7 @@ func checkwidth(t *Type) { // function arg structs should not be checked // outside of the enclosing function. 
- if t.Funarg { + if t.IsFuncArgStruct() { Fatalf("checkwidth %v", t) } diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 8968ce8924..f88afd2488 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -742,7 +742,7 @@ func basetypeName(t *Type) string { } func (p *exporter) paramList(params *Type, numbered bool) { - if !params.IsStruct() || !params.Funarg { + if !params.IsFuncArgStruct() { Fatalf("exporter: parameter list expected") } diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index 9b8f134178..d7a63668a6 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -1435,7 +1435,7 @@ func esccall(e *EscState, n *Node, up *Node) { ll := n.List if n.List.Len() == 1 { a := n.List.First() - if a.Type.IsStruct() && a.Type.Funarg { // f(g()). + if a.Type.IsFuncArgStruct() { // f(g()) ll = e.nodeEscState(a).Escretval } } diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 9fc6e56275..6de7da0667 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -592,7 +592,7 @@ func dumpasmhdr() { case OTYPE: t := n.Type - if !t.IsStruct() || t.Map != nil || t.Funarg { + if !t.IsStruct() || t.Map != nil || t.IsFuncArgStruct() { break } fmt.Fprintf(b, "#define %s__size %d\n", t.Sym.Name, int(t.Width)) diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index ab9bad3c2a..27ccdfbdcf 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -690,7 +690,7 @@ func typefmt(t *Type, flag FmtFlag) string { } var buf bytes.Buffer - if t.Funarg { + if t.IsFuncArgStruct() { buf.WriteString("(") var flag1 FmtFlag if fmtmode == FTypeId || fmtmode == FErr { // no argument names on function signature, and no "noescape"/"nosplit" tags diff --git a/src/cmd/compile/internal/gc/gsubr.go 
b/src/cmd/compile/internal/gc/gsubr.go index 353d90f593..a2fa5f8b31 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -541,7 +541,7 @@ func nodarg(t interface{}, fp int) *Node { switch t := t.(type) { case *Type: // entire argument struct, not just one arg - if !t.IsStruct() || !t.Funarg { + if !t.IsFuncArgStruct() { Fatalf("nodarg: bad type %v", t) } n = Nod(ONAME, nil, nil) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 8410a236cd..3b83e3bcc0 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -324,7 +324,7 @@ func ismulticall(l Nodes) bool { // Copyret emits t1, t2, ... = n, where n is a function call, // and then returns the list t1, t2, .... func copyret(n *Node, order *Order) []*Node { - if !n.Type.IsStruct() || !n.Type.Funarg { + if !n.Type.IsFuncArgStruct() { Fatalf("copyret %v %d", n.Type, n.Left.Type.Results().NumFields()) } diff --git a/src/cmd/compile/internal/gc/type.go b/src/cmd/compile/internal/gc/type.go index 05e30df271..e04cfcda63 100644 --- a/src/cmd/compile/internal/gc/type.go +++ b/src/cmd/compile/internal/gc/type.go @@ -127,7 +127,7 @@ type Type struct { Chan ChanDir Trecur uint8 // to detect loops Printed bool - Funarg bool // on TSTRUCT and TFIELD + Funarg bool // TSTRUCT only: whether this struct represents function parameters Local bool // created in this file Deferwidth bool Broke bool // broken type definition. @@ -566,6 +566,11 @@ func (t *Type) SetNname(n *Node) { t.nname = n } +// IsFuncArgStruct reports whether t is a struct representing function parameters. +func (t *Type) IsFuncArgStruct() bool { + return t.Etype == TSTRUCT && t.Funarg +} + func (t *Type) Methods() *Fields { // TODO(mdempsky): Validate t? 
return &t.methods diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index d21552d180..db74a0d246 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1607,7 +1607,7 @@ OpSwitch: // Unpack multiple-return result before type-checking. var funarg *Type - if t.IsStruct() && t.Funarg { + if t.IsFuncArgStruct() { funarg = t t = t.Field(0).Type } @@ -2159,7 +2159,7 @@ OpSwitch: } t := n.Type - if t != nil && !t.Funarg && n.Op != OTYPE { + if t != nil && !t.IsFuncArgStruct() && n.Op != OTYPE { switch t.Etype { case TFUNC, // might have TANY; wait until its called TANY, @@ -2611,7 +2611,7 @@ func typecheckaste(op Op, call *Node, isddd bool, tstruct *Type, nl Nodes, desc if nl.Len() == 1 { n = nl.First() if n.Type != nil { - if n.Type.IsStruct() && n.Type.Funarg { + if n.Type.IsFuncArgStruct() { if !hasddd(tstruct) { n1 := tstruct.NumFields() n2 := n.Type.NumFields() @@ -3359,7 +3359,7 @@ func typecheckas2(n *Node) { } switch r.Op { case OCALLMETH, OCALLINTER, OCALLFUNC: - if !r.Type.IsStruct() || !r.Type.Funarg { + if !r.Type.IsFuncArgStruct() { break } cr = r.Type.NumFields() diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index b7edae5af4..392dae0fa9 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1783,7 +1783,7 @@ func ascompatte(op Op, call *Node, isddd bool, nl *Type, lr []*Node, fp int, ini var nn []*Node // f(g()) where g has multiple return values - if r != nil && len(lr) <= 1 && r.Type.IsStruct() && r.Type.Funarg { + if r != nil && len(lr) <= 1 && r.Type.IsFuncArgStruct() { // optimization - can do block copy if eqtypenoname(r.Type, nl) { arg := nodarg(nl, fp) -- cgit v1.3 From 309144b7f1090cbc7c3a90eb252d20a939caf398 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Fri, 1 Apr 2016 11:05:30 -0700 Subject: cmd/compile: fix x=x assignments No point in doing anything for x=x 
assignments. In addition, skipping these assignments prevents generating: VARDEF x COPY x -> x which is bad because x is incorrectly considered dead before the vardef. Fixes #14904 Change-Id: I6817055ec20bcc34a9648617e0439505ee355f82 Reviewed-on: https://go-review.googlesource.com/21470 Reviewed-by: Brad Fitzpatrick Reviewed-by: Dave Cheney --- src/cmd/compile/internal/gc/ssa.go | 11 +++ src/cmd/compile/internal/gc/ssa_test.go | 2 + .../compile/internal/gc/testdata/namedReturn.go | 101 +++++++++++++++++++++ test/live_ssa.go | 13 ++- 4 files changed, 120 insertions(+), 7 deletions(-) create mode 100644 src/cmd/compile/internal/gc/testdata/namedReturn.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 359f4b22a2..1c2e528384 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -661,6 +661,17 @@ func (s *state) stmt(n *Node) { return } + if n.Left == n.Right && n.Left.Op == ONAME { + // An x=x assignment. No point in doing anything + // here. In addition, skipping this assignment + // prevents generating: + // VARDEF x + // COPY x -> x + // which is bad because x is incorrectly considered + // dead before the vardef. See issue #14904. 
+ return + } + var t *Type if n.Right != nil { t = n.Right.Type diff --git a/src/cmd/compile/internal/gc/ssa_test.go b/src/cmd/compile/internal/gc/ssa_test.go index 59a240237b..0fb0f17778 100644 --- a/src/cmd/compile/internal/gc/ssa_test.go +++ b/src/cmd/compile/internal/gc/ssa_test.go @@ -99,3 +99,5 @@ func TestUnsafe(t *testing.T) { runTest(t, "unsafe_ssa.go") } func TestPhi(t *testing.T) { runTest(t, "phi_ssa.go") } func TestSlice(t *testing.T) { runTest(t, "slice.go") } + +func TestNamedReturn(t *testing.T) { runTest(t, "namedReturn.go") } diff --git a/src/cmd/compile/internal/gc/testdata/namedReturn.go b/src/cmd/compile/internal/gc/testdata/namedReturn.go new file mode 100644 index 0000000000..dafb5d719f --- /dev/null +++ b/src/cmd/compile/internal/gc/testdata/namedReturn.go @@ -0,0 +1,101 @@ +// run + +// This test makes sure that naming named +// return variables in a return statement works. +// See issue #14904. + +package main + +import ( + "fmt" + "runtime" +) + +// Our heap-allocated object that will be GC'd incorrectly. +// Note that we always check the second word because that's +// where 0xdeaddeaddeaddead is written. +type B [4]int + +// small (SSAable) array +type T1 [3]*B + +//go:noinline +func f1() (t T1) { + t[0] = &B{91, 92, 93, 94} + runtime.GC() + return t +} + +// large (non-SSAable) array +type T2 [8]*B + +//go:noinline +func f2() (t T2) { + t[0] = &B{91, 92, 93, 94} + runtime.GC() + return t +} + +// small (SSAable) struct +type T3 struct { + a, b, c *B +} + +//go:noinline +func f3() (t T3) { + t.a = &B{91, 92, 93, 94} + runtime.GC() + return t +} + +// large (non-SSAable) struct +type T4 struct { + a, b, c, d, e, f *B +} + +//go:noinline +func f4() (t T4) { + t.a = &B{91, 92, 93, 94} + runtime.GC() + return t +} + +var sink *B + +func f5() int { + b := &B{91, 92, 93, 94} + t := T4{b, nil, nil, nil, nil, nil} + sink = b // make sure b is heap allocated ... + sink = nil // ... 
but not live + runtime.GC() + t = t + return t.a[1] +} + +func main() { + failed := false + + if v := f1()[0][1]; v != 92 { + fmt.Printf("f1()[0][1]=%d, want 92\n", v) + failed = true + } + if v := f2()[0][1]; v != 92 { + fmt.Printf("f2()[0][1]=%d, want 92\n", v) + failed = true + } + if v := f3().a[1]; v != 92 { + fmt.Printf("f3().a[1]=%d, want 92\n", v) + failed = true + } + if v := f4().a[1]; v != 92 { + fmt.Printf("f4().a[1]=%d, want 92\n", v) + failed = true + } + if v := f5(); v != 92 { + fmt.Printf("f5()=%d, want 92\n", v) + failed = true + } + if failed { + panic("bad") + } +} diff --git a/test/live_ssa.go b/test/live_ssa.go index fe2541395f..fae0a2b82a 100644 --- a/test/live_ssa.go +++ b/test/live_ssa.go @@ -606,13 +606,12 @@ func f39a() (x []int) { return } -// TODO: Reenable after #14904 is fixed. -//func f39b() (x [10]*int) { -// x = [10]*int{} -// x[0] = new(int) // E.R.R.O.R. "live at call to newobject: x$" -// printnl() // E.R.R.O.R. "live at call to printnl: x$" -// return x -//} +func f39b() (x [10]*int) { + x = [10]*int{} + x[0] = new(int) // ERROR "live at call to newobject: x$" + printnl() // ERROR "live at call to printnl: x$" + return x +} func f39c() (x [10]*int) { x = [10]*int{} -- cgit v1.3 From f38f43d029de16f21f9102226d5c24684fb0ea25 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Fri, 1 Apr 2016 20:11:30 -0700 Subject: cmd/compile: shrink gc.Type in half MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Many of Type's fields are etype-specific. This CL organizes them into their own auxiliary types, duplicating a few fields as necessary, and adds an Extra field to hold them. It also sorts the remaining fields for better struct packing. It also improves documentation for most fields. This reduces the size of Type at the cost of some extra allocations. There's no CPU impact; memory impact below. It also makes the natural structure of Type clearer. Passes toolstash -cmp on all architectures. 
Ideas for future work in this vein: (1) Width and Align probably only need to be stored for Struct and Array types. The refactoring to accomplish this would hopefully also eliminate TFUNCARGS and TCHANARGS entirely. (2) Maplineno is sparsely used and could probably better be stored in a separate map[*Type]int32, with mapqueue updated to store both a Node and a line number. (3) The Printed field may be removable once the old (non-binary) importer/exported has been removed. (4) StructType's fields field could be changed from *[]*Field to []*Field, which would remove a common allocation. (5) I believe that Type.Nod can be moved to ForwardType. Separate CL. name old alloc/op new alloc/op delta Template 57.9MB ± 0% 55.9MB ± 0% -3.43% (p=0.000 n=50+50) Unicode 38.3MB ± 0% 37.8MB ± 0% -1.39% (p=0.000 n=50+50) GoTypes 185MB ± 0% 180MB ± 0% -2.56% (p=0.000 n=50+50) Compiler 824MB ± 0% 806MB ± 0% -2.19% (p=0.000 n=50+50) name old allocs/op new allocs/op delta Template 486k ± 0% 497k ± 0% +2.25% (p=0.000 n=50+50) Unicode 377k ± 0% 379k ± 0% +0.55% (p=0.000 n=50+50) GoTypes 1.39M ± 0% 1.42M ± 0% +1.63% (p=0.000 n=50+50) Compiler 5.52M ± 0% 5.57M ± 0% +0.84% (p=0.000 n=47+50) Change-Id: I828488eeb74902b013d5ae4cf844de0b6c0dfc87 Reviewed-on: https://go-review.googlesource.com/21611 Reviewed-by: Matthew Dempsky Reviewed-by: Brad Fitzpatrick --- src/cmd/compile/internal/gc/align.go | 12 +- src/cmd/compile/internal/gc/bexport.go | 4 +- src/cmd/compile/internal/gc/bimport.go | 28 +- src/cmd/compile/internal/gc/dcl.go | 10 +- src/cmd/compile/internal/gc/export.go | 2 +- src/cmd/compile/internal/gc/fmt.go | 17 +- src/cmd/compile/internal/gc/pgen.go | 2 +- src/cmd/compile/internal/gc/pgen_test.go | 20 +- src/cmd/compile/internal/gc/reflect.go | 90 +++--- src/cmd/compile/internal/gc/sizeof_test.go | 16 +- src/cmd/compile/internal/gc/ssa.go | 2 +- src/cmd/compile/internal/gc/type.go | 487 ++++++++++++++++++++++------- src/cmd/compile/internal/gc/typecheck.go | 17 +- 
src/cmd/compile/internal/gc/universe.go | 6 +- src/cmd/compile/internal/gc/walk.go | 2 +- 15 files changed, 490 insertions(+), 225 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index 9d5c3a550c..e43ed7b225 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -198,11 +198,11 @@ func dowidth(t *Type) { // make fake type to check later to // trigger channel argument check. - t1 := typWrapper(TCHANARGS, t) + t1 := typChanArgs(t) checkwidth(t1) case TCHANARGS: - t1 := t.Wrapped() + t1 := t.ChanArgs() dowidth(t1) // just in case if t1.Elem().Width >= 1<<16 { Yyerror("channel element type too large (>64kB)") @@ -271,18 +271,18 @@ func dowidth(t *Type) { // make fake type to check later to // trigger function argument computation. case TFUNC: - t1 := typWrapper(TFUNCARGS, t) + t1 := typFuncArgs(t) checkwidth(t1) w = int64(Widthptr) // width of func type is pointer // function is 3 cated structures; // compute their widths as side-effect. 
case TFUNCARGS: - t1 := t.Wrapped() + t1 := t.FuncArgs() w = widstruct(t1, t1.Recvs(), 0, 0) w = widstruct(t1, t1.Params(), w, Widthreg) w = widstruct(t1, t1.Results(), w, Widthreg) - t1.Argwid = w + t1.Extra.(*FuncType).Argwid = w if w%int64(Widthreg) != 0 { Warn("bad type %v %d\n", t1, w) } @@ -386,7 +386,7 @@ func Argsize(t *Type) int { } } - w = (w + int64(Widthptr) - 1) &^ (int64(Widthptr) - 1) + w = Rnd(w, int64(Widthptr)) if int64(int(w)) != w { Fatalf("argsize too big") } diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index f88afd2488..8dcf97b31d 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -602,7 +602,7 @@ func (p *exporter) typ(t *Type) { case TDDDFIELD: // see p.param use of TDDDFIELD p.tag(dddTag) - p.typ(t.Wrapped()) + p.typ(t.DDDField()) case TSTRUCT: p.tag(structTag) @@ -768,7 +768,7 @@ func (p *exporter) param(q *Field, n int, numbered bool) { t := q.Type if q.Isddd { // create a fake type to encode ... 
just for the p.typ call - t = typWrapper(TDDDFIELD, t.Elem()) + t = typDDDField(t.Elem()) } p.typ(t) if n > 0 { diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index 8c53372b80..7ad4d9dbb0 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -359,16 +359,20 @@ func (p *importer) typ() *Type { case arrayTag, sliceTag: t = p.newtyp(TARRAY) + var bound int64 if i == arrayTag { - t.SetNumElem(p.int64()) + bound = p.int64() + } + elem := p.typ() + if i == arrayTag { + t.Extra = &ArrayType{Elem: elem, Bound: bound} } else { - t.SetNumElem(sliceBound) + t.Extra = SliceType{Elem: elem} } - t.Type = p.typ() case dddTag: t = p.newtyp(TDDDFIELD) - t.Type = p.typ() + t.Extra = DDDFieldType{T: p.typ()} case structTag: t = p.newtyp(TSTRUCT) @@ -376,7 +380,7 @@ func (p *importer) typ() *Type { case pointerTag: t = p.newtyp(Tptr) - t.Type = p.typ() + t.Extra = PtrType{Elem: p.typ()} case signatureTag: t = p.newtyp(TFUNC) @@ -393,13 +397,15 @@ func (p *importer) typ() *Type { case mapTag: t = p.newtyp(TMAP) - t.Down = p.typ() // key - t.Type = p.typ() // val + mt := t.MapType() + mt.Key = p.typ() + mt.Val = p.typ() case chanTag: t = p.newtyp(TCHAN) - t.Chan = ChanDir(p.int()) - t.Type = p.typ() + ct := t.ChanType() + ct.Dir = ChanDir(p.int()) + ct.Elem = p.typ() default: Fatalf("importer: unexpected type (tag = %d)", i) @@ -444,7 +450,7 @@ func (p *importer) field() *Node { // anonymous field - typ must be T or *T and T must be a type name s := typ.Sym if s == nil && typ.IsPtr() { - s = typ.Type.Sym // deref + s = typ.Elem().Sym // deref } pkg := importpkg if sym != nil { @@ -531,7 +537,7 @@ func (p *importer) param(named bool) *Node { isddd := false if typ.Etype == TDDDFIELD { // TDDDFIELD indicates wrapped ... 
slice type - typ = typSlice(typ.Wrapped()) + typ = typSlice(typ.DDDField()) isddd = true } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index fb81545a46..c652c65962 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -743,8 +743,8 @@ func checkembeddedtype(t *Type) { if t.IsPtr() { Yyerror("embedded type cannot be a pointer") - } else if t.Etype == TFORW && t.Embedlineno == 0 { - t.Embedlineno = lineno + } else if t.Etype == TFORW && t.ForwardType().Embedlineno == 0 { + t.ForwardType().Embedlineno = lineno } } @@ -855,7 +855,7 @@ func tostruct0(t *Type, l []*Node) { func tofunargs(l []*Node) *Type { t := typ(TSTRUCT) - t.Funarg = true + t.StructType().Funarg = true fields := make([]*Field, len(l)) for i, n := range l { @@ -1061,11 +1061,11 @@ func functype0(t *Type, this *Node, in, out []*Node) { t.Broke = true } - t.Outnamed = false + t.FuncType().Outnamed = false if len(out) > 0 && out[0].Left != nil && out[0].Left.Orig != nil { s := out[0].Left.Orig.Sym if s != nil && (s.Name[0] != '~' || s.Name[1] != 'r') { // ~r%d is the name invented for an unnamed result - t.Outnamed = true + t.FuncType().Outnamed = true } } } diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 6de7da0667..17311cf6af 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -592,7 +592,7 @@ func dumpasmhdr() { case OTYPE: t := n.Type - if !t.IsStruct() || t.Map != nil || t.IsFuncArgStruct() { + if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() { break } fmt.Fprintf(b, "#define %s__size %d\n", t.Sym.Name, int(t.Width)) diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index 27ccdfbdcf..5c5503619f 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -671,19 +671,20 @@ func typefmt(t *Type, flag FmtFlag) string { return buf.String() case 
TSTRUCT: - if t.Map != nil { + if m := t.StructType().Map; m != nil { + mt := m.MapType() // Format the bucket struct for map[x]y as map.bucket[x]y. // This avoids a recursive print that generates very long names. - if t.Map.Bucket == t { - return "map.bucket[" + t.Map.Key().String() + "]" + t.Map.Val().String() + if mt.Bucket == t { + return "map.bucket[" + m.Key().String() + "]" + m.Val().String() } - if t.Map.Hmap == t { - return "map.hdr[" + t.Map.Key().String() + "]" + t.Map.Val().String() + if mt.Hmap == t { + return "map.hdr[" + m.Key().String() + "]" + m.Val().String() } - if t.Map.Hiter == t { - return "map.iter[" + t.Map.Key().String() + "]" + t.Map.Val().String() + if mt.Hiter == t { + return "map.iter[" + m.Key().String() + "]" + m.Val().String() } Yyerror("unknown internal map type") @@ -735,7 +736,7 @@ func typefmt(t *Type, flag FmtFlag) string { if fmtmode == FExp { Fatalf("cannot use TDDDFIELD with old exporter") } - return fmt.Sprintf("%v <%v> %v", Econv(t.Etype), t.Sym, t.Wrapped()) + return fmt.Sprintf("%v <%v> %v", Econv(t.Etype), t.Sym, t.DDDField()) } if fmtmode == FExp { diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index efe10a419c..63f7bf825e 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -375,7 +375,7 @@ func compile(fn *Node) { // set up domain for labels clearlabels() - if Curfn.Type.Outnamed { + if Curfn.Type.FuncType().Outnamed { // add clearing of the output parameters for _, t := range Curfn.Type.Results().Fields().Slice() { if t.Nname != nil { diff --git a/src/cmd/compile/internal/gc/pgen_test.go b/src/cmd/compile/internal/gc/pgen_test.go index fcb8bfa0c2..44dc1db12e 100644 --- a/src/cmd/compile/internal/gc/pgen_test.go +++ b/src/cmd/compile/internal/gc/pgen_test.go @@ -10,6 +10,14 @@ import ( "testing" ) +func typeWithoutPointers() *Type { + return &Type{Etype: TSTRUCT, Extra: &StructType{Haspointers: 1}} // haspointers -> false +} + +func 
typeWithPointers() *Type { + return &Type{Etype: TSTRUCT, Extra: &StructType{Haspointers: 2}} // haspointers -> true +} + // Test all code paths for cmpstackvarlt. func TestCmpstackvar(t *testing.T) { testdata := []struct { @@ -62,13 +70,13 @@ func TestCmpstackvar(t *testing.T) { false, }, { - Node{Class: PAUTO, Type: &Type{Haspointers: 1}}, // haspointers -> false - Node{Class: PAUTO, Type: &Type{Haspointers: 2}}, // haspointers -> true + Node{Class: PAUTO, Type: typeWithoutPointers()}, + Node{Class: PAUTO, Type: typeWithPointers()}, false, }, { - Node{Class: PAUTO, Type: &Type{Haspointers: 2}}, // haspointers -> true - Node{Class: PAUTO, Type: &Type{Haspointers: 1}}, // haspointers -> false + Node{Class: PAUTO, Type: typeWithPointers()}, + Node{Class: PAUTO, Type: typeWithoutPointers()}, true, }, { @@ -127,7 +135,7 @@ func TestStackvarSort(t *testing.T) { {Class: PFUNC, Xoffset: 10, Type: &Type{}, Name: &Name{}, Sym: &Sym{}}, {Class: PFUNC, Xoffset: 20, Type: &Type{}, Name: &Name{}, Sym: &Sym{}}, {Class: PAUTO, Used: true, Type: &Type{}, Name: &Name{}, Sym: &Sym{}}, - {Class: PAUTO, Type: &Type{Haspointers: 1}, Name: &Name{}, Sym: &Sym{}}, // haspointers -> false + {Class: PAUTO, Type: typeWithoutPointers(), Name: &Name{}, Sym: &Sym{}}, {Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}}, {Class: PAUTO, Type: &Type{}, Name: &Name{Needzero: true}, Sym: &Sym{}}, {Class: PAUTO, Type: &Type{Width: 1}, Name: &Name{}, Sym: &Sym{}}, @@ -148,7 +156,7 @@ func TestStackvarSort(t *testing.T) { {Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}}, {Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "abc"}}, {Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{Name: "xyz"}}, - {Class: PAUTO, Type: &Type{Haspointers: 1}, Name: &Name{}, Sym: &Sym{}}, // haspointers -> false + {Class: PAUTO, Type: typeWithoutPointers(), Name: &Name{}, Sym: &Sym{}}, } // haspointers updates Type.Haspointers as a side effect, so // exercise this function on all inputs so that 
reflect.DeepEqual diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index c069b35787..df9ef27b7a 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -86,8 +86,8 @@ func makefield(name string, t *Type) *Field { } func mapbucket(t *Type) *Type { - if t.Bucket != nil { - return t.Bucket + if t.MapType().Bucket != nil { + return t.MapType().Bucket } bucket := typ(TSTRUCT) @@ -157,17 +157,17 @@ func mapbucket(t *Type) *Type { Yyerror("bad math in mapbucket for %v", t) } - t.Bucket = bucket + t.MapType().Bucket = bucket - bucket.Map = t + bucket.StructType().Map = t return bucket } // Builds a type representing a Hmap structure for the given map type. // Make sure this stays in sync with ../../../../runtime/hashmap.go! func hmap(t *Type) *Type { - if t.Hmap != nil { - return t.Hmap + if t.MapType().Hmap != nil { + return t.MapType().Hmap } bucket := mapbucket(t) @@ -186,14 +186,14 @@ func hmap(t *Type) *Type { h.Local = t.Local h.SetFields(field[:]) dowidth(h) - t.Hmap = h - h.Map = t + t.MapType().Hmap = h + h.StructType().Map = t return h } func hiter(t *Type) *Type { - if t.Hiter != nil { - return t.Hiter + if t.MapType().Hiter != nil { + return t.MapType().Hiter } // build a struct: @@ -234,8 +234,8 @@ func hiter(t *Type) *Type { if i.Width != int64(12*Widthptr) { Yyerror("hash_iter size not correct %d %d", i.Width, 12*Widthptr) } - t.Hiter = i - i.Map = t + t.MapType().Hiter = i + i.StructType().Map = t return i } @@ -664,67 +664,47 @@ var kinds = []int{ } func haspointers(t *Type) bool { - if t.Haspointers != 0 { - return t.Haspointers-1 != 0 - } - - var ret bool switch t.Etype { - case TINT, - TUINT, - TINT8, - TUINT8, - TINT16, - TUINT16, - TINT32, - TUINT32, - TINT64, - TUINT64, - TUINTPTR, - TFLOAT32, - TFLOAT64, - TCOMPLEX64, - TCOMPLEX128, - TBOOL: - ret = false + case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, + TUINT64, TUINTPTR, TFLOAT32, 
TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL: + return false case TARRAY: if t.IsSlice() { - ret = true - break + return true } - if t.NumElem() == 0 { // empty array - ret = false - break + at := t.Extra.(*ArrayType) + if at.Haspointers != 0 { + return at.Haspointers-1 != 0 } - ret = haspointers(t.Elem()) + ret := false + if t.NumElem() != 0 { // non-empty array + ret = haspointers(t.Elem()) + } + + at.Haspointers = 1 + uint8(obj.Bool2int(ret)) + return ret case TSTRUCT: - ret = false + st := t.StructType() + if st.Haspointers != 0 { + return st.Haspointers-1 != 0 + } + + ret := false for _, t1 := range t.Fields().Slice() { if haspointers(t1.Type) { ret = true break } } - - case TSTRING, - TPTR32, - TPTR64, - TUNSAFEPTR, - TINTER, - TCHAN, - TMAP, - TFUNC: - fallthrough - default: - ret = true + st.Haspointers = 1 + uint8(obj.Bool2int(ret)) + return ret } - t.Haspointers = 1 + uint8(obj.Bool2int(ret)) - return ret + return true } // typeptrdata returns the length in bytes of the prefix of t diff --git a/src/cmd/compile/internal/gc/sizeof_test.go b/src/cmd/compile/internal/gc/sizeof_test.go index 11c0f419da..8b0dfe538e 100644 --- a/src/cmd/compile/internal/gc/sizeof_test.go +++ b/src/cmd/compile/internal/gc/sizeof_test.go @@ -27,7 +27,21 @@ func TestSizeof(t *testing.T) { {Name{}, 52, 80}, {Node{}, 92, 144}, {Sym{}, 60, 112}, - {Type{}, 116, 184}, + {Type{}, 56, 88}, + {MapType{}, 20, 40}, + {ForwardType{}, 16, 32}, + {FuncType{}, 28, 48}, + {StructType{}, 12, 24}, + {InterType{}, 4, 8}, + {ChanType{}, 8, 16}, + {ArrayType{}, 16, 24}, + {InterMethType{}, 4, 8}, + {DDDFieldType{}, 4, 8}, + {FuncArgsType{}, 4, 8}, + {ChanArgsType{}, 4, 8}, + {PtrType{}, 4, 8}, + {SliceType{}, 4, 8}, + {DDDArrayType{}, 4, 8}, } for _, tt := range tests { diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 1c2e528384..127a7c4698 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -4218,7 +4218,7 @@ func (e 
*ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.Local func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) { n := name.N.(*Node) - ptrType := Ptrto(n.Type.Type) + ptrType := Ptrto(n.Type.Elem()) lenType := Types[TINT] if n.Class == PAUTO && !n.Addrtaken { // Split this slice up into three separate variables. diff --git a/src/cmd/compile/internal/gc/type.go b/src/cmd/compile/internal/gc/type.go index e04cfcda63..3d2f01ef7d 100644 --- a/src/cmd/compile/internal/gc/type.go +++ b/src/cmd/compile/internal/gc/type.go @@ -122,55 +122,174 @@ var ( // A Type represents a Go type. type Type struct { - Etype EType - Noalg bool - Chan ChanDir - Trecur uint8 // to detect loops - Printed bool - Funarg bool // TSTRUCT only: whether this struct represents function parameters - Local bool // created in this file - Deferwidth bool - Broke bool // broken type definition. - Align uint8 - Haspointers uint8 // 0 unknown, 1 no, 2 yes - Outnamed bool // on TFUNC - - Nod *Node // canonical OTYPE node - Orig *Type // original type (type literal or predefined type) + // Extra contains extra etype-specific fields. + // As an optimization, those etype-specific structs which contain exactly + // one pointer-shaped field are stored as values rather than pointers when possible. + // + // TMAP: *MapType + // TFORW: *ForwardType + // TFUNC: *FuncType + // TINTERMETHOD: InterMethType + // TSTRUCT: *StructType + // TINTER: *InterType + // TDDDFIELD: DDDFieldType + // TFUNCARGS: FuncArgsType + // TCHANARGS: ChanArgsType + // TCHAN: *ChanType + // TPTR32, TPTR64: PtrType + // TARRAY: *ArrayType, SliceType, or DDDArrayType + Extra interface{} + + // Width is the width of this Type in bytes. 
+ Width int64 methods Fields allMethods Fields - Sym *Sym + Nod *Node // canonical OTYPE node + Orig *Type // original type (type literal or predefined type) + + Sym *Sym // symbol containing name, for named types Vargen int32 // unique name for OTYPE/ONAME - Lineno int32 + Lineno int32 // line at which this type was declared, implicitly or explicitly + + Maplineno int32 // first use of this type as a map key + + Etype EType // kind of type + Noalg bool // suppress hash and eq algorithm generation + Trecur uint8 // to detect loops + Printed bool // prevent duplicate export printing + Local bool // created in this file + Deferwidth bool + Broke bool // broken type definition. + Align uint8 // the required alignment of this type, in bytes +} + +// MapType contains Type fields specific to maps. +type MapType struct { + Key *Type // Key type + Val *Type // Val (elem) type + + Bucket *Type // internal struct type representing a hash bucket + Hmap *Type // internal struct type representing the Hmap (map header object) + Hiter *Type // internal struct type representing hash iterator state +} + +// MapType returns t's extra map-specific fields. +func (t *Type) MapType() *MapType { + t.wantEtype(TMAP) + return t.Extra.(*MapType) +} + +// ForwardType contains Type fields specific to forward types. +type ForwardType struct { + Copyto []*Node // where to copy the eventual value to + Embedlineno int32 // first use of this type as an embedded type +} + +// ForwardType returns t's extra forward-type-specific fields. +func (t *Type) ForwardType() *ForwardType { + t.wantEtype(TFORW) + return t.Extra.(*ForwardType) +} + +// FuncType contains Type fields specific to func types. +type FuncType struct { + Receiver *Type // function receiver + Results *Type // function results + Params *Type // function params + + Nname *Node - nname *Node + // Argwid is the total width of the function receiver, params, and results. + // It gets calculated via a temporary TFUNCARGS type. 
+ // Note that TFUNC's Width is Widthptr. Argwid int64 - // most nodes - Type *Type // element type for TARRAY, TCHAN, TMAP, TPTRxx - Width int64 + Outnamed bool +} + +// FuncType returns t's extra func-specific fields. +func (t *Type) FuncType() *FuncType { + t.wantEtype(TFUNC) + return t.Extra.(*FuncType) +} + +// InterMethType contains Type fields specific to interface method psuedo-types. +type InterMethType struct { + Nname *Node +} - // TSTRUCT +// StructType contains Type fields specific to struct types. +type StructType struct { fields Fields - Down *Type // key type in TMAP; next struct in Funarg TSTRUCT + // Maps have three associated internal structs (see struct MapType). + // Map links such structs back to their map type. + Map *Type - // TARRAY - Bound int64 // negative is slice + Funarg bool // whether this struct represents function parameters + Haspointers uint8 // 0 unknown, 1 no, 2 yes +} + +// StructType returns t's extra struct-specific fields. +func (t *Type) StructType() *StructType { + t.wantEtype(TSTRUCT) + return t.Extra.(*StructType) +} + +// InterType contains Type fields specific to interface types. +type InterType struct { + fields Fields +} + +// PtrType contains Type fields specific to pointer types. +type PtrType struct { + Elem *Type // element type +} + +// DDDFieldType contains Type fields specific to TDDDFIELD types. +type DDDFieldType struct { + T *Type // reference to a slice type for ... args +} + +// ChanArgsType contains Type fields specific to TCHANARGS types. +type ChanArgsType struct { + T *Type // reference to a chan type whose elements need a width check +} - // TMAP - Bucket *Type // internal type representing a hash bucket - Hmap *Type // internal type representing a Hmap (map header object) - Hiter *Type // internal type representing hash iterator state - Map *Type // link from the above 3 internal types back to the map type. +// // FuncArgsType contains Type fields specific to TFUNCARGS types. 
+type FuncArgsType struct { + T *Type // reference to a func type whose elements need a width check +} - Maplineno int32 // first use of TFORW as map key - Embedlineno int32 // first use of TFORW as embedded type +// ChanType contains Type fields specific to channel types. +type ChanType struct { + Elem *Type // element type + Dir ChanDir // channel direction +} - // for TFORW, where to copy the eventual value to - Copyto []*Node +// ChanType returns t's extra channel-specific fields. +func (t *Type) ChanType() *ChanType { + t.wantEtype(TCHAN) + return t.Extra.(*ChanType) +} + +// ArrayType contains Type fields specific to array types with known lengths. +type ArrayType struct { + Elem *Type // element type + Bound int64 // number of elements; always >= 0; do not use with sliceBound or dddBound + Haspointers uint8 // 0 unknown, 1 no, 2 yes +} + +// SliceType contains Type fields specific to slice types. +type SliceType struct { + Elem *Type // element type +} + +// DDDArrayType contains Type fields specific to ddd array types. +type DDDArrayType struct { + Elem *Type // element type } // A Field represents a field in a struct or a method in an interface or @@ -252,38 +371,61 @@ func typ(et EType) *Type { Lineno: lineno, } t.Orig = t + // TODO(josharian): lazily initialize some of these? + switch t.Etype { + case TMAP: + t.Extra = new(MapType) + case TFORW: + t.Extra = new(ForwardType) + case TFUNC: + t.Extra = new(FuncType) + case TINTERMETH: + t.Extra = InterMethType{} + case TSTRUCT: + t.Extra = new(StructType) + case TINTER: + t.Extra = new(InterType) + case TPTR32, TPTR64: + t.Extra = PtrType{} + case TCHANARGS: + t.Extra = ChanArgsType{} + case TFUNCARGS: + t.Extra = FuncArgsType{} + case TDDDFIELD: + t.Extra = DDDFieldType{} + case TCHAN: + t.Extra = new(ChanType) + } return t } // typArray returns a new fixed-length array Type. 
func typArray(elem *Type, bound int64) *Type { t := typ(TARRAY) - t.Type = elem - t.Bound = bound + t.Extra = &ArrayType{Elem: elem, Bound: bound} return t } // typSlice returns a new slice Type. func typSlice(elem *Type) *Type { t := typ(TARRAY) - t.Type = elem - t.Bound = sliceBound + t.Extra = SliceType{Elem: elem} return t } // typDDDArray returns a new [...]T array Type. func typDDDArray(elem *Type) *Type { t := typ(TARRAY) - t.Type = elem - t.Bound = dddBound + t.Extra = DDDArrayType{Elem: elem} return t } // typChan returns a new chan Type with direction dir. func typChan(elem *Type, dir ChanDir) *Type { t := typ(TCHAN) - t.Type = elem - t.Chan = dir + ct := t.ChanType() + ct.Elem = elem + ct.Dir = dir return t } @@ -294,29 +436,39 @@ func typMap(k, v *Type) *Type { } t := typ(TMAP) - t.Down = k - t.Type = v + mt := t.MapType() + mt.Key = k + mt.Val = v return t } // typPtr returns a new pointer type pointing to t. func typPtr(elem *Type) *Type { t := typ(Tptr) - t.Type = elem + t.Extra = PtrType{Elem: elem} t.Width = int64(Widthptr) t.Align = uint8(Widthptr) return t } -// typWrapper returns a new wrapper psuedo-type. -func typWrapper(et EType, wrapped *Type) *Type { - switch et { - case TCHANARGS, TFUNCARGS, TDDDFIELD: - default: - Fatalf("typWrapper bad etype %s", et) - } - t := typ(et) - t.Type = wrapped +// typDDDField returns a new TDDDFIELD type for slice type s. +func typDDDField(s *Type) *Type { + t := typ(TDDDFIELD) + t.Extra = DDDFieldType{T: s} + return t +} + +// typChanArgs returns a new TCHANARGS type for channel type c. +func typChanArgs(c *Type) *Type { + t := typ(TCHANARGS) + t.Extra = ChanArgsType{T: c} + return t +} + +// typFuncArgs returns a new TFUNCARGS type for func type f. 
+func typFuncArgs(f *Type) *Type { + t := typ(TFUNCARGS) + t.Extra = FuncArgsType{T: f} return t } @@ -362,20 +514,43 @@ func substAny(t *Type, types *[]*Type) *Type { t = (*types)[0] *types = (*types)[1:] - case TPTR32, TPTR64, TCHAN, TARRAY: - elem := substAny(t.Type, types) - if elem != t.Type { + case TPTR32, TPTR64: + elem := substAny(t.Elem(), types) + if elem != t.Elem() { + t = t.Copy() + t.Extra = PtrType{Elem: elem} + } + + case TARRAY: + elem := substAny(t.Elem(), types) + if elem != t.Elem() { + t = t.Copy() + switch x := t.Extra.(type) { + case *ArrayType: + x.Elem = elem + case SliceType: + t.Extra = SliceType{Elem: elem} + case DDDArrayType: + t.Extra = DDDArrayType{Elem: elem} + default: + Fatalf("substAny bad array elem type %T %v", x, t) + } + } + + case TCHAN: + elem := substAny(t.Elem(), types) + if elem != t.Elem() { t = t.Copy() - t.Type = elem + t.Extra.(*ChanType).Elem = elem } case TMAP: - key := substAny(t.Down, types) - val := substAny(t.Type, types) - if key != t.Down || val != t.Type { + key := substAny(t.Key(), types) + val := substAny(t.Val(), types) + if key != t.Key() || val != t.Val() { t = t.Copy() - t.Down = key - t.Type = val + t.Extra.(*MapType).Key = key + t.Extra.(*MapType).Val = val } case TFUNC: @@ -426,6 +601,32 @@ func (t *Type) Copy() *Type { return nil } nt := *t + // copy any *T Extra fields, to avoid aliasing + switch t.Etype { + case TMAP: + x := *t.Extra.(*MapType) + nt.Extra = &x + case TFORW: + x := *t.Extra.(*ForwardType) + nt.Extra = &x + case TFUNC: + x := *t.Extra.(*FuncType) + nt.Extra = &x + case TSTRUCT: + x := *t.Extra.(*StructType) + nt.Extra = &x + case TINTER: + x := *t.Extra.(*InterType) + nt.Extra = &x + case TCHAN: + x := *t.Extra.(*ChanType) + nt.Extra = &x + case TARRAY: + if arr, ok := t.Extra.(*ArrayType); ok { + x := *arr + nt.Extra = &x + } + } // TODO(mdempsky): Find out why this is necessary and explain. 
if t.Orig == t { nt.Orig = &nt @@ -483,17 +684,17 @@ func (t *Type) wantEtype2(et1, et2 EType) { func (t *Type) RecvsP() **Type { t.wantEtype(TFUNC) - return &t.Type + return &t.Extra.(*FuncType).Receiver } func (t *Type) ParamsP() **Type { t.wantEtype(TFUNC) - return &t.Type.Down.Down + return &t.Extra.(*FuncType).Params } func (t *Type) ResultsP() **Type { t.wantEtype(TFUNC) - return &t.Type.Down + return &t.Extra.(*FuncType).Results } func (t *Type) Recvs() *Type { return *t.RecvsP() } @@ -524,51 +725,82 @@ var paramsResults = [2]func(*Type) *Type{ // Key returns the key type of map type t. func (t *Type) Key() *Type { t.wantEtype(TMAP) - return t.Down + return t.Extra.(*MapType).Key } // Val returns the value type of map type t. func (t *Type) Val() *Type { t.wantEtype(TMAP) - return t.Type + return t.Extra.(*MapType).Val } // Elem returns the type of elements of t. // Usable with pointers, channels, arrays, and slices. func (t *Type) Elem() *Type { switch t.Etype { - case TPTR32, TPTR64, TCHAN, TARRAY: - default: - Fatalf("Type.Elem %s", t.Etype) + case TPTR32, TPTR64: + return t.Extra.(PtrType).Elem + case TARRAY: + switch t := t.Extra.(type) { + case *ArrayType: + return t.Elem + case SliceType: + return t.Elem + case DDDArrayType: + return t.Elem + } + case TCHAN: + return t.Extra.(*ChanType).Elem } - return t.Type + Fatalf("Type.Elem %s", t.Etype) + return nil } -// Wrapped returns the type that pseudo-type t wraps. -func (t *Type) Wrapped() *Type { - switch t.Etype { - case TCHANARGS, TFUNCARGS, TDDDFIELD: - default: - Fatalf("Type.Wrapped %s", t.Etype) - } - return t.Type +// DDDField returns the slice ... type for TDDDFIELD type t. +func (t *Type) DDDField() *Type { + t.wantEtype(TDDDFIELD) + return t.Extra.(DDDFieldType).T +} + +// ChanArgs returns the channel type for TCHANARGS type t. +func (t *Type) ChanArgs() *Type { + t.wantEtype(TCHANARGS) + return t.Extra.(ChanArgsType).T +} + +// FuncArgs returns the channel type for TFUNCARGS type t. 
+func (t *Type) FuncArgs() *Type { + t.wantEtype(TFUNCARGS) + return t.Extra.(FuncArgsType).T } // Nname returns the associated function's nname. func (t *Type) Nname() *Node { - t.wantEtype2(TFUNC, TINTERMETH) - return t.nname + switch t.Etype { + case TFUNC: + return t.Extra.(*FuncType).Nname + case TINTERMETH: + return t.Extra.(InterMethType).Nname + } + Fatalf("Type.Nname %v %v", t.Etype, t) + return nil } // Nname sets the associated function's nname. func (t *Type) SetNname(n *Node) { - t.wantEtype2(TFUNC, TINTERMETH) - t.nname = n + switch t.Etype { + case TFUNC: + t.Extra.(*FuncType).Nname = n + case TINTERMETH: + t.Extra = InterMethType{Nname: n} + default: + Fatalf("Type.SetNname %v %v", t.Etype, t) + } } // IsFuncArgStruct reports whether t is a struct representing function parameters. func (t *Type) IsFuncArgStruct() bool { - return t.Etype == TSTRUCT && t.Funarg + return t.Etype == TSTRUCT && t.Extra.(*StructType).Funarg } func (t *Type) Methods() *Fields { @@ -582,10 +814,14 @@ func (t *Type) AllMethods() *Fields { } func (t *Type) Fields() *Fields { - if t.Etype != TSTRUCT && t.Etype != TINTER { - Fatalf("Fields: type %v does not have fields", t) + switch t.Etype { + case TSTRUCT: + return &t.Extra.(*StructType).fields + case TINTER: + return &t.Extra.(*InterType).fields } - return &t.fields + Fatalf("Fields: type %v does not have fields", t) + return nil } // Field returns the i'th field/method of struct/interface type t. @@ -608,15 +844,15 @@ func (t *Type) isDDDArray() bool { if t.Etype != TARRAY { return false } - t.checkBound() - return t.Bound == dddBound + _, ok := t.Extra.(DDDArrayType) + return ok } // ArgWidth returns the total aligned argument size for a function. // It includes the receiver, parameters, and results. 
func (t *Type) ArgWidth() int64 { t.wantEtype(TFUNC) - return t.Argwid + return t.Extra.(*FuncType).Argwid } func (t *Type) Size() int64 { @@ -764,20 +1000,20 @@ func (t *Type) cmp(x *Type) ssa.Cmp { // by the general code after the switch. case TSTRUCT: - if t.Map == nil { - if x.Map != nil { + if t.StructType().Map == nil { + if x.StructType().Map != nil { return ssa.CMPlt // nil < non-nil } // to the fallthrough - } else if x.Map == nil { + } else if x.StructType().Map == nil { return ssa.CMPgt // nil > non-nil - } else if t.Map.Bucket == t { + } else if t.StructType().Map.MapType().Bucket == t { // Both have non-nil Map // Special case for Maps which include a recursive type where the recursion is not broken with a named type - if x.Map.Bucket != x { + if x.StructType().Map.MapType().Bucket != x { return ssa.CMPlt // bucket maps are least } - return t.Map.cmp(x.Map) + return t.StructType().Map.cmp(x.StructType().Map) } // If t != t.Map.Bucket, fall through to general case fallthrough @@ -910,21 +1146,22 @@ func (t *Type) IsChan() bool { return t.Etype == TCHAN } -// checkBound enforces that Bound has an acceptable value. -func (t *Type) checkBound() { - if t.Bound != sliceBound && t.Bound < 0 && t.Bound != dddBound { - Fatalf("bad TARRAY bounds %d %s", t.Bound, t) - } -} - +// TODO: Remove noinline when issue 15084 is resolved. 
+//go:noinline func (t *Type) IsSlice() bool { - t.checkBound() - return t.Etype == TARRAY && t.Bound == sliceBound + if t.Etype != TARRAY { + return false + } + _, ok := t.Extra.(SliceType) + return ok } func (t *Type) IsArray() bool { - t.checkBound() - return t.Etype == TARRAY && t.Bound >= 0 + if t.Etype != TARRAY { + return false + } + _, ok := t.Extra.(*ArrayType) + return ok } func (t *Type) IsStruct() bool { @@ -961,24 +1198,48 @@ func (t *Type) FieldOff(i int) int64 { func (t *Type) NumElem() int64 { t.wantEtype(TARRAY) - t.checkBound() - return t.Bound + switch t := t.Extra.(type) { + case *ArrayType: + return t.Bound + case SliceType: + return sliceBound + case DDDArrayType: + return dddBound + } + Fatalf("NumElem on non-array %T %v", t.Extra, t) + return 0 } // SetNumElem sets the number of elements in an array type. // It should not be used if at all possible. // Create a new array/slice/dddArray with typX instead. -// TODO(josharian): figure out how to get rid of this. +// The only allowed uses are: +// * array -> slice as a hack to suppress extra error output +// * ddd array -> array +// TODO(josharian): figure out how to get rid of this entirely. func (t *Type) SetNumElem(n int64) { t.wantEtype(TARRAY) - t.Bound = n + switch { + case n >= 0: + if !t.isDDDArray() { + Fatalf("SetNumElem non-ddd -> array %v", t) + } + t.Extra = &ArrayType{Elem: t.Elem(), Bound: n} + case n == sliceBound: + if !t.IsArray() { + Fatalf("SetNumElem non-array -> slice %v", t) + } + t.Extra = SliceType{Elem: t.Elem()} + default: + Fatalf("SetNumElem %d %v", n, t) + } } // ChanDir returns the direction of a channel type t. // The direction will be one of Crecv, Csend, or Cboth. 
func (t *Type) ChanDir() ChanDir { t.wantEtype(TCHAN) - return t.Chan + return t.Extra.(*ChanType).Dir } func (t *Type) IsMemory() bool { return false } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index db74a0d246..ab7d257aac 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -2103,7 +2103,7 @@ OpSwitch: return n } - if Curfn.Type.Outnamed && n.List.Len() == 0 { + if Curfn.Type.FuncType().Outnamed && n.List.Len() == 0 { break OpSwitch } typecheckaste(ORETURN, nil, false, Curfn.Type.Results(), n.List, func() string { return "return argument" }) @@ -2161,12 +2161,8 @@ OpSwitch: t := n.Type if t != nil && !t.IsFuncArgStruct() && n.Op != OTYPE { switch t.Etype { - case TFUNC, // might have TANY; wait until its called - TANY, - TFORW, - TIDEAL, - TNIL, - TBLANK: + case TFUNC, // might have TANY; wait until it's called + TANY, TFORW, TIDEAL, TNIL, TBLANK: break default: @@ -3522,13 +3518,13 @@ var mapqueue []*Node func copytype(n *Node, t *Type) { if t.Etype == TFORW { // This type isn't computed yet; when it is, update n. - t.Copyto = append(t.Copyto, n) + t.ForwardType().Copyto = append(t.ForwardType().Copyto, n) return } maplineno := n.Type.Maplineno - embedlineno := n.Type.Embedlineno - l := n.Type.Copyto + embedlineno := n.Type.ForwardType().Embedlineno + l := n.Type.ForwardType().Copyto // TODO(mdempsky): Fix Type rekinding. *n.Type = *t @@ -3544,7 +3540,6 @@ func copytype(n *Node, t *Type) { t.Nod = nil t.Printed = false t.Deferwidth = false - t.Copyto = nil // Update nodes waiting on this type. 
for _, n := range l { diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index c2ba9c9a93..3330fbbab2 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -359,16 +359,16 @@ func lexinit1() { // t = interface { Error() string } rcvr := typ(TSTRUCT) - rcvr.Funarg = true + rcvr.StructType().Funarg = true field := newField() field.Type = Ptrto(typ(TSTRUCT)) rcvr.SetFields([]*Field{field}) in := typ(TSTRUCT) - in.Funarg = true + in.StructType().Funarg = true out := typ(TSTRUCT) - out.Funarg = true + out.StructType().Funarg = true field = newField() field.Type = Types[TSTRING] out.SetFields([]*Field{field}) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 392dae0fa9..ff8ddea7f6 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -287,7 +287,7 @@ func walkstmt(n *Node) *Node { if n.List.Len() == 0 { break } - if (Curfn.Type.Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) { + if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) { // assign to the function out parameters, // so that reorder3 can fix up conflicts var rl []*Node -- cgit v1.3 From 04945edd40fff4d66321a4f98c1bb070b6356008 Mon Sep 17 00:00:00 2001 From: Alexandru Moșoi Date: Mon, 4 Apr 2016 19:23:41 +0200 Subject: cmd/compile: replaces ANDQ with MOV?ZX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Where possible replace ANDQ with MOV?ZX. Takes care that we don't regress wrt bounds checking, for example [1000]int{}[i&255]. According to "Intel 64 and IA-32 Architectures Optimization Reference Manual" Section: "3.5.1.13 Zero-Latency MOV Instructions" MOV?ZX instructions have zero latency on newer processors. 
Updates #15105 Change-Id: I63539fdbc5812d5563aa1ebc49eca035bd307997 Reviewed-on: https://go-review.googlesource.com/21508 Reviewed-by: Айнар Гарипов Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/gen/AMD64.rules | 8 +++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 81 ++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 4ad0f883b0..b37720eb39 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -587,6 +587,11 @@ (CMPB x (MOVBconst [c])) -> (CMPBconst x [c]) (CMPB (MOVBconst [c]) x) -> (InvertFlags (CMPBconst x [c])) +// Using MOVBQZX instead of ANDQ is cheaper. +(ANDQconst [0xFF] x) -> (MOVBQZX x) +(ANDQconst [0xFFFF] x) -> (MOVWQZX x) +(ANDQconst [0xFFFFFFFF] x) -> (MOVLQZX x) + // strength reduction // Assumes that the following costs from https://gmplib.org/~tege/x86-timing.pdf: // 1 - addq, shlq, leaq, negq @@ -1093,6 +1098,9 @@ (CMPBconst (MOVBconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT) // Other known comparisons. 
+(CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT) +(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c -> (FlagLT_ULT) +(CMPQconst (MOVLQZX _) [c]) && 0xFFFFFFFF < c -> (FlagLT_ULT) (CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT) (CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT) (CMPWconst (ANDWconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 11c2de391c..a1d1e4edd9 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1838,6 +1838,42 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool { v.AddArg(x) return true } + // match: (ANDQconst [0xFF] x) + // cond: + // result: (MOVBQZX x) + for { + if v.AuxInt != 0xFF { + break + } + x := v.Args[0] + v.reset(OpAMD64MOVBQZX) + v.AddArg(x) + return true + } + // match: (ANDQconst [0xFFFF] x) + // cond: + // result: (MOVWQZX x) + for { + if v.AuxInt != 0xFFFF { + break + } + x := v.Args[0] + v.reset(OpAMD64MOVWQZX) + v.AddArg(x) + return true + } + // match: (ANDQconst [0xFFFFFFFF] x) + // cond: + // result: (MOVLQZX x) + for { + if v.AuxInt != 0xFFFFFFFF { + break + } + x := v.Args[0] + v.reset(OpAMD64MOVLQZX) + v.AddArg(x) + return true + } // match: (ANDQconst [0] _) // cond: // result: (MOVQconst [0]) @@ -3026,6 +3062,51 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagGT_UGT) return true } + // match: (CMPQconst (MOVBQZX _) [c]) + // cond: 0xFF < c + // result: (FlagLT_ULT) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVBQZX { + break + } + c := v.AuxInt + if !(0xFF < c) { + break + } + v.reset(OpAMD64FlagLT_ULT) + return true + } + // match: (CMPQconst (MOVWQZX _) [c]) + // cond: 0xFFFF < c + // result: (FlagLT_ULT) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVWQZX { + break + } + c := v.AuxInt 
+ if !(0xFFFF < c) { + break + } + v.reset(OpAMD64FlagLT_ULT) + return true + } + // match: (CMPQconst (MOVLQZX _) [c]) + // cond: 0xFFFFFFFF < c + // result: (FlagLT_ULT) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVLQZX { + break + } + c := v.AuxInt + if !(0xFFFFFFFF < c) { + break + } + v.reset(OpAMD64FlagLT_ULT) + return true + } // match: (CMPQconst (ANDQconst _ [m]) [n]) // cond: 0 <= m && m < n // result: (FlagLT_ULT) -- cgit v1.3 From 007b12977aa8f3373b358361fe21802d5a8408b4 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Wed, 6 Apr 2016 14:12:48 -0700 Subject: cmd/compile: move Type.Maplineno to separate data structure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Relatively few types are ever used as map keys, so tracking this separately is a net win. Passes toolstash -cmp. name old alloc/op new alloc/op delta Template 55.9MB ± 0% 55.5MB ± 0% -0.71% (p=0.000 n=10+10) Unicode 37.8MB ± 0% 37.7MB ± 0% -0.27% (p=0.000 n=10+10) GoTypes 180MB ± 0% 179MB ± 0% -0.52% (p=0.000 n=7+10) Compiler 806MB ± 0% 803MB ± 0% -0.41% (p=0.000 n=10+10) CPU and number of allocs are unchanged. 
Change-Id: I6d60d74a4866995a231dfed3dd5792d75d904292 Reviewed-on: https://go-review.googlesource.com/21622 Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/sizeof_test.go | 2 +- src/cmd/compile/internal/gc/subr.go | 4 ++-- src/cmd/compile/internal/gc/type.go | 2 -- src/cmd/compile/internal/gc/typecheck.go | 14 +++++++++----- 4 files changed, 12 insertions(+), 10 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/sizeof_test.go b/src/cmd/compile/internal/gc/sizeof_test.go index 8b0dfe538e..f2b1461bc8 100644 --- a/src/cmd/compile/internal/gc/sizeof_test.go +++ b/src/cmd/compile/internal/gc/sizeof_test.go @@ -27,7 +27,7 @@ func TestSizeof(t *testing.T) { {Name{}, 52, 80}, {Node{}, 92, 144}, {Sym{}, 60, 112}, - {Type{}, 56, 88}, + {Type{}, 52, 80}, {MapType{}, 20, 40}, {ForwardType{}, 16, 32}, {FuncType{}, 28, 48}, diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index a61b8bcd27..035bd815c2 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -390,8 +390,8 @@ func checkMapKeyType(key *Type) { // before key is fully defined, the error // will only be printed for the first one. // good enough. 
- if key.Maplineno == 0 { - key.Maplineno = lineno + if maplineno[key] == 0 { + maplineno[key] = lineno } } } diff --git a/src/cmd/compile/internal/gc/type.go b/src/cmd/compile/internal/gc/type.go index 3d2f01ef7d..eee8e0384a 100644 --- a/src/cmd/compile/internal/gc/type.go +++ b/src/cmd/compile/internal/gc/type.go @@ -153,8 +153,6 @@ type Type struct { Vargen int32 // unique name for OTYPE/ONAME Lineno int32 // line at which this type was declared, implicitly or explicitly - Maplineno int32 // first use of this type as a map key - Etype EType // kind of type Noalg bool // suppress hash and eq algorithm generation Trecur uint8 // to detect loops diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index ab7d257aac..a20f87d940 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3513,7 +3513,11 @@ func domethod(n *Node) { checkwidth(n.Type) } -var mapqueue []*Node +var ( + mapqueue []*Node + // maplineno tracks the line numbers at which types are first used as map keys + maplineno = map[*Type]int32{} +) func copytype(n *Node, t *Type) { if t.Etype == TFORW { @@ -3522,7 +3526,7 @@ func copytype(n *Node, t *Type) { return } - maplineno := n.Type.Maplineno + mapline := maplineno[n.Type] embedlineno := n.Type.ForwardType().Embedlineno l := n.Type.ForwardType().Copyto @@ -3559,8 +3563,8 @@ func copytype(n *Node, t *Type) { lineno = lno // Queue check for map until all the types are done settling. 
- if maplineno != 0 { - t.Maplineno = maplineno + if mapline != 0 { + maplineno[t] = mapline mapqueue = append(mapqueue, n) } } @@ -3609,7 +3613,7 @@ ret: } for _, n := range mapqueue { - lineno = n.Type.Maplineno + lineno = maplineno[n.Type] checkMapKeyType(n.Type) } -- cgit v1.3 From 81aacb80d55eddcb95cbe2c87392cc922e026e45 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Wed, 6 Apr 2016 15:27:30 -0700 Subject: cmd/compile, go/importer: minor cleanups Change-Id: I4ffb79d8cb08b0b44f59757fb7f0ec3ed1e4479f Reviewed-on: https://go-review.googlesource.com/21624 Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/bexport.go | 8 +++----- src/go/internal/gcimporter/bimport.go | 5 ++--- 2 files changed, 5 insertions(+), 8 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 8dcf97b31d..092cdac2f6 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -109,8 +109,6 @@ import ( // // NOTE: This flag is the first flag to enable if importing dies because of // (suspected) format errors, and whenever a change is made to the format. -// Having debugFormat enabled increases the export data size massively (by -// several factors) - avoid running with the flag enabled in general. const debugFormat = false // default: false // TODO(gri) remove eventually @@ -515,19 +513,19 @@ func (p *exporter) typ(t *Type) { p.typIndex[t] = len(p.typIndex) // pick off named types - if sym := t.Sym; sym != nil { + if tsym := t.Sym; tsym != nil { // Predeclared types should have been found in the type map. if t.Orig == t { Fatalf("exporter: predeclared type missing from type map?") } // TODO(gri) The assertion below seems incorrect (crashes during all.bash). 
// we expect the respective definition to point to us - // if sym.Def.Type != t { + // if tsym.Def.Type != t { // Fatalf("exporter: type definition doesn't point to us?") // } p.tag(namedTag) - p.qualifiedName(sym) + p.qualifiedName(tsym) // write underlying type p.typ(t.Orig) diff --git a/src/go/internal/gcimporter/bimport.go b/src/go/internal/gcimporter/bimport.go index 12efb2aaf3..aa9569de52 100644 --- a/src/go/internal/gcimporter/bimport.go +++ b/src/go/internal/gcimporter/bimport.go @@ -232,8 +232,7 @@ func (p *importer) typ(parent *types.Package) types.Type { switch i { case namedTag: // read type object - name := p.string() - parent = p.pkg() + parent, name := p.qualifiedName() scope := parent.Scope() obj := scope.Lookup(name) @@ -258,7 +257,7 @@ func (p *importer) typ(parent *types.Package) types.Type { t0.SetUnderlying(p.typ(parent)) // interfaces don't have associated methods - if _, ok := t0.Underlying().(*types.Interface); ok { + if types.IsInterface(t0) { return t } -- cgit v1.3 From c6e11fe03765e3fe1fc68bd794625ca0ecd833be Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 6 Apr 2016 12:01:40 -0700 Subject: cmd: add new common architecture representation Information about CPU architectures (e.g., name, family, byte ordering, pointer and register size) is currently redundantly scattered around the source tree. Instead consolidate the basic information into a single new package cmd/internal/sys. Also, introduce new sys.I386, sys.AMD64, etc. names for the constants '8', '6', etc. and replace most uses of the latter. The notable exceptions are a couple of error messages that still refer to the old char-based toolchain names and function reltype in cmd/link. Passes toolstash/buildall. 
Change-Id: I8a6f0cbd49577ec1672a98addebc45f767e36461 Reviewed-on: https://go-review.googlesource.com/21623 Reviewed-by: Michael Hudson-Doyle Reviewed-by: Brad Fitzpatrick Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot --- src/cmd/asm/internal/asm/asm.go | 49 +++++----- src/cmd/asm/internal/asm/parse.go | 16 ++-- src/cmd/compile/internal/amd64/galign.go | 12 +-- src/cmd/compile/internal/arm/galign.go | 7 +- src/cmd/compile/internal/arm64/galign.go | 7 +- src/cmd/compile/internal/gc/cgen.go | 88 +++++++++--------- src/cmd/compile/internal/gc/gen.go | 3 +- src/cmd/compile/internal/gc/go.go | 5 +- src/cmd/compile/internal/gc/gsubr.go | 19 ++-- src/cmd/compile/internal/gc/main.go | 21 +++-- src/cmd/compile/internal/gc/pgen.go | 5 +- src/cmd/compile/internal/gc/plive.go | 3 +- src/cmd/compile/internal/gc/reg.go | 7 +- src/cmd/compile/internal/gc/ssa.go | 7 +- src/cmd/compile/internal/gc/walk.go | 10 +-- src/cmd/compile/internal/mips64/galign.go | 10 +-- src/cmd/compile/internal/ppc64/galign.go | 11 +-- src/cmd/compile/internal/x86/galign.go | 7 +- src/cmd/dist/buildtool.go | 1 + src/cmd/internal/obj/arm/obj5.go | 15 ++-- src/cmd/internal/obj/arm64/obj7.go | 15 ++-- src/cmd/internal/obj/data.go | 2 +- src/cmd/internal/obj/link.go | 19 ++-- src/cmd/internal/obj/mips/obj0.go | 22 ++--- src/cmd/internal/obj/objfile.go | 3 +- src/cmd/internal/obj/pcln.go | 4 +- src/cmd/internal/obj/ppc64/obj9.go | 22 ++--- src/cmd/internal/obj/s390x/objz.go | 15 ++-- src/cmd/internal/obj/sym.go | 3 +- src/cmd/internal/obj/x86/asm6.go | 2 +- src/cmd/internal/obj/x86/obj6.go | 55 +++++------- src/cmd/internal/sys/arch.go | 145 ++++++++++++++++++++++++++++++ src/cmd/link/internal/amd64/asm.go | 2 +- src/cmd/link/internal/amd64/l.go | 5 -- src/cmd/link/internal/amd64/obj.go | 11 +-- src/cmd/link/internal/arm/l.go | 2 - src/cmd/link/internal/arm/obj.go | 9 +- src/cmd/link/internal/arm64/asm.go | 2 +- src/cmd/link/internal/arm64/l.go | 2 - src/cmd/link/internal/arm64/obj.go | 9 +- 
src/cmd/link/internal/ld/arch.go | 97 -------------------- src/cmd/link/internal/ld/data.go | 57 ++++++------ src/cmd/link/internal/ld/deadcode.go | 3 +- src/cmd/link/internal/ld/decodesym.go | 77 ++++++++-------- src/cmd/link/internal/ld/dwarf.go | 44 ++++----- src/cmd/link/internal/ld/elf.go | 92 ++++++++++--------- src/cmd/link/internal/ld/ldelf.go | 26 +++--- src/cmd/link/internal/ld/ldmacho.go | 18 ++-- src/cmd/link/internal/ld/ldpe.go | 3 +- src/cmd/link/internal/ld/lib.go | 53 +++++------ src/cmd/link/internal/ld/link.go | 25 ++---- src/cmd/link/internal/ld/macho.go | 46 ++++------ src/cmd/link/internal/ld/pcln.go | 38 ++++---- src/cmd/link/internal/ld/pe.go | 29 +++--- src/cmd/link/internal/ld/pobj.go | 9 +- src/cmd/link/internal/ld/sym.go | 33 +++---- src/cmd/link/internal/ld/symtab.go | 5 +- src/cmd/link/internal/mips64/asm.go | 9 +- src/cmd/link/internal/mips64/l.go | 2 - src/cmd/link/internal/mips64/obj.go | 15 ++-- src/cmd/link/internal/ppc64/l.go | 2 - src/cmd/link/internal/ppc64/obj.go | 17 ++-- src/cmd/link/internal/s390x/l.go | 5 -- src/cmd/link/internal/s390x/obj.go | 9 +- src/cmd/link/internal/x86/asm.go | 4 +- src/cmd/link/internal/x86/l.go | 3 - src/cmd/link/internal/x86/obj.go | 9 +- 67 files changed, 639 insertions(+), 743 deletions(-) create mode 100644 src/cmd/internal/sys/arch.go delete mode 100644 src/cmd/link/internal/ld/arch.go (limited to 'src/cmd/compile') diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go index 950fd735c9..d674914c67 100644 --- a/src/cmd/asm/internal/asm/asm.go +++ b/src/cmd/asm/internal/asm/asm.go @@ -13,6 +13,7 @@ import ( "cmd/asm/internal/flags" "cmd/asm/internal/lex" "cmd/internal/obj" + "cmd/internal/sys" ) // TODO: configure the architecture @@ -23,14 +24,14 @@ var testOut *bytes.Buffer // Gathers output when testing. // If doLabel is set, it also defines the labels collect for this Prog. 
func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) { if cond != "" { - switch p.arch.Thechar { - case '5': + switch p.arch.Family { + case sys.ARM: if !arch.ARMConditionCodes(prog, cond) { p.errorf("unrecognized condition code .%q", cond) return } - case '7': + case sys.ARM64: if !arch.ARM64Suffix(prog, cond) { p.errorf("unrecognized suffix .%q", cond) return @@ -361,7 +362,7 @@ func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) { target = &a[1] prog.From = a[0] case 3: - if p.arch.Thechar == '9' { + if p.arch.Family == sys.PPC64 { // Special 3-operand jumps. // First two must be constants; a[1] is a register number. target = &a[2] @@ -378,7 +379,7 @@ func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) { prog.Reg = reg break } - if p.arch.Thechar == '0' { + if p.arch.Family == sys.MIPS64 { // 3-operand jumps. // First two must be registers target = &a[2] @@ -386,7 +387,7 @@ func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) { prog.Reg = p.getRegister(prog, op, &a[1]) break } - if p.arch.Thechar == 'z' { + if p.arch.Family == sys.S390X { // 3-operand jumps. target = &a[2] prog.From = a[0] @@ -438,7 +439,7 @@ func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) { // JMP 4(R0) prog.To = *target // On the ppc64, 9a encodes BR (CTR) as BR CTR. We do the same. - if p.arch.Thechar == '9' && target.Offset == 0 { + if p.arch.Family == sys.PPC64 && target.Offset == 0 { prog.To.Type = obj.TYPE_REG } case target.Type == obj.TYPE_CONST: @@ -492,14 +493,14 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { prog.From = a[0] // prog.To is no address. } - if p.arch.Thechar == '9' && arch.IsPPC64NEG(op) { + if p.arch.Family == sys.PPC64 && arch.IsPPC64NEG(op) { // NEG: From and To are both a[0]. 
prog.To = a[0] prog.From = a[0] break } case 2: - if p.arch.Thechar == '5' { + if p.arch.Family == sys.ARM { if arch.IsARMCMP(op) { prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[1]) @@ -532,11 +533,11 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { prog.Reg = p.getRegister(prog, op, &a[1]) break } - } else if p.arch.Thechar == '7' && arch.IsARM64CMP(op) { + } else if p.arch.Family == sys.ARM64 && arch.IsARM64CMP(op) { prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[1]) break - } else if p.arch.Thechar == '0' { + } else if p.arch.Family == sys.MIPS64 { if arch.IsMIPS64CMP(op) || arch.IsMIPS64MUL(op) { prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[1]) @@ -546,12 +547,12 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { prog.From = a[0] prog.To = a[1] case 3: - switch p.arch.Thechar { - case '0': + switch p.arch.Family { + case sys.MIPS64: prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[1]) prog.To = a[2] - case '5': + case sys.ARM: // Special cases. if arch.IsARMSTREX(op) { /* @@ -567,7 +568,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[1]) prog.To = a[2] - case '7': + case sys.ARM64: // ARM64 instructions with one input and two outputs. if arch.IsARM64STLXR(op) { prog.From = a[0] @@ -582,11 +583,11 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[1]) prog.To = a[2] - case '6', '8': + case sys.AMD64, sys.I386: prog.From = a[0] prog.From3 = newAddr(a[1]) prog.To = a[2] - case '9': + case sys.PPC64: if arch.IsPPC64CMP(op) { // CMPW etc.; third argument is a CR register that goes into prog.Reg. 
prog.From = a[0] @@ -612,7 +613,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { p.errorf("invalid addressing modes for %s instruction", obj.Aconv(op)) return } - case 'z': + case sys.S390X: if arch.IsS390xWithLength(op) || arch.IsS390xWithIndex(op) { prog.From = a[1] prog.From3 = newAddr(a[0]) @@ -626,7 +627,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { return } case 4: - if p.arch.Thechar == '5' && arch.IsARMMULA(op) { + if p.arch.Family == sys.ARM && arch.IsARMMULA(op) { // All must be registers. p.getRegister(prog, op, &a[0]) r1 := p.getRegister(prog, op, &a[1]) @@ -639,14 +640,14 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { prog.Reg = r1 break } - if p.arch.Thechar == '7' { + if p.arch.Family == sys.ARM64 { prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[1]) prog.From3 = newAddr(a[2]) prog.To = a[3] break } - if p.arch.Thechar == '9' && arch.IsPPC64RLD(op) { + if p.arch.Family == sys.PPC64 && arch.IsPPC64RLD(op) { // 2nd operand must always be a register. // TODO: Do we need to guard this with the instruction type? // That is, are there 4-operand instructions without this property? @@ -656,7 +657,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { prog.To = a[3] break } - if p.arch.Thechar == 'z' { + if p.arch.Family == sys.S390X { prog.From = a[1] prog.Reg = p.getRegister(prog, op, &a[2]) prog.From3 = newAddr(a[0]) @@ -666,7 +667,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { p.errorf("can't handle %s instruction with 4 operands", obj.Aconv(op)) return case 5: - if p.arch.Thechar == '9' && arch.IsPPC64RLD(op) { + if p.arch.Family == sys.PPC64 && arch.IsPPC64RLD(op) { // Always reg, reg, con, con, reg. (con, con is a 'mask'). 
prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[1]) @@ -688,7 +689,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { p.errorf("can't handle %s instruction with 5 operands", obj.Aconv(op)) return case 6: - if p.arch.Thechar == '5' && arch.IsARMMRC(op) { + if p.arch.Family == sys.ARM && arch.IsARMMRC(op) { // Strange special case: MCR, MRC. prog.To.Type = obj.TYPE_CONST x0 := p.getConstant(prog, op, &a[0]) diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go index ee37439962..40206e6dc1 100644 --- a/src/cmd/asm/internal/asm/parse.go +++ b/src/cmd/asm/internal/asm/parse.go @@ -19,6 +19,7 @@ import ( "cmd/asm/internal/flags" "cmd/asm/internal/lex" "cmd/internal/obj" + "cmd/internal/sys" ) type Parser struct { @@ -130,7 +131,7 @@ func (p *Parser) line() bool { for { tok = p.lex.Next() if len(operands) == 0 && len(items) == 0 { - if (p.arch.Thechar == '5' || p.arch.Thechar == '7') && tok == '.' { + if p.arch.InFamily(sys.ARM, sys.ARM64) && tok == '.' { // ARM conditionals. tok = p.lex.Next() str := p.lex.Text() @@ -420,7 +421,7 @@ func (p *Parser) atStartOfRegister(name string) bool { // We have consumed the register or R prefix. func (p *Parser) atRegisterShift() bool { // ARM only. - if p.arch.Thechar != '5' { + if p.arch.Family != sys.ARM { return false } // R1<<... @@ -476,15 +477,14 @@ func (p *Parser) register(name string, prefix rune) (r1, r2 int16, scale int8, o if c == ':' || c == ',' || c == '+' { // 2nd register; syntax (R1+R2) etc. No two architectures agree. // Check the architectures match the syntax. 
- char := p.arch.Thechar switch p.next().ScanToken { case ',': - if char != '5' && char != '7' { + if !p.arch.InFamily(sys.ARM, sys.ARM64) { p.errorf("(register,register) not supported on this architecture") return } case '+': - if char != '9' { + if p.arch.Family != sys.PPC64 { p.errorf("(register+register) not supported on this architecture") return } @@ -649,7 +649,7 @@ func (p *Parser) registerIndirect(a *obj.Addr, prefix rune) { a.Reg = r1 if r2 != 0 { // TODO: Consistency in the encoding would be nice here. - if p.arch.Thechar == '5' || p.arch.Thechar == '7' { + if p.arch.InFamily(sys.ARM, sys.ARM64) { // Special form // ARM: destination register pair (R1, R2). // ARM64: register pair (R1, R2) for LDP/STP. @@ -662,7 +662,7 @@ func (p *Parser) registerIndirect(a *obj.Addr, prefix rune) { // Nothing may follow return } - if p.arch.Thechar == '9' { + if p.arch.Family == sys.PPC64 { // Special form for PPC64: (R1+R2); alias for (R1)(R2*1). if prefix != 0 || scale != 0 { p.errorf("illegal address mode for register+register") @@ -752,7 +752,7 @@ ListLoop: // register number is ARM-specific. It returns the number of the specified register. 
func (p *Parser) registerNumber(name string) uint16 { - if p.arch.Thechar == '5' && name == "g" { + if p.arch.Family == sys.ARM && name == "g" { return 10 } if name[0] != 'R' { diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go index 14721ea35b..461ef2ada1 100644 --- a/src/cmd/compile/internal/amd64/galign.go +++ b/src/cmd/compile/internal/amd64/galign.go @@ -18,12 +18,7 @@ var ( ) func betypeinit() { - gc.Widthptr = 8 - gc.Widthint = 8 - gc.Widthreg = 8 if obj.Getgoarch() == "amd64p32" { - gc.Widthptr = 4 - gc.Widthint = 4 addptr = x86.AADDL movptr = x86.AMOVL leaptr = x86.ALEAL @@ -42,12 +37,9 @@ func Main() { resvd = append(resvd, x86.REG_BP) } - gc.Thearch.Thechar = '6' - gc.Thearch.Thestring = "amd64" - gc.Thearch.Thelinkarch = &x86.Linkamd64 + gc.Thearch.LinkArch = &x86.Linkamd64 if obj.Getgoarch() == "amd64p32" { - gc.Thearch.Thestring = "amd64p32" - gc.Thearch.Thelinkarch = &x86.Linkamd64p32 + gc.Thearch.LinkArch = &x86.Linkamd64p32 } gc.Thearch.REGSP = x86.REGSP gc.Thearch.REGCTXT = x86.REGCTXT diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go index e05f4d06bb..afd86e44c8 100644 --- a/src/cmd/compile/internal/arm/galign.go +++ b/src/cmd/compile/internal/arm/galign.go @@ -11,15 +11,10 @@ import ( ) func betypeinit() { - gc.Widthptr = 4 - gc.Widthint = 4 - gc.Widthreg = 4 } func Main() { - gc.Thearch.Thechar = '5' - gc.Thearch.Thestring = "arm" - gc.Thearch.Thelinkarch = &arm.Linkarm + gc.Thearch.LinkArch = &arm.Linkarm gc.Thearch.REGSP = arm.REGSP gc.Thearch.REGCTXT = arm.REGCTXT gc.Thearch.REGCALLX = arm.REG_R1 diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go index 7e1226fee1..17c851cb14 100644 --- a/src/cmd/compile/internal/arm64/galign.go +++ b/src/cmd/compile/internal/arm64/galign.go @@ -10,15 +10,10 @@ import ( ) func betypeinit() { - gc.Widthptr = 8 - gc.Widthint = 8 - gc.Widthreg = 8 } func Main() { - 
gc.Thearch.Thechar = '7' - gc.Thearch.Thestring = "arm64" - gc.Thearch.Thelinkarch = &arm64.Linkarm64 + gc.Thearch.LinkArch = &arm64.Linkarm64 gc.Thearch.REGSP = arm64.REGSP gc.Thearch.REGCTXT = arm64.REGCTXT gc.Thearch.REGCALLX = arm64.REGRT1 diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go index c594ad4c11..a9cedf7cfc 100644 --- a/src/cmd/compile/internal/gc/cgen.go +++ b/src/cmd/compile/internal/gc/cgen.go @@ -7,6 +7,7 @@ package gc import ( "cmd/internal/obj" "cmd/internal/obj/ppc64" + "cmd/internal/sys" "fmt" ) @@ -88,7 +89,7 @@ func cgen_wb(n, res *Node, wb bool) { if !res.Addable { if n.Ullman > res.Ullman { - if Ctxt.Arch.Regsize == 4 && Is64(n.Type) { + if Ctxt.Arch.RegSize == 4 && Is64(n.Type) { var n1 Node Tempname(&n1, n.Type) Cgen(n, &n1) @@ -127,7 +128,7 @@ func cgen_wb(n, res *Node, wb bool) { f = false } - if !n.Type.IsComplex() && Ctxt.Arch.Regsize == 8 && !wb { + if !n.Type.IsComplex() && Ctxt.Arch.RegSize == 8 && !wb { a := Thearch.Optoas(OAS, res.Type) var addr obj.Addr if Thearch.Sudoaddable(a, res, &addr) { @@ -151,7 +152,7 @@ func cgen_wb(n, res *Node, wb bool) { } } - if Ctxt.Arch.Thechar == '8' { + if Ctxt.Arch.Family == sys.I386 { // no registers to speak of var n1, n2 Node Tempname(&n1, n.Type) @@ -203,7 +204,7 @@ func cgen_wb(n, res *Node, wb bool) { // Write barrier now handled. Code below this line can ignore wb. - if Ctxt.Arch.Thechar == '5' { // TODO(rsc): Maybe more often? + if Ctxt.Arch.Family == sys.ARM { // TODO(rsc): Maybe more often? 
// if both are addressable, move if n.Addable && res.Addable { if Is64(n.Type) || Is64(res.Type) || n.Op == OREGISTER || res.Op == OREGISTER || n.Type.IsComplex() || res.Type.IsComplex() { @@ -246,12 +247,12 @@ func cgen_wb(n, res *Node, wb bool) { return } - if (Ctxt.Arch.Thechar == '6' || Ctxt.Arch.Thechar == '8') && n.Addable { + if Ctxt.Arch.InFamily(sys.AMD64, sys.I386) && n.Addable { Thearch.Gmove(n, res) return } - if Ctxt.Arch.Thechar == '0' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' { + if Ctxt.Arch.InFamily(sys.ARM64, sys.MIPS64, sys.PPC64) { // if both are addressable, move if n.Addable { if n.Op == OREGISTER || res.Op == OREGISTER { @@ -268,7 +269,7 @@ func cgen_wb(n, res *Node, wb bool) { } // if n is sudoaddable generate addr and move - if Ctxt.Arch.Thechar == '5' && !Is64(n.Type) && !Is64(res.Type) && !n.Type.IsComplex() && !res.Type.IsComplex() { + if Ctxt.Arch.Family == sys.ARM && !Is64(n.Type) && !Is64(res.Type) && !n.Type.IsComplex() && !res.Type.IsComplex() { a := Thearch.Optoas(OAS, n.Type) var addr obj.Addr if Thearch.Sudoaddable(a, n, &addr) { @@ -310,7 +311,7 @@ func cgen_wb(n, res *Node, wb bool) { } // 64-bit ops are hard on 32-bit machine. - if Ctxt.Arch.Regsize == 4 && (Is64(n.Type) || Is64(res.Type) || n.Left != nil && Is64(n.Left.Type)) { + if Ctxt.Arch.RegSize == 4 && (Is64(n.Type) || Is64(res.Type) || n.Left != nil && Is64(n.Left.Type)) { switch n.Op { // math goes to cgen64. 
case OMINUS, @@ -334,7 +335,7 @@ func cgen_wb(n, res *Node, wb bool) { return } - if !n.Type.IsComplex() && Ctxt.Arch.Regsize == 8 { + if !n.Type.IsComplex() && Ctxt.Arch.RegSize == 8 { a := Thearch.Optoas(OAS, n.Type) var addr obj.Addr if Thearch.Sudoaddable(a, n, &addr) { @@ -401,11 +402,11 @@ func cgen_wb(n, res *Node, wb bool) { Regalloc(&n1, nl.Type, res) Cgen(nl, &n1) - if Ctxt.Arch.Thechar == '5' { + if Ctxt.Arch.Family == sys.ARM { var n2 Node Nodconst(&n2, nl.Type, 0) Thearch.Gins(a, &n2, &n1) - } else if Ctxt.Arch.Thechar == '7' { + } else if Ctxt.Arch.Family == sys.ARM64 { Thearch.Gins(a, &n1, &n1) } else { Thearch.Gins(a, nil, &n1) @@ -452,7 +453,7 @@ func cgen_wb(n, res *Node, wb bool) { return } - if Ctxt.Arch.Thechar == '8' { + if Ctxt.Arch.Family == sys.I386 { var n1 Node var n2 Node Tempname(&n2, n.Type) @@ -465,7 +466,7 @@ func cgen_wb(n, res *Node, wb bool) { var n1 Node var n2 Node - if Ctxt.Arch.Thechar == '5' { + if Ctxt.Arch.Family == sys.ARM { if nl.Addable && !Is64(nl.Type) { Regalloc(&n1, nl.Type, res) Thearch.Gmove(nl, &n1) @@ -707,7 +708,7 @@ sbop: // symmetric binary abop: // asymmetric binary var n1 Node var n2 Node - if Ctxt.Arch.Thechar == '8' { + if Ctxt.Arch.Family == sys.I386 { // no registers, sigh if Smallintconst(nr) { var n1 Node @@ -751,14 +752,14 @@ abop: // asymmetric binary Regalloc(&n1, nl.Type, res) Cgen(nl, &n1) - if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm + if Smallintconst(nr) && Ctxt.Arch.Family != sys.MIPS64 && Ctxt.Arch.Family != sys.ARM && Ctxt.Arch.Family != sys.ARM64 && Ctxt.Arch.Family != sys.PPC64 { // TODO(rsc): Check opcode for arm n2 = *nr } else { Regalloc(&n2, nr.Type, nil) Cgen(nr, &n2) } } else { - if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm + if 
Smallintconst(nr) && Ctxt.Arch.Family != sys.MIPS64 && Ctxt.Arch.Family != sys.ARM && Ctxt.Arch.Family != sys.ARM64 && Ctxt.Arch.Family != sys.PPC64 { // TODO(rsc): Check opcode for arm n2 = *nr } else { Regalloc(&n2, nr.Type, res) @@ -876,8 +877,8 @@ func cgen_wbfat(n, res *Node) { // cgen_norm moves n1 to res, truncating to expected type if necessary. // n1 is a register, and cgen_norm frees it. func cgen_norm(n, n1, res *Node) { - switch Ctxt.Arch.Thechar { - case '6', '8': + switch Ctxt.Arch.Family { + case sys.AMD64, sys.I386: // We use sized math, so the result is already truncated. default: switch n.Op { @@ -980,7 +981,7 @@ func Agenr(n *Node, a *Node, res *Node) { Cgen_checknil(a) case OINDEX: - if Ctxt.Arch.Thechar == '5' { + if Ctxt.Arch.Family == sys.ARM { var p2 *obj.Prog // to be patched to panicindex. w := uint32(n.Type.Width) bounded := Debug['B'] != 0 || n.Bounded @@ -1127,7 +1128,7 @@ func Agenr(n *Node, a *Node, res *Node) { Regfree(&n2) break } - if Ctxt.Arch.Thechar == '8' { + if Ctxt.Arch.Family == sys.I386 { var p2 *obj.Prog // to be patched to panicindex. w := uint32(n.Type.Width) bounded := Debug['B'] != 0 || n.Bounded @@ -1604,7 +1605,7 @@ func Agen(n *Node, res *Node) { } func addOffset(res *Node, offset int64) { - if Ctxt.Arch.Thechar == '6' || Ctxt.Arch.Thechar == '8' { + if Ctxt.Arch.InFamily(sys.AMD64, sys.I386) { Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), Nodintconst(offset), res) return } @@ -1825,13 +1826,14 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) { return case ONAME: + // Some architectures might need a temporary or other help here, + // but they don't support direct generation of a bool value yet. + // We can fix that as we go. + mayNeedTemp := Ctxt.Arch.InFamily(sys.ARM, sys.ARM64, sys.MIPS64, sys.PPC64) + if genval { - // 5g, 7g, and 9g might need a temporary or other help here, - // but they don't support direct generation of a bool value yet. - // We can fix that as we go. 
- switch Ctxt.Arch.Thechar { - case '0', '5', '7', '9': - Fatalf("genval 0g, 5g, 7g, 9g ONAMES not fully implemented") + if mayNeedTemp { + Fatalf("genval ONAMES not fully implemented") } Cgen(n, res) if !wantTrue { @@ -1840,7 +1842,7 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) { return } - if n.Addable && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { + if n.Addable && !mayNeedTemp { // no need for a temporary bgenNonZero(n, nil, wantTrue, likely, to) return @@ -1977,7 +1979,7 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) { return } - if Ctxt.Arch.Regsize == 4 && Is64(nr.Type) { + if Ctxt.Arch.RegSize == 4 && Is64(nr.Type) { if genval { // TODO: Teach Cmp64 to generate boolean values and remove this. bvgenjump(n, res, wantTrue, false) @@ -2015,7 +2017,7 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) { Regfree(&n2) } else { var n1 Node - if !nl.Addable && Ctxt.Arch.Thechar == '8' { + if !nl.Addable && Ctxt.Arch.Family == sys.I386 { Tempname(&n1, nl.Type) } else { Regalloc(&n1, nl.Type, nil) @@ -2024,13 +2026,13 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) { Cgen(nl, &n1) nl = &n1 - if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '9' { + if Smallintconst(nr) && Ctxt.Arch.Family != sys.MIPS64 && Ctxt.Arch.Family != sys.PPC64 { Thearch.Gins(Thearch.Optoas(OCMP, nr.Type), nl, nr) bins(nr.Type, res, op, likely, to) return } - if !nr.Addable && Ctxt.Arch.Thechar == '8' { + if !nr.Addable && Ctxt.Arch.Family == sys.I386 { nr = CgenTemp(nr) } @@ -2044,13 +2046,13 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) { l, r := nl, nr // On x86, only < and <= work right with NaN; reverse if needed - if Ctxt.Arch.Thechar == '6' && nl.Type.IsFloat() && (op == OGT || op == OGE) { + if Ctxt.Arch.Family == sys.AMD64 && nl.Type.IsFloat() && (op == OGT || op == OGE) { l, r = r, l op = 
Brrev(op) } // MIPS does not have CMP instruction - if Ctxt.Arch.Thechar == '0' { + if Ctxt.Arch.Family == sys.MIPS64 { p := Thearch.Ginscmp(op, nr.Type, l, r, likely) Patch(p, to) return @@ -2062,8 +2064,8 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) { // Handle floating point special cases. // Note that 8g has Bgen_float and is handled above. if nl.Type.IsFloat() { - switch Ctxt.Arch.Thechar { - case '5': + switch Ctxt.Arch.Family { + case sys.ARM: if genval { Fatalf("genval 5g Isfloat special cases not implemented") } @@ -2077,7 +2079,7 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) { Patch(p, Pc) } return - case '6': + case sys.AMD64: switch n.Op { case OEQ: // neither NE nor P @@ -2111,7 +2113,7 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) { } return } - case '7', '9': + case sys.ARM64, sys.PPC64: if genval { Fatalf("genval 7g, 9g Isfloat special cases not implemented") } @@ -2143,7 +2145,7 @@ func bgenNonZero(n, res *Node, wantTrue bool, likely int, to *obj.Prog) { } // MIPS does not have CMP instruction - if Thearch.Thechar == '0' { + if Thearch.LinkArch.Family == sys.MIPS64 { p := Gbranch(Thearch.Optoas(op, n.Type), n.Type, likely) Naddr(&p.From, n) Patch(p, to) @@ -2352,7 +2354,7 @@ func Ginscall(f *Node, proc int) { // into the instruction stream. Thearch.Ginsnop() - if Thearch.Thechar == '9' { + if Thearch.LinkArch.Family == sys.PPC64 { // On ppc64, when compiling Go into position // independent code on ppc64le we insert an // instruction to reload the TOC pointer from the @@ -2630,7 +2632,7 @@ func cgen_div(op Op, nl *Node, nr *Node, res *Node) { // in peep and optoas in order to enable this. // TODO(rsc): ppc64 needs to support the relevant instructions // in peep and optoas in order to enable this. 
- if nr.Op != OLITERAL || Ctxt.Arch.Thechar == '0' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' { + if nr.Op != OLITERAL || Ctxt.Arch.Family == sys.MIPS64 || Ctxt.Arch.Family == sys.ARM64 || Ctxt.Arch.Family == sys.PPC64 { goto longdiv } w = int(nl.Type.Width * 8) @@ -2995,7 +2997,7 @@ func cgen_slice(n, res *Node, wb bool) { regalloc := Regalloc ginscon := Thearch.Ginscon gins := Thearch.Gins - if Thearch.Thechar == '8' { + if Thearch.LinkArch.Family == sys.I386 { regalloc = func(n *Node, t *Type, reuse *Node) { Tempname(n, t) } @@ -3238,7 +3240,7 @@ func cgen_slice(n, res *Node, wb bool) { compare := func(n1, n2 *Node) { // n1 might be a 64-bit constant, even on 32-bit architectures, // but it will be represented in 32 bits. - if Ctxt.Arch.Regsize == 4 && Is64(n1.Type) { + if Ctxt.Arch.RegSize == 4 && Is64(n1.Type) { if n1.Val().U.(*Mpint).CmpInt64(1<<31) >= 0 { Fatalf("missed slice out of bounds check") } diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index 4a98f41bcb..7527452c93 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -8,6 +8,7 @@ package gc import ( "cmd/internal/obj" + "cmd/internal/sys" "fmt" ) @@ -1174,7 +1175,7 @@ func visitComponents(t *Type, startOffset int64, f func(elem *Type, elemOffset i } // NOTE: Assuming little endian (signed top half at offset 4). // We don't have any 32-bit big-endian systems. 
- if Thearch.Thechar != '5' && Thearch.Thechar != '8' { + if !Thearch.LinkArch.InFamily(sys.ARM, sys.I386) { Fatalf("unknown 32-bit architecture") } return f(Types[TUINT32], startOffset) && diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 4cb985b1be..ef8b516ea5 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -360,9 +360,8 @@ const ( ) type Arch struct { - Thechar int - Thestring string - Thelinkarch *obj.LinkArch + LinkArch *obj.LinkArch + REGSP int REGCTXT int REGCALLX int // BX diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index a2fa5f8b31..63a8e969c3 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -32,6 +32,7 @@ package gc import ( "cmd/internal/obj" + "cmd/internal/sys" "fmt" "runtime" "strings" @@ -57,7 +58,7 @@ func Ismem(n *Node) bool { return true case OADDR: - return Thearch.Thechar == '6' || Thearch.Thechar == '9' // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too + return Thearch.LinkArch.InFamily(sys.AMD64, sys.PPC64) // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too } return false @@ -83,7 +84,7 @@ func Gbranch(as obj.As, t *Type, likely int) *obj.Prog { p := Prog(as) p.To.Type = obj.TYPE_BRANCH p.To.Val = nil - if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' && Thearch.Thechar != '0' { + if as != obj.AJMP && likely != 0 && Thearch.LinkArch.Family != sys.PPC64 && Thearch.LinkArch.Family != sys.ARM64 && Thearch.LinkArch.Family != sys.MIPS64 { p.From.Type = obj.TYPE_CONST if likely > 0 { p.From.Offset = 1 @@ -330,7 +331,7 @@ func Naddr(a *obj.Addr, n *Node) { a.Type = obj.TYPE_REG a.Reg = n.Reg a.Sym = nil - if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width. + if Thearch.LinkArch.Family == sys.I386 { // TODO(rsc): Never clear a->width. 
a.Width = 0 } @@ -342,7 +343,7 @@ func Naddr(a *obj.Addr, n *Node) { if a.Offset != int64(int32(a.Offset)) { Yyerror("offset %d too large for OINDREG", a.Offset) } - if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width. + if Thearch.LinkArch.Family == sys.I386 { // TODO(rsc): Never clear a->width. a.Width = 0 } @@ -424,7 +425,7 @@ func Naddr(a *obj.Addr, n *Node) { Naddr(a, n.Left) case OLITERAL: - if Thearch.Thechar == '8' { + if Thearch.LinkArch.Family == sys.I386 { a.Width = 0 } switch n.Val().Ctype() { @@ -457,7 +458,7 @@ func Naddr(a *obj.Addr, n *Node) { case OADDR: Naddr(a, n.Left) a.Etype = uint8(Tptr) - if Thearch.Thechar != '0' && Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64. + if !Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64) { // TODO(rsc): Do this even for arm, ppc64. a.Width = int64(Widthptr) } if a.Type != obj.TYPE_MEM { @@ -496,7 +497,7 @@ func Naddr(a *obj.Addr, n *Node) { } a.Etype = uint8(Simtype[TUINT]) a.Offset += int64(Array_nel) - if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm. + if Thearch.LinkArch.Family != sys.ARM { // TODO(rsc): Do this even on arm. a.Width = int64(Widthint) } @@ -509,7 +510,7 @@ func Naddr(a *obj.Addr, n *Node) { } a.Etype = uint8(Simtype[TUINT]) a.Offset += int64(Array_cap) - if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm. + if Thearch.LinkArch.Family != sys.ARM { // TODO(rsc): Do this even on arm. 
a.Width = int64(Widthint) } } @@ -695,7 +696,7 @@ func Regalloc(n *Node, t *Type, o *Node) { Fatalf("regalloc: t nil") } et := Simtype[t.Etype] - if Ctxt.Arch.Regsize == 4 && (et == TINT64 || et == TUINT64) { + if Ctxt.Arch.RegSize == 4 && (et == TINT64 || et == TUINT64) { Fatalf("regalloc 64bit") } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 73ecb09fa5..72e6478afe 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -10,6 +10,7 @@ import ( "bufio" "cmd/compile/internal/ssa" "cmd/internal/obj" + "cmd/internal/sys" "flag" "fmt" "io" @@ -96,12 +97,12 @@ func Main() { // but not other values. p := obj.Getgoarch() - if !strings.HasPrefix(p, Thearch.Thestring) { - log.Fatalf("cannot use %cg with GOARCH=%s", Thearch.Thechar, p) + if !strings.HasPrefix(p, Thearch.LinkArch.Name) { + log.Fatalf("cannot use %cg with GOARCH=%s", Thearch.LinkArch.Family, p) } goarch = p - Ctxt = obj.Linknew(Thearch.Thelinkarch) + Ctxt = obj.Linknew(Thearch.LinkArch) Ctxt.DiagFunc = Yyerror Ctxt.Bso = &bstdout bstdout = *obj.Binitw(os.Stdout) @@ -200,15 +201,13 @@ func Main() { obj.Flagcount("y", "debug declarations in canned imports (with -d)", &Debug['y']) var flag_shared int var flag_dynlink bool - switch Thearch.Thechar { - case '5', '6', '7', '8', '9': + if Thearch.LinkArch.InFamily(sys.ARM, sys.AMD64, sys.ARM64, sys.I386, sys.PPC64) { obj.Flagcount("shared", "generate code that can be linked into a shared library", &flag_shared) } - if Thearch.Thechar == '6' { + if Thearch.LinkArch.Family == sys.AMD64 { obj.Flagcount("largemodel", "generate code that assumes a large memory model", &flag_largemodel) } - switch Thearch.Thechar { - case '5', '6', '7', '8', '9': + if Thearch.LinkArch.InFamily(sys.ARM, sys.AMD64, sys.ARM64, sys.I386, sys.PPC64) { flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries") } obj.Flagstr("cpuprofile", "write cpu 
profile to `file`", &cpuprofile) @@ -301,9 +300,9 @@ func Main() { } Thearch.Betypeinit() - if Widthptr == 0 { - Fatalf("betypeinit failed") - } + Widthint = Thearch.LinkArch.IntSize + Widthptr = Thearch.LinkArch.PtrSize + Widthreg = Thearch.LinkArch.RegSize initUniverse() diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 63f7bf825e..bfb65ade38 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -7,6 +7,7 @@ package gc import ( "cmd/compile/internal/ssa" "cmd/internal/obj" + "cmd/internal/sys" "crypto/md5" "fmt" "sort" @@ -286,7 +287,7 @@ func allocauto(ptxt *obj.Prog) { if haspointers(n.Type) { stkptrsize = Stksize } - if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' { + if Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64) { Stksize = Rnd(Stksize, int64(Widthptr)) } if Stksize >= 1<<31 { @@ -323,7 +324,7 @@ func Cgen_checknil(n *Node) { Fatalf("bad checknil") } - if ((Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL { + if (Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64) && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL { var reg Node Regalloc(®, Types[Tptr], n) Cgen(n, ®) diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index 43f594e2ea..6e43d3133f 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -17,6 +17,7 @@ package gc import ( "cmd/internal/obj" + "cmd/internal/sys" "fmt" "sort" "strings" @@ -1396,7 +1397,7 @@ func livenessepilogue(lv *Liveness) { // The instruction before a call to deferreturn is always a // no-op, to keep PC-specific data unambiguous. 
prev := p.Opt.(*obj.Prog) - if Ctxt.Arch.Thechar == '9' { + if Ctxt.Arch.Family == sys.PPC64 { // On ppc64 there is an additional instruction // (another no-op or reload of toc pointer) before // the call. diff --git a/src/cmd/compile/internal/gc/reg.go b/src/cmd/compile/internal/gc/reg.go index 26746a5bcf..8705d6dfa4 100644 --- a/src/cmd/compile/internal/gc/reg.go +++ b/src/cmd/compile/internal/gc/reg.go @@ -33,6 +33,7 @@ package gc import ( "bytes" "cmd/internal/obj" + "cmd/internal/sys" "fmt" "sort" "strings" @@ -249,7 +250,7 @@ func addmove(r *Flow, bn int, rn int, f int) { p1.As = Thearch.Optoas(OAS, Types[uint8(v.etype)]) // TODO(rsc): Remove special case here. - if (Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL { + if Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64) && v.etype == TBOOL { p1.As = Thearch.Optoas(OAS, Types[TUINT8]) } p1.From.Type = obj.TYPE_REG @@ -302,7 +303,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits { // TODO(rsc): Remove special case here. case obj.TYPE_ADDR: var bit Bits - if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' { + if Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64) { goto memcase } a.Type = obj.TYPE_MEM @@ -368,7 +369,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits { if v.etype == et { if int64(v.width) == w { // TODO(rsc): Remove special case for arm here. 
- if flag == 0 || Thearch.Thechar != '5' { + if flag == 0 || Thearch.LinkArch.Family != sys.ARM { return blsh(uint(i)) } } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 127a7c4698..90c4d4e95e 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -13,6 +13,7 @@ import ( "cmd/compile/internal/ssa" "cmd/internal/obj" + "cmd/internal/sys" ) var ssaEnabled = true @@ -24,13 +25,13 @@ func initssa() *ssa.Config { ssaExp.unimplemented = false ssaExp.mustImplement = true if ssaConfig == nil { - ssaConfig = ssa.NewConfig(Thearch.Thestring, &ssaExp, Ctxt, Debug['N'] == 0) + ssaConfig = ssa.NewConfig(Thearch.LinkArch.Name, &ssaExp, Ctxt, Debug['N'] == 0) } return ssaConfig } func shouldssa(fn *Node) bool { - switch Thearch.Thestring { + switch Thearch.LinkArch.Name { default: // Only available for testing. if os.Getenv("SSATEST") == "" { @@ -2409,7 +2410,7 @@ func isSSAIntrinsic1(s *Sym) bool { // so far has only been noticed for Bswap32 and the 16-bit count // leading/trailing instructions, but heuristics might change // in the future or on different architectures). 
- if !ssaEnabled || ssa.IntrinsicsDisable || Thearch.Thechar != '6' { + if !ssaEnabled || ssa.IntrinsicsDisable || Thearch.LinkArch.Family != sys.AMD64 { return false } if s != nil && s.Pkg != nil && s.Pkg.Path == "runtime/internal/sys" { diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index ff8ddea7f6..586a8e9c4f 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -6,6 +6,7 @@ package gc import ( "cmd/internal/obj" + "cmd/internal/sys" "fmt" "strings" ) @@ -672,8 +673,7 @@ opswitch: walkexprlist(n.List.Slice(), init) if n.Left.Op == ONAME && n.Left.Sym.Name == "Sqrt" && n.Left.Sym.Pkg.Path == "math" { - switch Thearch.Thechar { - case '5', '6', '7', '9': + if Thearch.LinkArch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.PPC64) { n.Op = OSQRT n.Left = n.List.First() n.List.Set(nil) @@ -1056,7 +1056,7 @@ opswitch: n = walkexpr(n, init) case OCONV, OCONVNOP: - if Thearch.Thechar == '5' { + if Thearch.LinkArch.Family == sys.ARM { if n.Left.Type.IsFloat() { if n.Type.Etype == TINT64 { n = mkcall("float64toint64", n.Type, init, conv(n.Left, Types[TFLOAT64])) @@ -3274,7 +3274,7 @@ func samecheap(a *Node, b *Node) bool { // The result of walkrotate MUST be assigned back to n, e.g. // n.Left = walkrotate(n.Left) func walkrotate(n *Node) *Node { - if Thearch.Thechar == '0' || Thearch.Thechar == '7' || Thearch.Thechar == '9' { + if Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM64, sys.PPC64) { return n } @@ -3401,7 +3401,7 @@ func walkdiv(n *Node, init *Nodes) *Node { // if >= 0, nr is 1<= 32678 { ld.Diag("TLS offset out of range %d", v) } diff --git a/src/cmd/link/internal/arm64/l.go b/src/cmd/link/internal/arm64/l.go index b9b7ea50e3..67ad5c977f 100644 --- a/src/cmd/link/internal/arm64/l.go +++ b/src/cmd/link/internal/arm64/l.go @@ -62,11 +62,9 @@ package arm64 // THE SOFTWARE. 
const ( - thechar = '7' MaxAlign = 32 // max data alignment MinAlign = 1 // min data alignment FuncAlign = 8 - MINLC = 4 ) /* Used by ../internal/ld/dwarf.go */ diff --git a/src/cmd/link/internal/arm64/obj.go b/src/cmd/link/internal/arm64/obj.go index 693e106ff1..1169e79a58 100644 --- a/src/cmd/link/internal/arm64/obj.go +++ b/src/cmd/link/internal/arm64/obj.go @@ -32,6 +32,7 @@ package arm64 import ( "cmd/internal/obj" + "cmd/internal/sys" "cmd/link/internal/ld" "fmt" "log" @@ -45,17 +46,11 @@ func Main() { } func linkarchinit() { - ld.Thestring = obj.Getgoarch() - ld.Thelinkarch = &ld.Linkarm64 + ld.SysArch = sys.ArchARM64 - ld.Thearch.Thechar = thechar - ld.Thearch.Ptrsize = ld.Thelinkarch.Ptrsize - ld.Thearch.Intsize = ld.Thelinkarch.Ptrsize - ld.Thearch.Regsize = ld.Thelinkarch.Regsize ld.Thearch.Funcalign = FuncAlign ld.Thearch.Maxalign = MaxAlign ld.Thearch.Minalign = MinAlign - ld.Thearch.Minlc = MINLC ld.Thearch.Dwarfregsp = DWARFREGSP ld.Thearch.Dwarfreglr = DWARFREGLR diff --git a/src/cmd/link/internal/ld/arch.go b/src/cmd/link/internal/ld/arch.go deleted file mode 100644 index d28f37fa02..0000000000 --- a/src/cmd/link/internal/ld/arch.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ld - -import "encoding/binary" - -var Linkarm = LinkArch{ - ByteOrder: binary.LittleEndian, - Name: "arm", - Thechar: '5', - Minlc: 4, - Ptrsize: 4, - Regsize: 4, -} - -var Linkarm64 = LinkArch{ - ByteOrder: binary.LittleEndian, - Name: "arm64", - Thechar: '7', - Minlc: 4, - Ptrsize: 8, - Regsize: 8, -} - -var Linkamd64 = LinkArch{ - ByteOrder: binary.LittleEndian, - Name: "amd64", - Thechar: '6', - Minlc: 1, - Ptrsize: 8, - Regsize: 8, -} - -var Linkamd64p32 = LinkArch{ - ByteOrder: binary.LittleEndian, - Name: "amd64p32", - Thechar: '6', - Minlc: 1, - Ptrsize: 4, - Regsize: 8, -} - -var Link386 = LinkArch{ - ByteOrder: binary.LittleEndian, - Name: "386", - Thechar: '8', - Minlc: 1, - Ptrsize: 4, - Regsize: 4, -} - -var Linkppc64 = LinkArch{ - ByteOrder: binary.BigEndian, - Name: "ppc64", - Thechar: '9', - Minlc: 4, - Ptrsize: 8, - Regsize: 8, -} - -var Linkppc64le = LinkArch{ - ByteOrder: binary.LittleEndian, - Name: "ppc64le", - Thechar: '9', - Minlc: 4, - Ptrsize: 8, - Regsize: 8, -} - -var Linkmips64 = LinkArch{ - ByteOrder: binary.BigEndian, - Name: "mips64", - Thechar: '0', - Minlc: 4, - Ptrsize: 8, - Regsize: 8, -} - -var Linkmips64le = LinkArch{ - ByteOrder: binary.LittleEndian, - Name: "mips64le", - Thechar: '0', - Minlc: 4, - Ptrsize: 8, - Regsize: 8, -} - -var Links390x = LinkArch{ - ByteOrder: binary.BigEndian, - Name: "s390x", - Thechar: 'z', - Minlc: 2, - Ptrsize: 8, - Regsize: 8, -} diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index 6bbd6c7d5c..ae430b4e45 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -34,6 +34,7 @@ package ld import ( "cmd/internal/gcprog" "cmd/internal/obj" + "cmd/internal/sys" "fmt" "log" "os" @@ -121,7 +122,7 @@ func Adduint64(ctxt *Link, s *LSym, v uint64) int64 { } func adduint(ctxt *Link, s *LSym, v uint64) int64 { - return adduintxx(ctxt, s, v, Thearch.Intsize) + return adduintxx(ctxt, s, v, SysArch.IntSize) } func setuint8(ctxt *Link, s 
*LSym, r int64, v uint8) int64 { @@ -138,12 +139,12 @@ func Addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 { } s.Attr |= AttrReachable i := s.Size - s.Size += int64(ctxt.Arch.Ptrsize) + s.Size += int64(ctxt.Arch.PtrSize) Symgrow(ctxt, s, s.Size) r := Addrel(s) r.Sym = t r.Off = int32(i) - r.Siz = uint8(ctxt.Arch.Ptrsize) + r.Siz = uint8(ctxt.Arch.PtrSize) r.Type = obj.R_ADDR r.Add = add return i + int64(r.Siz) @@ -163,7 +164,7 @@ func Addpcrelplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 { r.Add = add r.Type = obj.R_PCREL r.Siz = 4 - if Thearch.Thechar == 'z' { + if SysArch.Family == sys.S390X { r.Variant = RV_390_DBL } return i + int64(r.Siz) @@ -178,15 +179,15 @@ func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 { s.Type = obj.SDATA } s.Attr |= AttrReachable - if off+int64(ctxt.Arch.Ptrsize) > s.Size { - s.Size = off + int64(ctxt.Arch.Ptrsize) + if off+int64(ctxt.Arch.PtrSize) > s.Size { + s.Size = off + int64(ctxt.Arch.PtrSize) Symgrow(ctxt, s, s.Size) } r := Addrel(s) r.Sym = t r.Off = int32(off) - r.Siz = uint8(ctxt.Arch.Ptrsize) + r.Siz = uint8(ctxt.Arch.PtrSize) r.Type = obj.R_ADDR r.Add = add return off + int64(r.Siz) @@ -202,12 +203,12 @@ func addsize(ctxt *Link, s *LSym, t *LSym) int64 { } s.Attr |= AttrReachable i := s.Size - s.Size += int64(ctxt.Arch.Ptrsize) + s.Size += int64(ctxt.Arch.PtrSize) Symgrow(ctxt, s, s.Size) r := Addrel(s) r.Sym = t r.Off = int32(i) - r.Siz = uint8(ctxt.Arch.Ptrsize) + r.Siz = uint8(ctxt.Arch.PtrSize) r.Type = obj.R_SIZE return i + int64(r.Siz) } @@ -356,7 +357,7 @@ func relocsym(s *LSym) { // We need to be able to reference dynimport symbols when linking against // shared libraries, and Solaris needs it always if HEADTYPE != obj.Hsolaris && r.Sym != nil && r.Sym.Type == obj.SDYNIMPORT && !DynlinkingGo() { - if !(Thearch.Thechar == '9' && Linkmode == LinkExternal && r.Sym.Name == ".TOC.") { + if !(SysArch.Family == sys.PPC64 && Linkmode == LinkExternal && r.Sym.Name == ".TOC.") { 
Diag("unhandled relocation for %s (type %d rtype %d)", r.Sym.Name, r.Sym.Type, r.Type) } } @@ -365,7 +366,7 @@ func relocsym(s *LSym) { } // TODO(mundaym): remove this special case - see issue 14218. - if Thearch.Thechar == 'z' { + if SysArch.Family == sys.S390X { switch r.Type { case obj.R_PCRELDBL: r.Type = obj.R_PCREL @@ -394,7 +395,7 @@ func relocsym(s *LSym) { } case obj.R_TLS_LE: - isAndroidX86 := goos == "android" && (Thearch.Thechar == '6' || Thearch.Thechar == '8') + isAndroidX86 := goos == "android" && (SysArch.InFamily(sys.AMD64, sys.I386)) if Linkmode == LinkExternal && Iself && HEADTYPE != obj.Hopenbsd && !isAndroidX86 { r.Done = 0 @@ -404,13 +405,13 @@ func relocsym(s *LSym) { r.Xsym = r.Sym r.Xadd = r.Add o = 0 - if Thearch.Thechar != '6' { + if SysArch.Family != sys.AMD64 { o = r.Add } break } - if Iself && Thearch.Thechar == '5' { + if Iself && SysArch.Family == sys.ARM { // On ELF ARM, the thread pointer is 8 bytes before // the start of the thread-local data block, so add 8 // to the actual TLS offset (r->sym->value). @@ -428,7 +429,7 @@ func relocsym(s *LSym) { } case obj.R_TLS_IE: - isAndroidX86 := goos == "android" && (Thearch.Thechar == '6' || Thearch.Thechar == '8') + isAndroidX86 := goos == "android" && (SysArch.InFamily(sys.AMD64, sys.I386)) if Linkmode == LinkExternal && Iself && HEADTYPE != obj.Hopenbsd && !isAndroidX86 { r.Done = 0 @@ -438,7 +439,7 @@ func relocsym(s *LSym) { r.Xsym = r.Sym r.Xadd = r.Add o = 0 - if Thearch.Thechar != '6' { + if SysArch.Family != sys.AMD64 { o = r.Add } break @@ -465,7 +466,7 @@ func relocsym(s *LSym) { o = r.Xadd if Iself { - if Thearch.Thechar == '6' { + if SysArch.Family == sys.AMD64 { o = 0 } } else if HEADTYPE == obj.Hdarwin { @@ -475,10 +476,10 @@ func relocsym(s *LSym) { // The workaround is that on arm64 don't ever add symaddr to o and always use // extern relocation by requiring rs->dynid >= 0. 
if rs.Type != obj.SHOSTOBJ { - if Thearch.Thechar == '7' && rs.Dynid < 0 { + if SysArch.Family == sys.ARM64 && rs.Dynid < 0 { Diag("R_ADDR reloc to %s+%d is not supported on darwin/arm64", rs.Name, o) } - if Thearch.Thechar != '7' { + if SysArch.Family != sys.ARM64 { o += Symaddr(rs) } } @@ -498,7 +499,7 @@ func relocsym(s *LSym) { // fail at runtime. See https://golang.org/issue/7980. // Instead of special casing only amd64, we treat this as an error on all // 64-bit architectures so as to be future-proof. - if int32(o) < 0 && Thearch.Ptrsize > 4 && siz == 4 { + if int32(o) < 0 && SysArch.PtrSize > 4 && siz == 4 { Diag("non-pc-relative relocation address is too big: %#x (%#x + %#x)", uint64(o), Symaddr(r.Sym), r.Add) errorexit() } @@ -515,7 +516,7 @@ func relocsym(s *LSym) { r.Xadd = r.Add + Symaddr(r.Sym) - int64(r.Sym.Sect.Vaddr) o = r.Xadd rs = r.Xsym - if Iself && Thearch.Thechar == '6' { + if Iself && SysArch.Family == sys.AMD64 { o = 0 } break @@ -544,7 +545,7 @@ func relocsym(s *LSym) { o = r.Xadd if Iself { - if Thearch.Thechar == '6' { + if SysArch.Family == sys.AMD64 { o = 0 } } else if HEADTYPE == obj.Hdarwin { @@ -556,7 +557,7 @@ func relocsym(s *LSym) { } else { o += int64(r.Siz) } - } else if HEADTYPE == obj.Hwindows && Thearch.Thechar == '6' { // only amd64 needs PCREL + } else if HEADTYPE == obj.Hwindows && SysArch.Family == sys.AMD64 { // only amd64 needs PCREL // PE/COFF's PC32 relocation uses the address after the relocated // bytes as the base. Compensate by skewing the addend. 
o += int64(r.Siz) @@ -675,7 +676,7 @@ func dynrelocsym(s *LSym) { r.Add = int64(targ.Plt) // jmp *addr - if Thearch.Thechar == '8' { + if SysArch.Family == sys.I386 { Adduint8(Ctxt, rel, 0xff) Adduint8(Ctxt, rel, 0x25) Addaddr(Ctxt, rel, targ) @@ -982,7 +983,7 @@ func addstrdata(name string, value string) { s.Attr |= AttrDuplicateOK reachable := s.Attr.Reachable() Addaddr(Ctxt, s, sp) - adduintxx(Ctxt, s, uint64(len(value)), Thearch.Ptrsize) + adduintxx(Ctxt, s, uint64(len(value)), SysArch.PtrSize) // addstring, addaddr, etc., mark the symbols as reachable. // In this case that is not necessarily true, so stick to what @@ -1128,7 +1129,7 @@ func (p *GCProg) writeByte(x byte) { } func (p *GCProg) End(size int64) { - p.w.ZeroUntil(size / int64(Thearch.Ptrsize)) + p.w.ZeroUntil(size / int64(SysArch.PtrSize)) p.w.End() if debugGCProg { fmt.Fprintf(os.Stderr, "ld: end GCProg\n") @@ -1144,7 +1145,7 @@ func (p *GCProg) AddSym(s *LSym) { return } - ptrsize := int64(Thearch.Ptrsize) + ptrsize := int64(SysArch.PtrSize) nptr := decodetype_ptrdata(typ) / ptrsize if debugGCProg { @@ -1532,7 +1533,7 @@ func dodata() { if s != nil && s.Type == obj.STLSBSS { if Iself && (Linkmode == LinkExternal || Debug['d'] == 0) && HEADTYPE != obj.Hopenbsd { sect = addsection(&Segdata, ".tbss", 06) - sect.Align = int32(Thearch.Ptrsize) + sect.Align = int32(SysArch.PtrSize) sect.Vaddr = 0 } else { sect = nil diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go index 56c4370bcc..b17b96001e 100644 --- a/src/cmd/link/internal/ld/deadcode.go +++ b/src/cmd/link/internal/ld/deadcode.go @@ -6,6 +6,7 @@ package ld import ( "cmd/internal/obj" + "cmd/internal/sys" "fmt" "strings" "unicode" @@ -227,7 +228,7 @@ func (d *deadcodepass) markMethod(m methodref) { func (d *deadcodepass) init() { var names []string - if Thearch.Thechar == '5' { + if SysArch.Family == sys.ARM { // mark some functions that are only referenced after linker code editing if d.ctxt.Goarm == 5 { names 
= append(names, "_sfloat") diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go index 0a6bf094aa..bc29938590 100644 --- a/src/cmd/link/internal/ld/decodesym.go +++ b/src/cmd/link/internal/ld/decodesym.go @@ -7,6 +7,7 @@ package ld import ( "bytes" "cmd/internal/obj" + "cmd/internal/sys" "debug/elf" "fmt" ) @@ -46,39 +47,39 @@ func decode_inuxi(p []byte, sz int) uint64 { } } -func commonsize() int { return 6*Thearch.Ptrsize + 8 } // runtime._type -func structfieldSize() int { return 3 * Thearch.Ptrsize } // runtime.structfield -func uncommonSize() int { return 2*Thearch.Ptrsize + 2*Thearch.Intsize } // runtime.uncommontype +func commonsize() int { return 6*SysArch.PtrSize + 8 } // runtime._type +func structfieldSize() int { return 3 * SysArch.PtrSize } // runtime.structfield +func uncommonSize() int { return 2*SysArch.PtrSize + 2*SysArch.IntSize } // runtime.uncommontype // Type.commonType.kind func decodetype_kind(s *LSym) uint8 { - return uint8(s.P[2*Thearch.Ptrsize+7] & obj.KindMask) // 0x13 / 0x1f + return uint8(s.P[2*SysArch.PtrSize+7] & obj.KindMask) // 0x13 / 0x1f } // Type.commonType.kind func decodetype_noptr(s *LSym) uint8 { - return uint8(s.P[2*Thearch.Ptrsize+7] & obj.KindNoPointers) // 0x13 / 0x1f + return uint8(s.P[2*SysArch.PtrSize+7] & obj.KindNoPointers) // 0x13 / 0x1f } // Type.commonType.kind func decodetype_usegcprog(s *LSym) uint8 { - return uint8(s.P[2*Thearch.Ptrsize+7] & obj.KindGCProg) // 0x13 / 0x1f + return uint8(s.P[2*SysArch.PtrSize+7] & obj.KindGCProg) // 0x13 / 0x1f } // Type.commonType.size func decodetype_size(s *LSym) int64 { - return int64(decode_inuxi(s.P, Thearch.Ptrsize)) // 0x8 / 0x10 + return int64(decode_inuxi(s.P, SysArch.PtrSize)) // 0x8 / 0x10 } // Type.commonType.ptrdata func decodetype_ptrdata(s *LSym) int64 { - return int64(decode_inuxi(s.P[Thearch.Ptrsize:], Thearch.Ptrsize)) // 0x8 / 0x10 + return int64(decode_inuxi(s.P[SysArch.PtrSize:], SysArch.PtrSize)) // 0x8 / 0x10 } // 
Type.commonType.tflag func decodetype_hasUncommon(s *LSym) bool { const tflagUncommon = 1 // see ../../../../reflect/type.go:/^type.tflag - return s.P[2*Thearch.Ptrsize+4]&tflagUncommon != 0 + return s.P[2*SysArch.PtrSize+4]&tflagUncommon != 0 } // Find the elf.Section of a given shared library that contains a given address. @@ -112,11 +113,11 @@ func decodetype_gcprog(s *LSym) []byte { Exitf("cannot find gcprog for %s", s.Name) return nil } - return decode_reloc_sym(s, 2*int32(Thearch.Ptrsize)+8+1*int32(Thearch.Ptrsize)).P + return decode_reloc_sym(s, 2*int32(SysArch.PtrSize)+8+1*int32(SysArch.PtrSize)).P } func decodetype_gcprog_shlib(s *LSym) uint64 { - if Thearch.Thechar == '7' { + if SysArch.Family == sys.ARM64 { for _, shlib := range Ctxt.Shlibs { if shlib.Path == s.File { return shlib.gcdata_addresses[s] @@ -124,7 +125,7 @@ func decodetype_gcprog_shlib(s *LSym) uint64 { } return 0 } - return decode_inuxi(s.P[2*int32(Thearch.Ptrsize)+8+1*int32(Thearch.Ptrsize):], Thearch.Ptrsize) + return decode_inuxi(s.P[2*int32(SysArch.PtrSize)+8+1*int32(SysArch.PtrSize):], SysArch.PtrSize) } func decodetype_gcmask(s *LSym) []byte { @@ -133,14 +134,14 @@ func decodetype_gcmask(s *LSym) []byte { ptrdata := decodetype_ptrdata(s) sect := findShlibSection(s.File, addr) if sect != nil { - r := make([]byte, ptrdata/int64(Thearch.Ptrsize)) + r := make([]byte, ptrdata/int64(SysArch.PtrSize)) sect.ReadAt(r, int64(addr-sect.Addr)) return r } Exitf("cannot find gcmask for %s", s.Name) return nil } - mask := decode_reloc_sym(s, 2*int32(Thearch.Ptrsize)+8+1*int32(Thearch.Ptrsize)) + mask := decode_reloc_sym(s, 2*int32(SysArch.PtrSize)+8+1*int32(SysArch.PtrSize)) return mask.P } @@ -150,7 +151,7 @@ func decodetype_arrayelem(s *LSym) *LSym { } func decodetype_arraylen(s *LSym) int64 { - return int64(decode_inuxi(s.P[commonsize()+2*Thearch.Ptrsize:], Thearch.Ptrsize)) + return int64(decode_inuxi(s.P[commonsize()+2*SysArch.PtrSize:], SysArch.PtrSize)) } // Type.PtrType.elem @@ -164,7 +165,7 
@@ func decodetype_mapkey(s *LSym) *LSym { } func decodetype_mapvalue(s *LSym) *LSym { - return decode_reloc_sym(s, int32(commonsize())+int32(Thearch.Ptrsize)) // 0x20 / 0x38 + return decode_reloc_sym(s, int32(commonsize())+int32(SysArch.PtrSize)) // 0x20 / 0x38 } // Type.ChanType.elem @@ -188,13 +189,13 @@ func decodetype_funcoutcount(s *LSym) int { func decodetype_funcintype(s *LSym, i int) *LSym { uadd := commonsize() + 4 - if Thearch.Ptrsize == 8 { + if SysArch.PtrSize == 8 { uadd += 4 } if decodetype_hasUncommon(s) { uadd += uncommonSize() } - return decode_reloc_sym(s, int32(uadd+i*Thearch.Ptrsize)) + return decode_reloc_sym(s, int32(uadd+i*SysArch.PtrSize)) } func decodetype_funcouttype(s *LSym, i int) *LSym { @@ -203,11 +204,11 @@ func decodetype_funcouttype(s *LSym, i int) *LSym { // Type.StructType.fields.Slice::length func decodetype_structfieldcount(s *LSym) int { - return int(decode_inuxi(s.P[commonsize()+2*Thearch.Ptrsize:], Thearch.Intsize)) + return int(decode_inuxi(s.P[commonsize()+2*SysArch.PtrSize:], SysArch.IntSize)) } func decodetype_structfieldarrayoff(s *LSym, i int) int { - off := commonsize() + 2*Thearch.Ptrsize + 2*Thearch.Intsize + off := commonsize() + 2*SysArch.PtrSize + 2*SysArch.IntSize if decodetype_hasUncommon(s) { off += uncommonSize() } @@ -224,7 +225,7 @@ func decodetype_stringptr(s *LSym, off int) string { if r == nil { // shouldn't happen. 
return "" } - strlen := int64(decode_inuxi(s.P[Thearch.Ptrsize:], Thearch.Intsize)) + strlen := int64(decode_inuxi(s.P[SysArch.PtrSize:], SysArch.IntSize)) return string(r.Sym.P[r.Add : r.Add+strlen]) } @@ -248,17 +249,17 @@ func decodetype_structfieldname(s *LSym, i int) string { func decodetype_structfieldtype(s *LSym, i int) *LSym { off := decodetype_structfieldarrayoff(s, i) - return decode_reloc_sym(s, int32(off+Thearch.Ptrsize)) + return decode_reloc_sym(s, int32(off+SysArch.PtrSize)) } func decodetype_structfieldoffs(s *LSym, i int) int64 { off := decodetype_structfieldarrayoff(s, i) - return int64(decode_inuxi(s.P[off+2*Thearch.Ptrsize:], Thearch.Intsize)) + return int64(decode_inuxi(s.P[off+2*SysArch.PtrSize:], SysArch.IntSize)) } // InterfaceType.methods.length func decodetype_ifacemethodcount(s *LSym) int64 { - return int64(decode_inuxi(s.P[commonsize()+2*Thearch.Ptrsize:], Thearch.Intsize)) + return int64(decode_inuxi(s.P[commonsize()+2*SysArch.PtrSize:], SysArch.IntSize)) } // methodsig is a fully qualified typed method signature, like @@ -288,7 +289,7 @@ func decode_methodsig(s *LSym, off, size, count int) []methodsig { var methods []methodsig for i := 0; i < count; i++ { buf.WriteString(decodetype_name(s, off)) - mtypSym := decode_reloc_sym(s, int32(off+Thearch.Ptrsize)) + mtypSym := decode_reloc_sym(s, int32(off+SysArch.PtrSize)) buf.WriteRune('(') inCount := decodetype_funcincount(mtypSym) @@ -319,7 +320,7 @@ func decodetype_ifacemethods(s *LSym) []methodsig { if decodetype_kind(s)&kindMask != kindInterface { panic(fmt.Sprintf("symbol %q is not an interface", s.Name)) } - r := decode_reloc(s, int32(commonsize()+Thearch.Ptrsize)) + r := decode_reloc(s, int32(commonsize()+SysArch.PtrSize)) if r == nil { return nil } @@ -328,7 +329,7 @@ func decodetype_ifacemethods(s *LSym) []methodsig { } off := int(r.Add) // array of reflect.imethod values numMethods := int(decodetype_ifacemethodcount(s)) - sizeofIMethod := 2 * Thearch.Ptrsize + sizeofIMethod := 2 * 
SysArch.PtrSize return decode_methodsig(s, off, sizeofIMethod, numMethods) } @@ -339,31 +340,31 @@ func decodetype_methods(s *LSym) []methodsig { off := commonsize() // reflect.rtype switch decodetype_kind(s) & kindMask { case kindStruct: // reflect.structType - off += 2*Thearch.Ptrsize + 2*Thearch.Intsize + off += 2*SysArch.PtrSize + 2*SysArch.IntSize case kindPtr: // reflect.ptrType - off += Thearch.Ptrsize + off += SysArch.PtrSize case kindFunc: // reflect.funcType - off += Thearch.Ptrsize // 4 bytes, pointer aligned + off += SysArch.PtrSize // 4 bytes, pointer aligned case kindSlice: // reflect.sliceType - off += Thearch.Ptrsize + off += SysArch.PtrSize case kindArray: // reflect.arrayType - off += 3 * Thearch.Ptrsize + off += 3 * SysArch.PtrSize case kindChan: // reflect.chanType - off += 2 * Thearch.Ptrsize + off += 2 * SysArch.PtrSize case kindMap: // reflect.mapType - off += 4*Thearch.Ptrsize + 8 + off += 4*SysArch.PtrSize + 8 case kindInterface: // reflect.interfaceType - off += Thearch.Ptrsize + 2*Thearch.Intsize + off += SysArch.PtrSize + 2*SysArch.IntSize default: // just Sizeof(rtype) } - numMethods := int(decode_inuxi(s.P[off+2*Thearch.Ptrsize:], Thearch.Intsize)) - r := decode_reloc(s, int32(off+Thearch.Ptrsize)) + numMethods := int(decode_inuxi(s.P[off+2*SysArch.PtrSize:], SysArch.IntSize)) + r := decode_reloc(s, int32(off+SysArch.PtrSize)) if r.Sym != s { panic(fmt.Sprintf("method slice pointer in %s leads to a different symbol %s", s, r.Sym)) } off = int(r.Add) // array of reflect.method values - sizeofMethod := 4 * Thearch.Ptrsize // sizeof reflect.method in program + sizeofMethod := 4 * SysArch.PtrSize // sizeof reflect.method in program return decode_methodsig(s, off, sizeofMethod, numMethods) } diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go index eaa0bdbb41..230d146877 100644 --- a/src/cmd/link/internal/ld/dwarf.go +++ b/src/cmd/link/internal/ld/dwarf.go @@ -39,7 +39,7 @@ var gdbscript string * Basic I/O */ 
func addrput(s *LSym, addr int64) { - switch Thearch.Ptrsize { + switch SysArch.PtrSize { case 4: Adduint32(Ctxt, s, uint32(addr)) @@ -569,7 +569,7 @@ func adddwarfref(ctxt *Link, s *LSym, t *LSym, size int) int64 { default: Diag("invalid size %d in adddwarfref\n", size) fallthrough - case Thearch.Ptrsize: + case SysArch.PtrSize: result = Addaddr(ctxt, s, t) case 4: result = addaddrplus4(ctxt, s, t, 0) @@ -599,7 +599,7 @@ func putattr(s *LSym, abbrev int, form int, cls int, value int64, data interface case DW_FORM_block1: // block if cls == DW_CLS_ADDRESS { - Adduint8(Ctxt, s, uint8(1+Thearch.Ptrsize)) + Adduint8(Ctxt, s, uint8(1+SysArch.PtrSize)) Adduint8(Ctxt, s, DW_OP_addr) Addaddr(Ctxt, s, data.(*LSym)) break @@ -682,14 +682,14 @@ func putattr(s *LSym, abbrev int, form int, cls int, value int64, data interface case DW_FORM_ref_addr: // reference to a DIE in the .info section if data == nil { Diag("dwarf: null reference in %d", abbrev) - if Thearch.Ptrsize == 8 { + if SysArch.PtrSize == 8 { Adduint64(Ctxt, s, 0) // invalid dwarf, gdb will complain. } else { Adduint32(Ctxt, s, 0) // invalid dwarf, gdb will complain. } } else { dsym := data.(*LSym) - adddwarfref(Ctxt, s, dsym, Thearch.Ptrsize) + adddwarfref(Ctxt, s, dsym, SysArch.PtrSize) } case DW_FORM_ref1, // reference within the compilation unit @@ -1161,11 +1161,11 @@ func synthesizemaptypes(die *DWDie) { // compute size info like hashmap.c does. 
indirect_key, indirect_val := false, false if keysize > MaxKeySize { - keysize = int64(Thearch.Ptrsize) + keysize = int64(SysArch.PtrSize) indirect_key = true } if valsize > MaxValSize { - valsize = int64(Thearch.Ptrsize) + valsize = int64(SysArch.PtrSize) indirect_val = true } @@ -1212,13 +1212,13 @@ func synthesizemaptypes(die *DWDie) { fld = newdie(dwhb, DW_ABRV_STRUCTFIELD, "overflow", 0) newrefattr(fld, DW_AT_type, defptrto(dwhb.sym)) newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize))) - if Thearch.Regsize > Thearch.Ptrsize { + if SysArch.RegSize > SysArch.PtrSize { fld = newdie(dwhb, DW_ABRV_STRUCTFIELD, "pad", 0) newrefattr(fld, DW_AT_type, mustFind("uintptr")) - newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize))+int32(Thearch.Ptrsize)) + newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize))+int32(SysArch.PtrSize)) } - newattr(dwhb, DW_AT_byte_size, DW_CLS_CONSTANT, BucketSize+BucketSize*int64(keysize)+BucketSize*int64(valsize)+int64(Thearch.Regsize), 0) + newattr(dwhb, DW_AT_byte_size, DW_CLS_CONSTANT, BucketSize+BucketSize*int64(keysize)+BucketSize*int64(valsize)+int64(SysArch.RegSize), 0) }) // Construct hash @@ -1481,7 +1481,7 @@ func writelines(prev *LSym) *LSym { headerend = ls.Size Adduint8(Ctxt, ls, 0) // start extended opcode - uleb128put(ls, 1+int64(Thearch.Ptrsize)) + uleb128put(ls, 1+int64(SysArch.PtrSize)) Adduint8(Ctxt, ls, DW_LNE_set_address) pc := s.Value @@ -1555,7 +1555,7 @@ func writelines(prev *LSym) *LSym { dt = DW_ABRV_AUTO offs = int64(a.Aoffset) if !haslinkregister() { - offs -= int64(Thearch.Ptrsize) + offs -= int64(SysArch.PtrSize) } case obj.A_PARAM: @@ -1667,7 +1667,7 @@ func writeframes(prev *LSym) *LSym { if haslinkregister() { uleb128put(fs, int64(0)) // offset } else { - uleb128put(fs, int64(Thearch.Ptrsize)) // offset + uleb128put(fs, int64(SysArch.PtrSize)) // offset } Adduint8(Ctxt, fs, DW_CFA_offset_extended) @@ -1675,7 +1675,7 @@ func 
writeframes(prev *LSym) *LSym { if haslinkregister() { uleb128put(fs, int64(0)/DATAALIGNMENTFACTOR) // at cfa - 0 } else { - uleb128put(fs, int64(-Thearch.Ptrsize)/DATAALIGNMENTFACTOR) // at cfa - x*4 + uleb128put(fs, int64(-SysArch.PtrSize)/DATAALIGNMENTFACTOR) // at cfa - x*4 } // 4 is to exclude the length field. @@ -1713,10 +1713,10 @@ func writeframes(prev *LSym) *LSym { if haslinkregister() { deltaBuf = appendPCDeltaCFA(deltaBuf, int64(nextpc)-int64(pcsp.pc), int64(pcsp.value)) } else { - deltaBuf = appendPCDeltaCFA(deltaBuf, int64(nextpc)-int64(pcsp.pc), int64(Thearch.Ptrsize)+int64(pcsp.value)) + deltaBuf = appendPCDeltaCFA(deltaBuf, int64(nextpc)-int64(pcsp.pc), int64(SysArch.PtrSize)+int64(pcsp.value)) } } - pad := int(Rnd(int64(len(deltaBuf)), int64(Thearch.Ptrsize))) - len(deltaBuf) + pad := int(Rnd(int64(len(deltaBuf)), int64(SysArch.PtrSize))) - len(deltaBuf) deltaBuf = append(deltaBuf, zeros[:pad]...) // Emit the FDE header, Section 6.4.1. @@ -1724,7 +1724,7 @@ func writeframes(prev *LSym) *LSym { // 4 bytes: Pointer to the CIE above, at offset 0 // ptrsize: initial location // ptrsize: address range - Adduint32(Ctxt, fs, uint32(4+2*Thearch.Ptrsize+len(deltaBuf))) // length (excludes itself) + Adduint32(Ctxt, fs, uint32(4+2*SysArch.PtrSize+len(deltaBuf))) // length (excludes itself) if Linkmode == LinkExternal { adddwarfref(Ctxt, fs, framesec, 4) } else { @@ -1771,7 +1771,7 @@ func writeinfo(prev *LSym) *LSym { // debug_abbrev_offset (*) adddwarfref(Ctxt, s, abbrevsym, 4) - Adduint8(Ctxt, s, uint8(Thearch.Ptrsize)) // address_size + Adduint8(Ctxt, s, uint8(SysArch.PtrSize)) // address_size prev = putdie(prev, compunit) cusize := s.Size - 4 // exclude the length field. 
@@ -1848,7 +1848,7 @@ func writearanges(prev *LSym) *LSym { s.Type = obj.SDWARFSECT // The first tuple is aligned to a multiple of the size of a single tuple // (twice the size of an address) - headersize := int(Rnd(4+2+4+1+1, int64(Thearch.Ptrsize*2))) // don't count unit_length field itself + headersize := int(Rnd(4+2+4+1+1, int64(SysArch.PtrSize*2))) // don't count unit_length field itself for compunit := dwroot.child; compunit != nil; compunit = compunit.link { b := getattr(compunit, DW_AT_low_pc) @@ -1861,13 +1861,13 @@ func writearanges(prev *LSym) *LSym { } // Write .debug_aranges Header + entry (sec 6.1.2) - unitlength := uint32(headersize) + 4*uint32(Thearch.Ptrsize) - 4 + unitlength := uint32(headersize) + 4*uint32(SysArch.PtrSize) - 4 Adduint32(Ctxt, s, unitlength) // unit_length (*) Adduint16(Ctxt, s, 2) // dwarf version (appendix F) adddwarfref(Ctxt, s, compunit.sym, 4) - Adduint8(Ctxt, s, uint8(Thearch.Ptrsize)) // address_size + Adduint8(Ctxt, s, uint8(SysArch.PtrSize)) // address_size Adduint8(Ctxt, s, 0) // segment_size padding := headersize - (4 + 2 + 4 + 1 + 1) for i := 0; i < padding; i++ { @@ -1940,7 +1940,7 @@ func dwarfgeneratedebugsyms() { die := newdie(&dwtypes, DW_ABRV_BASETYPE, "uintptr", 0) // needed for array size newattr(die, DW_AT_encoding, DW_CLS_CONSTANT, DW_ATE_unsigned, 0) - newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, int64(Thearch.Ptrsize), 0) + newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, int64(SysArch.PtrSize), 0) newattr(die, DW_AT_go_kind, DW_CLS_CONSTANT, obj.KindUintptr, 0) // Prototypes needed for type synthesis. 
diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go index 035826df7c..7c760775b5 100644 --- a/src/cmd/link/internal/ld/elf.go +++ b/src/cmd/link/internal/ld/elf.go @@ -6,6 +6,7 @@ package ld import ( "cmd/internal/obj" + "cmd/internal/sys" "crypto/sha1" "encoding/binary" "encoding/hex" @@ -866,25 +867,23 @@ var buildinfo []byte func Elfinit() { Iself = true - switch Thearch.Thechar { - case '0', '6', '7', '9', 'z': + if SysArch.InFamily(sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.S390X) { elfRelType = ".rela" - default: + } else { elfRelType = ".rel" } - switch Thearch.Thechar { + switch SysArch.Family { // 64-bit architectures - case '9', 'z': + case sys.PPC64, sys.S390X: if Ctxt.Arch.ByteOrder == binary.BigEndian { ehdr.flags = 1 /* Version 1 ABI */ } else { ehdr.flags = 2 /* Version 2 ABI */ } fallthrough - - case '0', '6', '7': - if Thearch.Thechar == '0' { + case sys.AMD64, sys.ARM64, sys.MIPS64: + if SysArch.Family == sys.MIPS64 { ehdr.flags = 0x20000000 /* MIPS 3 */ } elf64 = true @@ -897,7 +896,7 @@ func Elfinit() { // we use EABI on both linux/arm and freebsd/arm. // 32-bit architectures - case '5': + case sys.ARM: // we use EABI on both linux/arm and freebsd/arm. 
if HEADTYPE == obj.Hlinux || HEADTYPE == obj.Hfreebsd { // We set a value here that makes no indication of which @@ -911,7 +910,6 @@ func Elfinit() { ehdr.flags = 0x5000002 // has entry point, Version5 EABI } fallthrough - default: ehdr.phoff = ELF32HDRSIZE /* Must be be ELF32HDRSIZE: first PHdr must follow ELF header */ @@ -1432,7 +1430,7 @@ func elfdynhash() { } // s390x (ELF64) hash table entries are 8 bytes - if Thearch.Thechar == 'z' { + if SysArch.Family == sys.S390X { Adduint64(Ctxt, s, uint64(nbucket)) Adduint64(Ctxt, s, uint64(nsym)) for i := 0; i < nbucket; i++ { @@ -1660,15 +1658,15 @@ func elfshreloc(sect *Section) *ElfShdr { sh := elfshname(elfRelType + sect.Name) sh.type_ = uint32(typ) - sh.entsize = uint64(Thearch.Regsize) * 2 + sh.entsize = uint64(SysArch.RegSize) * 2 if typ == SHT_RELA { - sh.entsize += uint64(Thearch.Regsize) + sh.entsize += uint64(SysArch.RegSize) } sh.link = uint32(elfshname(".symtab").shnum) sh.info = uint32(sect.Elfsect.shnum) sh.off = sect.Reloff sh.size = sect.Rellen - sh.addralign = uint64(Thearch.Regsize) + sh.addralign = uint64(SysArch.RegSize) return sh } @@ -1872,7 +1870,7 @@ func doelf() { Addstring(shstrtab, ".interp") Addstring(shstrtab, ".hash") Addstring(shstrtab, ".got") - if Thearch.Thechar == '9' { + if SysArch.Family == sys.PPC64 { Addstring(shstrtab, ".glink") } Addstring(shstrtab, ".got.plt") @@ -1919,7 +1917,7 @@ func doelf() { s.Type = obj.SELFGOT // writable /* ppc64 glink resolver */ - if Thearch.Thechar == '9' { + if SysArch.Family == sys.PPC64 { s := Linklookup(Ctxt, ".glink", 0) s.Attr |= AttrReachable s.Type = obj.SELFRXSECT @@ -1938,7 +1936,7 @@ func doelf() { s = Linklookup(Ctxt, ".plt", 0) s.Attr |= AttrReachable - if Thearch.Thechar == '9' { + if SysArch.Family == sys.PPC64 { // In the ppc64 ABI, .plt is a data section // written by the dynamic linker. 
s.Type = obj.SELFSECT @@ -1993,15 +1991,15 @@ func doelf() { Elfwritedynent(s, DT_RUNPATH, uint64(Addstring(dynstr, rpath.val))) } - if Thearch.Thechar == '9' { + if SysArch.Family == sys.PPC64 { elfwritedynentsym(s, DT_PLTGOT, Linklookup(Ctxt, ".plt", 0)) - } else if Thearch.Thechar == 'z' { + } else if SysArch.Family == sys.S390X { elfwritedynentsym(s, DT_PLTGOT, Linklookup(Ctxt, ".got", 0)) } else { elfwritedynentsym(s, DT_PLTGOT, Linklookup(Ctxt, ".got.plt", 0)) } - if Thearch.Thechar == '9' { + if SysArch.Family == sys.PPC64 { Elfwritedynent(s, DT_PPC64_OPT, 0) } @@ -2080,22 +2078,22 @@ func Asmbelfsetup() { func Asmbelf(symo int64) { eh := getElfEhdr() - switch Thearch.Thechar { + switch SysArch.Family { default: - Exitf("unknown architecture in asmbelf: %v", Thearch.Thechar) - case '0': + Exitf("unknown architecture in asmbelf: %v", SysArch.Family) + case sys.MIPS64: eh.machine = EM_MIPS - case '5': + case sys.ARM: eh.machine = EM_ARM - case '6': + case sys.AMD64: eh.machine = EM_X86_64 - case '7': + case sys.ARM64: eh.machine = EM_AARCH64 - case '8': + case sys.I386: eh.machine = EM_386 - case '9': + case sys.PPC64: eh.machine = EM_PPC64 - case 'z': + case sys.S390X: eh.machine = EM_S390 } @@ -2251,7 +2249,7 @@ func Asmbelf(symo int64) { } else { sh.entsize = ELF32SYMSIZE } - sh.addralign = uint64(Thearch.Regsize) + sh.addralign = uint64(SysArch.RegSize) sh.link = uint32(elfshname(".dynstr").shnum) // sh->info = index of first non-local symbol (number of local symbols) @@ -2275,7 +2273,7 @@ func Asmbelf(symo int64) { sh = elfshname(".gnu.version_r") sh.type_ = SHT_GNU_VERNEED sh.flags = SHF_ALLOC - sh.addralign = uint64(Thearch.Regsize) + sh.addralign = uint64(SysArch.RegSize) sh.info = uint32(elfverneed) sh.link = uint32(elfshname(".dynstr").shnum) shsym(sh, Linklookup(Ctxt, ".gnu.version_r", 0)) @@ -2286,7 +2284,7 @@ func Asmbelf(symo int64) { sh.type_ = SHT_RELA sh.flags = SHF_ALLOC sh.entsize = ELF64RELASIZE - sh.addralign = uint64(Thearch.Regsize) + 
sh.addralign = uint64(SysArch.RegSize) sh.link = uint32(elfshname(".dynsym").shnum) sh.info = uint32(elfshname(".plt").shnum) shsym(sh, Linklookup(Ctxt, ".rela.plt", 0)) @@ -2350,15 +2348,15 @@ func Asmbelf(symo int64) { sh := elfshname(".got") sh.type_ = SHT_PROGBITS sh.flags = SHF_ALLOC + SHF_WRITE - sh.entsize = uint64(Thearch.Regsize) - sh.addralign = uint64(Thearch.Regsize) + sh.entsize = uint64(SysArch.RegSize) + sh.addralign = uint64(SysArch.RegSize) shsym(sh, Linklookup(Ctxt, ".got", 0)) sh = elfshname(".got.plt") sh.type_ = SHT_PROGBITS sh.flags = SHF_ALLOC + SHF_WRITE - sh.entsize = uint64(Thearch.Regsize) - sh.addralign = uint64(Thearch.Regsize) + sh.entsize = uint64(SysArch.RegSize) + sh.addralign = uint64(SysArch.RegSize) shsym(sh, Linklookup(Ctxt, ".got.plt", 0)) } @@ -2366,7 +2364,7 @@ func Asmbelf(symo int64) { sh.type_ = SHT_HASH sh.flags = SHF_ALLOC sh.entsize = 4 - sh.addralign = uint64(Thearch.Regsize) + sh.addralign = uint64(SysArch.RegSize) sh.link = uint32(elfshname(".dynsym").shnum) shsym(sh, Linklookup(Ctxt, ".hash", 0)) @@ -2375,8 +2373,8 @@ func Asmbelf(symo int64) { sh.type_ = SHT_DYNAMIC sh.flags = SHF_ALLOC + SHF_WRITE - sh.entsize = 2 * uint64(Thearch.Regsize) - sh.addralign = uint64(Thearch.Regsize) + sh.entsize = 2 * uint64(SysArch.RegSize) + sh.addralign = uint64(SysArch.RegSize) sh.link = uint32(elfshname(".dynstr").shnum) shsym(sh, Linklookup(Ctxt, ".dynamic", 0)) ph := newElfPhdr() @@ -2402,7 +2400,7 @@ func Asmbelf(symo int64) { ph.type_ = PT_TLS ph.flags = PF_R ph.memsz = tlssize - ph.align = uint64(Thearch.Regsize) + ph.align = uint64(SysArch.RegSize) } } } @@ -2411,12 +2409,12 @@ func Asmbelf(symo int64) { ph := newElfPhdr() ph.type_ = PT_GNU_STACK ph.flags = PF_W + PF_R - ph.align = uint64(Thearch.Regsize) + ph.align = uint64(SysArch.RegSize) ph = newElfPhdr() ph.type_ = PT_PAX_FLAGS ph.flags = 0x2a00 // mprotect, randexec, emutramp disabled - ph.align = uint64(Thearch.Regsize) + ph.align = uint64(SysArch.RegSize) } elfobj: 
@@ -2476,8 +2474,8 @@ elfobj: sh.type_ = SHT_SYMTAB sh.off = uint64(symo) sh.size = uint64(Symsize) - sh.addralign = uint64(Thearch.Regsize) - sh.entsize = 8 + 2*uint64(Thearch.Regsize) + sh.addralign = uint64(SysArch.RegSize) + sh.entsize = 8 + 2*uint64(SysArch.RegSize) sh.link = uint32(elfshname(".strtab").shnum) sh.info = uint32(elfglobalsymndx) @@ -2600,7 +2598,7 @@ func Elfadddynsym(ctxt *Link, s *LSym) { /* size of object */ Adduint64(ctxt, d, uint64(s.Size)) - if Thearch.Thechar == '6' && !s.Attr.CgoExportDynamic() && s.Dynimplib != "" && !seenlib[s.Dynimplib] { + if SysArch.Family == sys.AMD64 && !s.Attr.CgoExportDynamic() && s.Dynimplib != "" && !seenlib[s.Dynimplib] { Elfwritedynent(Linklookup(ctxt, ".dynamic", 0), DT_NEEDED, uint64(Addstring(Linklookup(ctxt, ".dynstr", 0), s.Dynimplib))) } } else { @@ -2628,9 +2626,9 @@ func Elfadddynsym(ctxt *Link, s *LSym) { t := STB_GLOBAL << 4 // TODO(mwhudson): presumably the behaviour should actually be the same on both arm and 386. - if Thearch.Thechar == '8' && s.Attr.CgoExport() && s.Type&obj.SMASK == obj.STEXT { + if SysArch.Family == sys.I386 && s.Attr.CgoExport() && s.Type&obj.SMASK == obj.STEXT { t |= STT_FUNC - } else if Thearch.Thechar == '5' && s.Attr.CgoExportDynamic() && s.Type&obj.SMASK == obj.STEXT { + } else if SysArch.Family == sys.ARM && s.Attr.CgoExportDynamic() && s.Type&obj.SMASK == obj.STEXT { t |= STT_FUNC } else { t |= STT_OBJECT diff --git a/src/cmd/link/internal/ld/ldelf.go b/src/cmd/link/internal/ld/ldelf.go index 0255331ac6..3aee2d5ece 100644 --- a/src/cmd/link/internal/ld/ldelf.go +++ b/src/cmd/link/internal/ld/ldelf.go @@ -3,6 +3,7 @@ package ld import ( "bytes" "cmd/internal/obj" + "cmd/internal/sys" "encoding/binary" "fmt" "io" @@ -546,47 +547,48 @@ func ldelf(f *obj.Biobuf, pkg string, length int64, pn string) { return } - switch Thearch.Thechar { + switch SysArch.Family { default: - Diag("%s: elf %s unimplemented", pn, Thestring) + Diag("%s: elf %s unimplemented", pn, SysArch.Name) 
return - case '0': + case sys.MIPS64: if elfobj.machine != ElfMachMips || hdr.Ident[4] != ElfClass64 { Diag("%s: elf object but not mips64", pn) return } - case '5': + case sys.ARM: if e != binary.LittleEndian || elfobj.machine != ElfMachArm || hdr.Ident[4] != ElfClass32 { Diag("%s: elf object but not arm", pn) return } - case '6': + case sys.AMD64: if e != binary.LittleEndian || elfobj.machine != ElfMachAmd64 || hdr.Ident[4] != ElfClass64 { Diag("%s: elf object but not amd64", pn) return } - case '7': + case sys.ARM64: if e != binary.LittleEndian || elfobj.machine != ElfMachArm64 || hdr.Ident[4] != ElfClass64 { Diag("%s: elf object but not arm64", pn) return } - case '8': + case sys.I386: if e != binary.LittleEndian || elfobj.machine != ElfMach386 || hdr.Ident[4] != ElfClass32 { Diag("%s: elf object but not 386", pn) return } - case '9': + case sys.PPC64: if elfobj.machine != ElfMachPower64 || hdr.Ident[4] != ElfClass64 { Diag("%s: elf object but not ppc64", pn) return } - case 'z': + + case sys.S390X: if elfobj.machine != ElfMachS390 || hdr.Ident[4] != ElfClass64 { Diag("%s: elf object but not s390x", pn) return @@ -1056,7 +1058,7 @@ func readelfsym(elfobj *ElfObj, i int, sym *ElfSym, needSym int) (err error) { } case ElfSymBindLocal: - if Thearch.Thechar == '5' && (strings.HasPrefix(sym.name, "$a") || strings.HasPrefix(sym.name, "$d")) { + if SysArch.Family == sys.ARM && (strings.HasPrefix(sym.name, "$a") || strings.HasPrefix(sym.name, "$d")) { // binutils for arm generate these mapping // symbols, ignore these break @@ -1127,7 +1129,9 @@ func (x rbyoff) Less(i, j int) bool { } func reltype(pn string, elftype int, siz *uint8) int { - switch uint32(Thearch.Thechar) | uint32(elftype)<<24 { + // TODO(mdempsky): Remove dependency on ArchFamily char values. 
+ + switch uint32(SysArch.Family) | uint32(elftype)<<24 { default: Diag("%s: unknown relocation type %d; compiled without -fpic?", pn, elftype) fallthrough diff --git a/src/cmd/link/internal/ld/ldmacho.go b/src/cmd/link/internal/ld/ldmacho.go index c4c13f13b9..9fbb2123af 100644 --- a/src/cmd/link/internal/ld/ldmacho.go +++ b/src/cmd/link/internal/ld/ldmacho.go @@ -2,6 +2,7 @@ package ld import ( "cmd/internal/obj" + "cmd/internal/sys" "encoding/binary" "fmt" "log" @@ -471,18 +472,18 @@ func ldmacho(f *obj.Biobuf, pkg string, length int64, pn string) { m.length = length m.name = pn - switch Thearch.Thechar { + switch SysArch.Family { default: - Diag("%s: mach-o %s unimplemented", pn, Thestring) + Diag("%s: mach-o %s unimplemented", pn, SysArch.Name) return - case '6': + case sys.AMD64: if e != binary.LittleEndian || m.cputype != LdMachoCpuAmd64 { Diag("%s: mach-o object but not amd64", pn) return } - case '8': + case sys.I386: if e != binary.LittleEndian || m.cputype != LdMachoCpu386 { Diag("%s: mach-o object but not 386", pn) return @@ -724,10 +725,9 @@ func ldmacho(f *obj.Biobuf, pkg string, length int64, pn string) { rp = &r[rpi] rel = §.rel[j] if rel.scattered != 0 { - if Thearch.Thechar != '8' { + if SysArch.Family != sys.I386 { // mach-o only uses scattered relocation on 32-bit platforms Diag("unexpected scattered relocation") - continue } @@ -821,7 +821,7 @@ func ldmacho(f *obj.Biobuf, pkg string, length int64, pn string) { rp.Off = int32(rel.addr) // Handle X86_64_RELOC_SIGNED referencing a section (rel->extrn == 0). - if Thearch.Thechar == '6' && rel.extrn == 0 && rel.type_ == 1 { + if SysArch.Family == sys.AMD64 && rel.extrn == 0 && rel.type_ == 1 { // Calculate the addend as the offset into the section. // // The rip-relative offset stored in the object file is encoded @@ -847,7 +847,7 @@ func ldmacho(f *obj.Biobuf, pkg string, length int64, pn string) { // For i386 Mach-O PC-relative, the addend is written such that // it *is* the PC being subtracted. 
Use that to make // it match our version of PC-relative. - if rel.pcrel != 0 && Thearch.Thechar == '8' { + if rel.pcrel != 0 && SysArch.Family == sys.I386 { rp.Add += int64(rp.Off) + int64(rp.Siz) } if rel.extrn == 0 { @@ -866,7 +866,7 @@ func ldmacho(f *obj.Biobuf, pkg string, length int64, pn string) { // include that information in the addend. // We only care about the delta from the // section base. - if Thearch.Thechar == '8' { + if SysArch.Family == sys.I386 { rp.Add -= int64(c.seg.sect[rel.symnum-1].addr) } } else { diff --git a/src/cmd/link/internal/ld/ldpe.go b/src/cmd/link/internal/ld/ldpe.go index 5c3e99c44f..ea0c482838 100644 --- a/src/cmd/link/internal/ld/ldpe.go +++ b/src/cmd/link/internal/ld/ldpe.go @@ -6,6 +6,7 @@ package ld import ( "cmd/internal/obj" + "cmd/internal/sys" "encoding/binary" "fmt" "log" @@ -492,7 +493,7 @@ func readpesym(peobj *PeObj, i int, y **PeSym) (err error) { if strings.HasPrefix(name, "__imp_") { name = name[6:] // __imp_Name => Name } - if Thearch.Thechar == '8' && name[0] == '_' { + if SysArch.Family == sys.I386 && name[0] == '_' { name = name[1:] // _Name => Name } } diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 5616700445..3e0bd8ebc4 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -34,6 +34,7 @@ import ( "bufio" "bytes" "cmd/internal/obj" + "cmd/internal/sys" "crypto/sha1" "debug/elf" "encoding/binary" @@ -82,14 +83,9 @@ import ( // THE SOFTWARE. 
type Arch struct { - Thechar int - Ptrsize int - Intsize int - Regsize int Funcalign int Maxalign int Minalign int - Minlc int Dwarfregsp int Dwarfreglr int Linuxdynld string @@ -191,8 +187,7 @@ func UseRelro() bool { } var ( - Thestring string - Thelinkarch *LinkArch + SysArch *sys.Arch outfile string dynexp []*LSym dynlib []string @@ -509,7 +504,7 @@ func loadlib() { } loadinternal("runtime") - if Thearch.Thechar == '5' { + if SysArch.Family == sys.ARM { loadinternal("math") } if flag_race != 0 { @@ -562,7 +557,7 @@ func loadlib() { // dependency problems when compiling natively (external linking requires // runtime/cgo, runtime/cgo requires cmd/cgo, but cmd/cgo needs to be // compiled using external linking.) - if (Thearch.Thechar == '5' || Thearch.Thechar == '7') && HEADTYPE == obj.Hdarwin && iscgo { + if SysArch.InFamily(sys.ARM, sys.ARM64) && HEADTYPE == obj.Hdarwin && iscgo { Linkmode = LinkExternal } @@ -621,7 +616,7 @@ func loadlib() { // a variable to hold g in assembly (currently only intel). if tlsg.Type == 0 { tlsg.Type = obj.STLSBSS - tlsg.Size = int64(Thearch.Ptrsize) + tlsg.Size = int64(SysArch.PtrSize) } else if tlsg.Type != obj.SDYNIMPORT { Diag("internal error: runtime declared tlsg variable %d", tlsg.Type) } @@ -639,7 +634,7 @@ func loadlib() { // In addition, on ARM, the runtime depends on the linker // recording the value of GOARM. - if Thearch.Thechar == '5' { + if SysArch.Family == sys.ARM { s := Linklookup(Ctxt, "runtime.goarm", 0) s.Type = obj.SRODATA @@ -1226,7 +1221,7 @@ func hostlink() { if Debug['s'] == 0 && debug_s == 0 && HEADTYPE == obj.Hdarwin { // Skip combining dwarf on arm. 
- if Thearch.Thechar != '5' && Thearch.Thechar != '7' { + if !SysArch.InFamily(sys.ARM, sys.ARM64) { dsym := filepath.Join(tmpdir, "go.dwarf") if out, err := exec.Command("dsymutil", "-f", outfile, "-o", dsym).CombinedOutput(); err != nil { Ctxt.Cursym = nil @@ -1254,14 +1249,14 @@ func hostlink() { // hostlinkArchArgs returns arguments to pass to the external linker // based on the architecture. func hostlinkArchArgs() []string { - switch Thearch.Thechar { - case '8': + switch SysArch.Family { + case sys.I386: return []string{"-m32"} - case '6', '9', 'z': + case sys.AMD64, sys.PPC64, sys.S390X: return []string{"-m64"} - case '5': + case sys.ARM: return []string{"-marm"} - case '7': + case sys.ARM64: // nothing needed } return nil @@ -1306,10 +1301,10 @@ func ldobj(f *obj.Biobuf, pkg string, length int64, pn string, file string, when if !strings.HasPrefix(line, "go object ") { if strings.HasSuffix(pn, ".go") { - Exitf("%cl: input %s is not .%c file (use %cg to compile .go files)", Thearch.Thechar, pn, Thearch.Thechar, Thearch.Thechar) + Exitf("%cl: input %s is not .%c file (use %cg to compile .go files)", SysArch.Family, pn, SysArch.Family, SysArch.Family) } - if line == Thestring { + if line == SysArch.Name { // old header format: just $GOOS Diag("%s: stale object file", pn) return nil @@ -1500,12 +1495,12 @@ func ldshlibsyms(shlib string) { // the type data. 
if strings.HasPrefix(lsym.Name, "type.") && !strings.HasPrefix(lsym.Name, "type..") { lsym.P = readelfsymboldata(f, &elfsym) - gcdata_locations[elfsym.Value+2*uint64(Thearch.Ptrsize)+8+1*uint64(Thearch.Ptrsize)] = lsym + gcdata_locations[elfsym.Value+2*uint64(SysArch.PtrSize)+8+1*uint64(SysArch.PtrSize)] = lsym } } } gcdata_addresses := make(map[*LSym]uint64) - if Thearch.Thechar == '7' { + if SysArch.Family == sys.ARM64 { for _, sect := range f.Sections { if sect.Type == elf.SHT_RELA { var rela elf.Rela64 @@ -1565,8 +1560,8 @@ func mywhatsys() { goos = obj.Getgoos() goarch = obj.Getgoarch() - if !strings.HasPrefix(goarch, Thestring) { - log.Fatalf("cannot use %cc with GOARCH=%s", Thearch.Thechar, goarch) + if !strings.HasPrefix(goarch, SysArch.Name) { + log.Fatalf("cannot use %cc with GOARCH=%s", SysArch.Family, goarch) } } @@ -1608,7 +1603,7 @@ func addsection(seg *Segment, name string, rwx int) *Section { sect.Rwx = uint8(rwx) sect.Name = name sect.Seg = seg - sect.Align = int32(Thearch.Ptrsize) // everything is at least pointer-aligned + sect.Align = int32(SysArch.PtrSize) // everything is at least pointer-aligned *l = sect return sect } @@ -1652,7 +1647,7 @@ func callsize() int { if haslinkregister() { return 0 } - return Thearch.Regsize + return SysArch.RegSize } func dostkcheck() { @@ -1986,7 +1981,7 @@ func genasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) { put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), s.Gotype) // NOTE(ality): acid can't produce a stack trace without .frame symbols - put(nil, ".frame", 'm', int64(s.Locals)+int64(Thearch.Ptrsize), 0, 0, nil) + put(nil, ".frame", 'm', int64(s.Locals)+int64(SysArch.PtrSize), 0, 0, nil) for _, a := range s.Autom { // Emit a or p according to actual offset, even if label is wrong. 
@@ -1999,7 +1994,7 @@ func genasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) { if a.Name == obj.A_PARAM { off = a.Aoffset } else { - off = a.Aoffset - int32(Thearch.Ptrsize) + off = a.Aoffset - int32(SysArch.PtrSize) } // FP @@ -2009,8 +2004,8 @@ func genasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) { } // SP - if off <= int32(-Thearch.Ptrsize) { - put(nil, a.Asym.Name, 'a', -(int64(off) + int64(Thearch.Ptrsize)), 0, 0, a.Gotype) + if off <= int32(-SysArch.PtrSize) { + put(nil, a.Asym.Name, 'a', -(int64(off) + int64(SysArch.PtrSize)), 0, 0, a.Gotype) continue } } diff --git a/src/cmd/link/internal/ld/link.go b/src/cmd/link/internal/ld/link.go index 67a855933e..f0811389d2 100644 --- a/src/cmd/link/internal/ld/link.go +++ b/src/cmd/link/internal/ld/link.go @@ -32,8 +32,8 @@ package ld import ( "cmd/internal/obj" + "cmd/internal/sys" "debug/elf" - "encoding/binary" "fmt" ) @@ -161,11 +161,9 @@ type Shlib struct { } type Link struct { - Thechar int32 - Thestring string Goarm int32 Headtype int - Arch *LinkArch + Arch *sys.Arch Debugvlog int32 Bso *obj.Biobuf Windows int32 @@ -196,15 +194,15 @@ type Link struct { // on the stack in the function prologue and so always have a pointer between // the hardware stack pointer and the local variable area. func (ctxt *Link) FixedFrameSize() int64 { - switch ctxt.Arch.Thechar { - case '6', '8': + switch ctxt.Arch.Family { + case sys.AMD64, sys.I386: return 0 - case '9': + case sys.PPC64: // PIC code on ppc64le requires 32 bytes of stack, and it's easier to // just use that much stack always on ppc64x. 
- return int64(4 * ctxt.Arch.Ptrsize) + return int64(4 * ctxt.Arch.PtrSize) default: - return int64(ctxt.Arch.Ptrsize) + return int64(ctxt.Arch.PtrSize) } } @@ -213,15 +211,6 @@ func (l *Link) IncVersion() { l.Hash = append(l.Hash, make(map[string]*LSym)) } -type LinkArch struct { - ByteOrder binary.ByteOrder - Name string - Thechar int - Minlc int - Ptrsize int - Regsize int -} - type Library struct { Objref string Srcref string diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index cafc6b0382..25d48fbf22 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -6,6 +6,7 @@ package ld import ( "cmd/internal/obj" + "cmd/internal/sys" "sort" "strings" ) @@ -131,15 +132,7 @@ var nsortsym int var load_budget int = INITIAL_MACHO_HEADR - 2*1024 func Machoinit() { - switch Thearch.Thechar { - // 64-bit architectures - case '6', '7', '9': - macho64 = true - - // 32-bit architectures - default: - break - } + macho64 = SysArch.RegSize == 8 } func getMachoHdr() *MachoHdr { @@ -356,8 +349,8 @@ func machoshbits(mseg *MachoSeg, sect *Section, segname string) { buf := "__" + strings.Replace(sect.Name[1:], ".", "_", -1) var msect *MachoSect - if sect.Rwx&1 == 0 && segname != "__DWARF" && (Thearch.Thechar == '7' || // arm64 - (Thearch.Thechar == '6' && (Buildmode == BuildmodeCShared || Buildmode == BuildmodeCArchive))) { // amd64 + if sect.Rwx&1 == 0 && segname != "__DWARF" && (SysArch.Family == sys.ARM64 || + (SysArch.Family == sys.AMD64 && (Buildmode == BuildmodeCShared || Buildmode == BuildmodeCArchive))) { // Darwin external linker on arm64 and on amd64 in c-shared/c-archive buildmode // complains about absolute relocs in __TEXT, so if the section is not // executable, put it in __DATA segment. 
@@ -422,23 +415,23 @@ func Asmbmacho() { va := INITTEXT - int64(HEADR) mh := getMachoHdr() - switch Thearch.Thechar { + switch SysArch.Family { default: - Exitf("unknown macho architecture: %v", Thearch.Thechar) + Exitf("unknown macho architecture: %v", SysArch.Family) - case '5': + case sys.ARM: mh.cpu = MACHO_CPU_ARM mh.subcpu = MACHO_SUBCPU_ARMV7 - case '6': + case sys.AMD64: mh.cpu = MACHO_CPU_AMD64 mh.subcpu = MACHO_SUBCPU_X86 - case '7': + case sys.ARM64: mh.cpu = MACHO_CPU_ARM64 mh.subcpu = MACHO_SUBCPU_ARM64_ALL - case '8': + case sys.I386: mh.cpu = MACHO_CPU_386 mh.subcpu = MACHO_SUBCPU_X86 } @@ -449,7 +442,7 @@ func Asmbmacho() { ms = newMachoSeg("", 40) ms.fileoffset = Segtext.Fileoff - if Thearch.Thechar == '5' || Buildmode == BuildmodeCArchive { + if SysArch.Family == sys.ARM || Buildmode == BuildmodeCArchive { ms.filesize = Segdata.Fileoff + Segdata.Filelen - Segtext.Fileoff } else { ms.filesize = Segdwarf.Fileoff + Segdwarf.Filelen - Segtext.Fileoff @@ -511,31 +504,31 @@ func Asmbmacho() { } if Linkmode != LinkExternal { - switch Thearch.Thechar { + switch SysArch.Family { default: - Exitf("unknown macho architecture: %v", Thearch.Thechar) + Exitf("unknown macho architecture: %v", SysArch.Family) - case '5': + case sys.ARM: ml := newMachoLoad(5, 17+2) /* unix thread */ ml.data[0] = 1 /* thread type */ ml.data[1] = 17 /* word count */ ml.data[2+15] = uint32(Entryvalue()) /* start pc */ - case '6': + case sys.AMD64: ml := newMachoLoad(5, 42+2) /* unix thread */ ml.data[0] = 4 /* thread type */ ml.data[1] = 42 /* word count */ ml.data[2+32] = uint32(Entryvalue()) /* start pc */ ml.data[2+32+1] = uint32(Entryvalue() >> 32) - case '7': + case sys.ARM64: ml := newMachoLoad(5, 68+2) /* unix thread */ ml.data[0] = 6 /* thread type */ ml.data[1] = 68 /* word count */ ml.data[2+64] = uint32(Entryvalue()) /* start pc */ ml.data[2+64+1] = uint32(Entryvalue() >> 32) - case '8': + case sys.I386: ml := newMachoLoad(5, 16+2) /* unix thread */ ml.data[0] = 1 /* 
thread type */ ml.data[1] = 16 /* word count */ @@ -546,7 +539,6 @@ func Asmbmacho() { if Debug['d'] == 0 { // must match domacholink below s1 := Linklookup(Ctxt, ".machosymtab", 0) - s2 := Linklookup(Ctxt, ".linkedit.plt", 0) s3 := Linklookup(Ctxt, ".linkedit.got", 0) s4 := Linklookup(Ctxt, ".machosymstr", 0) @@ -729,7 +721,7 @@ func machosymtab() { Adduint8(Ctxt, symtab, 0x01) // type N_EXT, external symbol Adduint8(Ctxt, symtab, 0) // no section Adduint16(Ctxt, symtab, 0) // desc - adduintxx(Ctxt, symtab, 0, Thearch.Ptrsize) // no value + adduintxx(Ctxt, symtab, 0, SysArch.PtrSize) // no value } else { if s.Attr.CgoExport() { Adduint8(Ctxt, symtab, 0x0f) @@ -747,7 +739,7 @@ func machosymtab() { Adduint8(Ctxt, symtab, uint8(o.Sect.Extnum)) } Adduint16(Ctxt, symtab, 0) // desc - adduintxx(Ctxt, symtab, uint64(Symaddr(s)), Thearch.Ptrsize) + adduintxx(Ctxt, symtab, uint64(Symaddr(s)), SysArch.PtrSize) } } } diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go index ff29ce2d70..471dda712f 100644 --- a/src/cmd/link/internal/ld/pcln.go +++ b/src/cmd/link/internal/ld/pcln.go @@ -93,7 +93,7 @@ func pciterinit(ctxt *Link, it *Pciter, d *Pcdata) { it.value = -1 it.start = 1 it.done = 0 - it.pcscale = uint32(ctxt.Arch.Minlc) + it.pcscale = uint32(ctxt.Arch.MinLC) pciternext(it) } @@ -242,12 +242,12 @@ func pclntab() { } pclntabNfunc = nfunc - Symgrow(Ctxt, ftab, 8+int64(Thearch.Ptrsize)+int64(nfunc)*2*int64(Thearch.Ptrsize)+int64(Thearch.Ptrsize)+4) + Symgrow(Ctxt, ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize)+int64(SysArch.PtrSize)+4) setuint32(Ctxt, ftab, 0, 0xfffffffb) - setuint8(Ctxt, ftab, 6, uint8(Thearch.Minlc)) - setuint8(Ctxt, ftab, 7, uint8(Thearch.Ptrsize)) - setuintxx(Ctxt, ftab, 8, uint64(nfunc), int64(Thearch.Ptrsize)) - pclntabPclntabOffset = int32(8 + Thearch.Ptrsize) + setuint8(Ctxt, ftab, 6, uint8(SysArch.MinLC)) + setuint8(Ctxt, ftab, 7, uint8(SysArch.PtrSize)) + setuintxx(Ctxt, ftab, 8, uint64(nfunc), 
int64(SysArch.PtrSize)) + pclntabPclntabOffset = int32(8 + SysArch.PtrSize) nfunc = 0 var last *LSym @@ -272,16 +272,16 @@ func pclntab() { } funcstart = int32(len(ftab.P)) - funcstart += int32(-len(ftab.P)) & (int32(Thearch.Ptrsize) - 1) + funcstart += int32(-len(ftab.P)) & (int32(SysArch.PtrSize) - 1) - setaddr(Ctxt, ftab, 8+int64(Thearch.Ptrsize)+int64(nfunc)*2*int64(Thearch.Ptrsize), Ctxt.Cursym) - setuintxx(Ctxt, ftab, 8+int64(Thearch.Ptrsize)+int64(nfunc)*2*int64(Thearch.Ptrsize)+int64(Thearch.Ptrsize), uint64(funcstart), int64(Thearch.Ptrsize)) + setaddr(Ctxt, ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize), Ctxt.Cursym) + setuintxx(Ctxt, ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize)+int64(SysArch.PtrSize), uint64(funcstart), int64(SysArch.PtrSize)) // fixed size of struct, checked below off = funcstart - end = funcstart + int32(Thearch.Ptrsize) + 3*4 + 5*4 + int32(len(pcln.Pcdata))*4 + int32(len(pcln.Funcdata))*int32(Thearch.Ptrsize) - if len(pcln.Funcdata) > 0 && (end&int32(Thearch.Ptrsize-1) != 0) { + end = funcstart + int32(SysArch.PtrSize) + 3*4 + 5*4 + int32(len(pcln.Pcdata))*4 + int32(len(pcln.Funcdata))*int32(SysArch.PtrSize) + if len(pcln.Funcdata) > 0 && (end&int32(SysArch.PtrSize-1) != 0) { end += 4 } Symgrow(Ctxt, ftab, int64(end)) @@ -330,25 +330,25 @@ func pclntab() { // funcdata, must be pointer-aligned and we're only int32-aligned. // Missing funcdata will be 0 (nil pointer). if len(pcln.Funcdata) > 0 { - if off&int32(Thearch.Ptrsize-1) != 0 { + if off&int32(SysArch.PtrSize-1) != 0 { off += 4 } for i = 0; i < int32(len(pcln.Funcdata)); i++ { if pcln.Funcdata[i] == nil { - setuintxx(Ctxt, ftab, int64(off)+int64(Thearch.Ptrsize)*int64(i), uint64(pcln.Funcdataoff[i]), int64(Thearch.Ptrsize)) + setuintxx(Ctxt, ftab, int64(off)+int64(SysArch.PtrSize)*int64(i), uint64(pcln.Funcdataoff[i]), int64(SysArch.PtrSize)) } else { // TODO: Dedup. 
funcdata_bytes += pcln.Funcdata[i].Size - setaddrplus(Ctxt, ftab, int64(off)+int64(Thearch.Ptrsize)*int64(i), pcln.Funcdata[i], pcln.Funcdataoff[i]) + setaddrplus(Ctxt, ftab, int64(off)+int64(SysArch.PtrSize)*int64(i), pcln.Funcdata[i], pcln.Funcdataoff[i]) } } - off += int32(len(pcln.Funcdata)) * int32(Thearch.Ptrsize) + off += int32(len(pcln.Funcdata)) * int32(SysArch.PtrSize) } if off != end { - Diag("bad math in functab: funcstart=%d off=%d but end=%d (npcdata=%d nfuncdata=%d ptrsize=%d)", funcstart, off, end, len(pcln.Pcdata), len(pcln.Funcdata), Thearch.Ptrsize) + Diag("bad math in functab: funcstart=%d off=%d but end=%d (npcdata=%d nfuncdata=%d ptrsize=%d)", funcstart, off, end, len(pcln.Pcdata), len(pcln.Funcdata), SysArch.PtrSize) errorexit() } @@ -357,14 +357,14 @@ func pclntab() { pclntabLastFunc = last // Final entry of table is just end pc. - setaddrplus(Ctxt, ftab, 8+int64(Thearch.Ptrsize)+int64(nfunc)*2*int64(Thearch.Ptrsize), last, last.Size) + setaddrplus(Ctxt, ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize), last, last.Size) // Start file table. 
start := int32(len(ftab.P)) - start += int32(-len(ftab.P)) & (int32(Thearch.Ptrsize) - 1) + start += int32(-len(ftab.P)) & (int32(SysArch.PtrSize) - 1) pclntabFiletabOffset = start - setuint32(Ctxt, ftab, 8+int64(Thearch.Ptrsize)+int64(nfunc)*2*int64(Thearch.Ptrsize)+int64(Thearch.Ptrsize), uint32(start)) + setuint32(Ctxt, ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize)+int64(SysArch.PtrSize), uint32(start)) Symgrow(Ctxt, ftab, int64(start)+(int64(Ctxt.Nhistfile)+1)*4) setuint32(Ctxt, ftab, int64(start), uint32(Ctxt.Nhistfile)) diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go index 56698361d0..0204b8c8c2 100644 --- a/src/cmd/link/internal/ld/pe.go +++ b/src/cmd/link/internal/ld/pe.go @@ -6,6 +6,7 @@ package ld import ( "cmd/internal/obj" + "cmd/internal/sys" "encoding/binary" "fmt" "os" @@ -419,9 +420,9 @@ func chksectseg(h *IMAGE_SECTION_HEADER, s *Segment) { func Peinit() { var l int - switch Thearch.Thechar { + switch SysArch.Family { // 64-bit architectures - case '6': + case sys.AMD64: pe64 = 1 l = binary.Size(&oh64) @@ -506,7 +507,7 @@ func initdynimport() *Dll { if err != nil { Diag("failed to parse stdcall decoration: %v", err) } - m.argsize *= Thearch.Ptrsize + m.argsize *= SysArch.PtrSize s.Extname = s.Extname[:i] } @@ -520,10 +521,10 @@ func initdynimport() *Dll { for d := dr; d != nil; d = d.next { for m = d.ms; m != nil; m = m.next { m.s.Type = obj.SDATA - Symgrow(Ctxt, m.s, int64(Thearch.Ptrsize)) + Symgrow(Ctxt, m.s, int64(SysArch.PtrSize)) dynName := m.s.Extname // only windows/386 requires stdcall decoration - if Thearch.Thechar == '8' && m.argsize >= 0 { + if SysArch.Family == sys.I386 && m.argsize >= 0 { dynName += fmt.Sprintf("@%d", m.argsize) } dynSym := Linklookup(Ctxt, dynName, 0) @@ -532,7 +533,7 @@ func initdynimport() *Dll { r := Addrel(m.s) r.Sym = dynSym r.Off = 0 - r.Siz = uint8(Thearch.Ptrsize) + r.Siz = uint8(SysArch.PtrSize) r.Type = obj.R_ADDR } } @@ -546,10 +547,10 @@ func 
initdynimport() *Dll { m.s.Sub = dynamic.Sub dynamic.Sub = m.s m.s.Value = dynamic.Size - dynamic.Size += int64(Thearch.Ptrsize) + dynamic.Size += int64(SysArch.PtrSize) } - dynamic.Size += int64(Thearch.Ptrsize) + dynamic.Size += int64(SysArch.PtrSize) } } @@ -946,7 +947,7 @@ func writePESymTableRecords() int { } // only windows/386 requires underscore prefix on external symbols - if Thearch.Thechar == '8' && + if SysArch.Family == sys.I386 && Linkmode == LinkExternal && (s.Type != obj.SDYNIMPORT || s.Attr.CgoExport()) && s.Name == s.Extname && @@ -1002,7 +1003,7 @@ func writePESymTableRecords() int { for d := dr; d != nil; d = d.next { for m := d.ms; m != nil; m = m.next { s := m.s.R[0].Xsym - put(s, s.Name, 'U', 0, int64(Thearch.Ptrsize), 0, nil) + put(s, s.Name, 'U', 0, int64(SysArch.PtrSize), 0, nil) } } @@ -1129,12 +1130,12 @@ func addinitarray() (c *IMAGE_SECTION_HEADER) { } func Asmbpe() { - switch Thearch.Thechar { + switch SysArch.Family { default: - Exitf("unknown PE architecture: %v", Thearch.Thechar) - case '6': + Exitf("unknown PE architecture: %v", SysArch.Family) + case sys.AMD64: fh.Machine = IMAGE_FILE_MACHINE_AMD64 - case '8': + case sys.I386: fh.Machine = IMAGE_FILE_MACHINE_I386 } diff --git a/src/cmd/link/internal/ld/pobj.go b/src/cmd/link/internal/ld/pobj.go index f48b54efda..b9902a5e5e 100644 --- a/src/cmd/link/internal/ld/pobj.go +++ b/src/cmd/link/internal/ld/pobj.go @@ -32,6 +32,7 @@ package ld import ( "cmd/internal/obj" + "cmd/internal/sys" "flag" "fmt" "os" @@ -44,9 +45,7 @@ var ( ) func Ldmain() { - Ctxt = linknew(Thelinkarch) - Ctxt.Thechar = int32(Thearch.Thechar) - Ctxt.Thestring = Thestring + Ctxt = linknew(SysArch) Ctxt.Diag = Diag Ctxt.Bso = &Bso @@ -70,7 +69,7 @@ func Ldmain() { } } - if Thearch.Thechar == '6' && obj.Getgoos() == "plan9" { + if SysArch.Family == sys.AMD64 && obj.Getgoos() == "plan9" { obj.Flagcount("8", "use 64-bit addresses in symbol table", &Debug['8']) } obj.Flagfn1("B", "add an ELF NT_GNU_BUILD_ID `note` 
when using ELF", addbuildinfo) @@ -107,7 +106,7 @@ func Ldmain() { obj.Flagcount("race", "enable race detector", &flag_race) obj.Flagcount("s", "disable symbol table", &Debug['s']) var flagShared int - if Thearch.Thechar == '5' || Thearch.Thechar == '6' { + if SysArch.InFamily(sys.ARM, sys.AMD64) { obj.Flagcount("shared", "generate shared object (implies -linkmode external)", &flagShared) } obj.Flagstr("tmpdir", "use `directory` for temporary files", &tmpdir) diff --git a/src/cmd/link/internal/ld/sym.go b/src/cmd/link/internal/ld/sym.go index 3deb94644e..76fe7dab79 100644 --- a/src/cmd/link/internal/ld/sym.go +++ b/src/cmd/link/internal/ld/sym.go @@ -33,6 +33,7 @@ package ld import ( "cmd/internal/obj" + "cmd/internal/sys" "log" "strconv" ) @@ -55,7 +56,7 @@ var headers = []struct { {"windowsgui", obj.Hwindows}, } -func linknew(arch *LinkArch) *Link { +func linknew(arch *sys.Arch) *Link { ctxt := &Link{ Hash: []map[string]*LSym{ // preallocate about 2mb for hash of @@ -98,33 +99,33 @@ func linknew(arch *LinkArch) *Link { obj.Hdragonfly, obj.Hsolaris: if obj.Getgoos() == "android" { - switch ctxt.Arch.Thechar { - case '6': + switch ctxt.Arch.Family { + case sys.AMD64: // Android/amd64 constant - offset from 0(FS) to our TLS slot. // Explained in src/runtime/cgo/gcc_android_*.c ctxt.Tlsoffset = 0x1d0 - case '8': + case sys.I386: // Android/386 constant - offset from 0(GS) to our TLS slot. 
ctxt.Tlsoffset = 0xf8 default: - ctxt.Tlsoffset = -1 * ctxt.Arch.Ptrsize + ctxt.Tlsoffset = -1 * ctxt.Arch.PtrSize } } else { - ctxt.Tlsoffset = -1 * ctxt.Arch.Ptrsize + ctxt.Tlsoffset = -1 * ctxt.Arch.PtrSize } case obj.Hnacl: - switch ctxt.Arch.Thechar { + switch ctxt.Arch.Family { default: log.Fatalf("unknown thread-local storage offset for nacl/%s", ctxt.Arch.Name) - case '5': + case sys.ARM: ctxt.Tlsoffset = 0 - case '6': + case sys.AMD64: ctxt.Tlsoffset = 0 - case '8': + case sys.I386: ctxt.Tlsoffset = -8 } @@ -133,26 +134,26 @@ func linknew(arch *LinkArch) *Link { * Explained in src/runtime/cgo/gcc_darwin_*.c. */ case obj.Hdarwin: - switch ctxt.Arch.Thechar { + switch ctxt.Arch.Family { default: log.Fatalf("unknown thread-local storage offset for darwin/%s", ctxt.Arch.Name) - case '5': + case sys.ARM: ctxt.Tlsoffset = 0 // dummy value, not needed - case '6': + case sys.AMD64: ctxt.Tlsoffset = 0x8a0 - case '7': + case sys.ARM64: ctxt.Tlsoffset = 0 // dummy value, not needed - case '8': + case sys.I386: ctxt.Tlsoffset = 0x468 } } // On arm, record goarm. 
- if ctxt.Arch.Thechar == '5' { + if ctxt.Arch.Family == sys.ARM { ctxt.Goarm = obj.Getgoarm() } diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index 167176cc2d..ecd5c741bb 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -32,6 +32,7 @@ package ld import ( "cmd/internal/obj" + "cmd/internal/sys" "fmt" "path/filepath" "strings" @@ -160,7 +161,7 @@ func putelfsym(x *LSym, s string, t int, addr int64, size int64, ver int, go_ *L if x.Type&obj.SHIDDEN != 0 { other = STV_HIDDEN } - if (Buildmode == BuildmodePIE || DynlinkingGo()) && Thearch.Thechar == '9' && type_ == STT_FUNC && x.Name != "runtime.duffzero" && x.Name != "runtime.duffcopy" { + if (Buildmode == BuildmodePIE || DynlinkingGo()) && SysArch.Family == sys.PPC64 && type_ == STT_FUNC && x.Name != "runtime.duffzero" && x.Name != "runtime.duffcopy" { // On ppc64 the top three bits of the st_other field indicate how // many instructions separate the global and local entry points. In // our case it is two instructions, indicated by the value 3. @@ -229,7 +230,7 @@ func putplan9sym(x *LSym, s string, t int, addr int64, size int64, ver int, go_ 'Z', 'm': l := 4 - if HEADTYPE == obj.Hplan9 && Thearch.Thechar == '6' && Debug['8'] == 0 { + if HEADTYPE == obj.Hplan9 && SysArch.Family == sys.AMD64 && Debug['8'] == 0 { Lputb(uint32(addr >> 32)) l = 8 } diff --git a/src/cmd/link/internal/mips64/asm.go b/src/cmd/link/internal/mips64/asm.go index d0977e9b00..9a145e373a 100644 --- a/src/cmd/link/internal/mips64/asm.go +++ b/src/cmd/link/internal/mips64/asm.go @@ -32,6 +32,7 @@ package mips64 import ( "cmd/internal/obj" + "cmd/internal/sys" "cmd/link/internal/ld" "encoding/binary" "fmt" @@ -82,8 +83,8 @@ func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int { // the first instruction is always at the lower address, this is endian neutral; // but note that o1 and o2 should still use the target endian. 
- o1 := ld.Thelinkarch.ByteOrder.Uint32(s.P[r.Off:]) - o2 := ld.Thelinkarch.ByteOrder.Uint32(s.P[r.Off+4:]) + o1 := ld.SysArch.ByteOrder.Uint32(s.P[r.Off:]) + o2 := ld.SysArch.ByteOrder.Uint32(s.P[r.Off+4:]) o1 = o1&0xffff0000 | uint32(t>>16)&0xffff o2 = o2&0xffff0000 | uint32(t)&0xffff @@ -99,7 +100,7 @@ func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int { obj.R_JMPMIPS: // Low 26 bits = (S + A) >> 2 t := ld.Symaddr(r.Sym) + r.Add - o1 := ld.Thelinkarch.ByteOrder.Uint32(s.P[r.Off:]) + o1 := ld.SysArch.ByteOrder.Uint32(s.P[r.Off:]) *val = int64(o1&0xfc000000 | uint32(t>>2)&^0xfc000000) return 0 } @@ -214,7 +215,7 @@ func asmb() { default: case obj.Hplan9: /* plan 9 */ magic := uint32(4*18*18 + 7) - if ld.Thestring == "mips64le" { + if ld.SysArch == sys.ArchMIPS64LE { magic = uint32(4*26*26 + 7) } ld.Thearch.Lput(uint32(magic)) /* magic */ diff --git a/src/cmd/link/internal/mips64/l.go b/src/cmd/link/internal/mips64/l.go index 003ee5ce71..f4191e69ab 100644 --- a/src/cmd/link/internal/mips64/l.go +++ b/src/cmd/link/internal/mips64/l.go @@ -62,11 +62,9 @@ package mips64 // THE SOFTWARE. 
const ( - thechar = '0' MaxAlign = 32 // max data alignment MinAlign = 1 // min data alignment FuncAlign = 8 - MINLC = 4 ) /* Used by ../internal/ld/dwarf.go */ diff --git a/src/cmd/link/internal/mips64/obj.go b/src/cmd/link/internal/mips64/obj.go index 57a1b2ab14..87bb3a079b 100644 --- a/src/cmd/link/internal/mips64/obj.go +++ b/src/cmd/link/internal/mips64/obj.go @@ -32,6 +32,7 @@ package mips64 import ( "cmd/internal/obj" + "cmd/internal/sys" "cmd/link/internal/ld" "fmt" "log" @@ -45,21 +46,15 @@ func Main() { } func linkarchinit() { - ld.Thestring = obj.Getgoarch() - if ld.Thestring == "mips64le" { - ld.Thelinkarch = &ld.Linkmips64le + if obj.Getgoarch() == "mips64le" { + ld.SysArch = sys.ArchMIPS64LE } else { - ld.Thelinkarch = &ld.Linkmips64 + ld.SysArch = sys.ArchMIPS64 } - ld.Thearch.Thechar = thechar - ld.Thearch.Ptrsize = ld.Thelinkarch.Ptrsize - ld.Thearch.Intsize = ld.Thelinkarch.Ptrsize - ld.Thearch.Regsize = ld.Thelinkarch.Regsize ld.Thearch.Funcalign = FuncAlign ld.Thearch.Maxalign = MaxAlign ld.Thearch.Minalign = MinAlign - ld.Thearch.Minlc = MINLC ld.Thearch.Dwarfregsp = DWARFREGSP ld.Thearch.Dwarfreglr = DWARFREGLR @@ -72,7 +67,7 @@ func linkarchinit() { ld.Thearch.Elfsetupplt = elfsetupplt ld.Thearch.Gentext = gentext ld.Thearch.Machoreloc1 = machoreloc1 - if ld.Thelinkarch == &ld.Linkmips64le { + if ld.SysArch == sys.ArchMIPS64LE { ld.Thearch.Lput = ld.Lputl ld.Thearch.Wput = ld.Wputl ld.Thearch.Vput = ld.Vputl diff --git a/src/cmd/link/internal/ppc64/l.go b/src/cmd/link/internal/ppc64/l.go index 622d6bb12e..a720993fbc 100644 --- a/src/cmd/link/internal/ppc64/l.go +++ b/src/cmd/link/internal/ppc64/l.go @@ -62,11 +62,9 @@ package ppc64 // THE SOFTWARE. 
const ( - thechar = '9' MaxAlign = 32 // max data alignment MinAlign = 1 // min data alignment FuncAlign = 8 - MINLC = 4 ) /* Used by ../internal/ld/dwarf.go */ diff --git a/src/cmd/link/internal/ppc64/obj.go b/src/cmd/link/internal/ppc64/obj.go index 539ab1ac02..a540ab85b5 100644 --- a/src/cmd/link/internal/ppc64/obj.go +++ b/src/cmd/link/internal/ppc64/obj.go @@ -32,6 +32,7 @@ package ppc64 import ( "cmd/internal/obj" + "cmd/internal/sys" "cmd/link/internal/ld" "fmt" "log" @@ -45,21 +46,15 @@ func Main() { } func linkarchinit() { - ld.Thestring = obj.Getgoarch() - if ld.Thestring == "ppc64le" { - ld.Thelinkarch = &ld.Linkppc64le + if obj.Getgoarch() == "ppc64le" { + ld.SysArch = sys.ArchPPC64LE } else { - ld.Thelinkarch = &ld.Linkppc64 + ld.SysArch = sys.ArchPPC64 } - ld.Thearch.Thechar = thechar - ld.Thearch.Ptrsize = ld.Thelinkarch.Ptrsize - ld.Thearch.Intsize = ld.Thelinkarch.Ptrsize - ld.Thearch.Regsize = ld.Thelinkarch.Regsize ld.Thearch.Funcalign = FuncAlign ld.Thearch.Maxalign = MaxAlign ld.Thearch.Minalign = MinAlign - ld.Thearch.Minlc = MINLC ld.Thearch.Dwarfregsp = DWARFREGSP ld.Thearch.Dwarfreglr = DWARFREGLR @@ -72,7 +67,7 @@ func linkarchinit() { ld.Thearch.Elfsetupplt = elfsetupplt ld.Thearch.Gentext = gentext ld.Thearch.Machoreloc1 = machoreloc1 - if ld.Thelinkarch == &ld.Linkppc64le { + if ld.SysArch == sys.ArchPPC64LE { ld.Thearch.Lput = ld.Lputl ld.Thearch.Wput = ld.Wputl ld.Thearch.Vput = ld.Vputl @@ -150,7 +145,7 @@ func archinit() { } case obj.Hlinux: /* ppc64 elf */ - if ld.Thestring == "ppc64" { + if ld.SysArch == sys.ArchPPC64 { ld.Debug['d'] = 1 // TODO(austin): ELF ABI v1 not supported yet } ld.Elfinit() diff --git a/src/cmd/link/internal/s390x/l.go b/src/cmd/link/internal/s390x/l.go index 839a9849c8..42cf15ee85 100644 --- a/src/cmd/link/internal/s390x/l.go +++ b/src/cmd/link/internal/s390x/l.go @@ -62,14 +62,9 @@ package s390x // THE SOFTWARE. 
const ( - thechar = 'z' - PtrSize = 8 - IntSize = 8 - RegSize = 8 MaxAlign = 32 // max data alignment MinAlign = 2 // min data alignment FuncAlign = 16 - MINLC = 2 ) /* Used by ../internal/ld/dwarf.go */ diff --git a/src/cmd/link/internal/s390x/obj.go b/src/cmd/link/internal/s390x/obj.go index ef88d22bbd..fdb9898181 100644 --- a/src/cmd/link/internal/s390x/obj.go +++ b/src/cmd/link/internal/s390x/obj.go @@ -32,6 +32,7 @@ package s390x import ( "cmd/internal/obj" + "cmd/internal/sys" "cmd/link/internal/ld" "fmt" ) @@ -44,17 +45,11 @@ func Main() { } func linkarchinit() { - ld.Thestring = obj.Getgoarch() - ld.Thelinkarch = &ld.Links390x + ld.SysArch = sys.ArchS390X - ld.Thearch.Thechar = thechar - ld.Thearch.Ptrsize = ld.Thelinkarch.Ptrsize - ld.Thearch.Intsize = ld.Thelinkarch.Ptrsize - ld.Thearch.Regsize = ld.Thelinkarch.Regsize ld.Thearch.Funcalign = FuncAlign ld.Thearch.Maxalign = MaxAlign ld.Thearch.Minalign = MinAlign - ld.Thearch.Minlc = MINLC ld.Thearch.Dwarfregsp = DWARFREGSP ld.Thearch.Dwarfreglr = DWARFREGLR diff --git a/src/cmd/link/internal/x86/asm.go b/src/cmd/link/internal/x86/asm.go index 7da5dd02be..91251de15e 100644 --- a/src/cmd/link/internal/x86/asm.go +++ b/src/cmd/link/internal/x86/asm.go @@ -292,7 +292,7 @@ func adddynrel(s *ld.LSym, r *ld.Reloc) { return } - if ld.HEADTYPE == obj.Hdarwin && s.Size == PtrSize && r.Off == 0 { + if ld.HEADTYPE == obj.Hdarwin && s.Size == int64(ld.SysArch.PtrSize) && r.Off == 0 { // Mach-O relocations are a royal pain to lay out. // They use a compact stateful bytecode representation // that is too much bother to deal with. 
@@ -317,7 +317,7 @@ func adddynrel(s *ld.LSym, r *ld.Reloc) { return } - if ld.HEADTYPE == obj.Hwindows && s.Size == PtrSize { + if ld.HEADTYPE == obj.Hwindows && s.Size == int64(ld.SysArch.PtrSize) { // nothing to do, the relocation will be laid out in pereloc1 return } diff --git a/src/cmd/link/internal/x86/l.go b/src/cmd/link/internal/x86/l.go index 068fed9c8d..2043f9bb4e 100644 --- a/src/cmd/link/internal/x86/l.go +++ b/src/cmd/link/internal/x86/l.go @@ -31,12 +31,9 @@ package x86 const ( - thechar = '8' - PtrSize = 4 MaxAlign = 32 // max data alignment MinAlign = 1 // min data alignment FuncAlign = 16 - MINLC = 1 ) /* Used by ../internal/ld/dwarf.go */ diff --git a/src/cmd/link/internal/x86/obj.go b/src/cmd/link/internal/x86/obj.go index 4380c41ebb..574c0dad2d 100644 --- a/src/cmd/link/internal/x86/obj.go +++ b/src/cmd/link/internal/x86/obj.go @@ -32,6 +32,7 @@ package x86 import ( "cmd/internal/obj" + "cmd/internal/sys" "cmd/link/internal/ld" "fmt" "log" @@ -45,17 +46,11 @@ func Main() { } func linkarchinit() { - ld.Thestring = "386" - ld.Thelinkarch = &ld.Link386 + ld.SysArch = sys.Arch386 - ld.Thearch.Thechar = thechar - ld.Thearch.Ptrsize = ld.Thelinkarch.Ptrsize - ld.Thearch.Intsize = ld.Thelinkarch.Ptrsize - ld.Thearch.Regsize = ld.Thelinkarch.Regsize ld.Thearch.Funcalign = FuncAlign ld.Thearch.Maxalign = MaxAlign ld.Thearch.Minalign = MinAlign - ld.Thearch.Minlc = MINLC ld.Thearch.Dwarfregsp = DWARFREGSP ld.Thearch.Dwarfreglr = DWARFREGLR -- cgit v1.3 From d481ffc1afeae8852caa3452a0e23b1cd90d1e10 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 6 Apr 2016 18:54:17 -0700 Subject: cmd/compile, cmd/link: eliminate uses of ArchFamily in error messages Two of these error messages are already dead code: cmd/compile.main and cmd/link.main already switch on $GOARCH, ensuring it must be a prefix of the sys.Arch.Family. 
The error message about uncompiled Go source files can be just be simplified: anyone who's manually constructing Go object file archives probably knows what tool to use to compile Go source files. Change-Id: Ia4a67c0a1d1158379c127c91e909226d3367f3c2 Reviewed-on: https://go-review.googlesource.com/21626 Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot Reviewed-by: Brad Fitzpatrick --- src/cmd/compile/internal/gc/main.go | 9 +-------- src/cmd/link/internal/ld/lib.go | 7 ++----- 2 files changed, 3 insertions(+), 13 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 72e6478afe..079f4916c7 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -93,14 +93,7 @@ func doversion() { func Main() { defer hidePanic() - // Allow GOARCH=thearch.thestring or GOARCH=thearch.thestringsuffix, - // but not other values. - p := obj.Getgoarch() - - if !strings.HasPrefix(p, Thearch.LinkArch.Name) { - log.Fatalf("cannot use %cg with GOARCH=%s", Thearch.LinkArch.Family, p) - } - goarch = p + goarch = obj.Getgoarch() Ctxt = obj.Linknew(Thearch.LinkArch) Ctxt.DiagFunc = Yyerror diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 3e0bd8ebc4..305a3bc0db 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -1301,7 +1301,8 @@ func ldobj(f *obj.Biobuf, pkg string, length int64, pn string, file string, when if !strings.HasPrefix(line, "go object ") { if strings.HasSuffix(pn, ".go") { - Exitf("%cl: input %s is not .%c file (use %cg to compile .go files)", SysArch.Family, pn, SysArch.Family, SysArch.Family) + Exitf("%s: uncompiled .go source file", pn) + return nil } if line == SysArch.Name { @@ -1559,10 +1560,6 @@ func mywhatsys() { goroot = obj.Getgoroot() goos = obj.Getgoos() goarch = obj.Getgoarch() - - if !strings.HasPrefix(goarch, SysArch.Name) { - log.Fatalf("cannot use %cc with GOARCH=%s", 
SysArch.Family, goarch) - } } // Copied from ../gc/subr.c:/^pathtoprefix; must stay in sync. -- cgit v1.3 From 4b7e36cdfe8e0c3579a2503a81474fe43db4db69 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 6 Apr 2016 21:45:29 -0700 Subject: cmd: extract obj's Biobuf code into new bio package API could still be made more Go-ey. Updates #15165. Change-Id: I514ffceffa43c293ae5d7e5f1e9193fda0098865 Reviewed-on: https://go-review.googlesource.com/21644 Reviewed-by: Brad Fitzpatrick Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot --- src/cmd/asm/internal/asm/endtoend_test.go | 5 +- src/cmd/asm/main.go | 5 +- src/cmd/compile/internal/gc/bexport.go | 10 +- src/cmd/compile/internal/gc/export.go | 8 +- src/cmd/compile/internal/gc/go.go | 5 +- src/cmd/compile/internal/gc/main.go | 5 +- src/cmd/compile/internal/gc/obj.go | 31 +++--- src/cmd/dist/buildtool.go | 1 + src/cmd/internal/bio/buf.go | 150 ++++++++++++++++++++++++++++++ src/cmd/internal/obj/link.go | 7 +- src/cmd/internal/obj/objfile.go | 9 +- src/cmd/internal/obj/util.go | 140 ---------------------------- src/cmd/link/internal/ld/ar.go | 15 +-- src/cmd/link/internal/ld/go.go | 5 +- src/cmd/link/internal/ld/ldelf.go | 13 +-- src/cmd/link/internal/ld/ldmacho.go | 23 ++--- src/cmd/link/internal/ld/ldpe.go | 27 +++--- src/cmd/link/internal/ld/lib.go | 83 ++++++++--------- src/cmd/link/internal/ld/link.go | 4 +- src/cmd/link/internal/ld/objfile.go | 9 +- src/cmd/link/internal/ld/pobj.go | 3 +- 21 files changed, 291 insertions(+), 267 deletions(-) create mode 100644 src/cmd/internal/bio/buf.go (limited to 'src/cmd/compile') diff --git a/src/cmd/asm/internal/asm/endtoend_test.go b/src/cmd/asm/internal/asm/endtoend_test.go index 1307c4243f..8986281f10 100644 --- a/src/cmd/asm/internal/asm/endtoend_test.go +++ b/src/cmd/asm/internal/asm/endtoend_test.go @@ -17,6 +17,7 @@ import ( "testing" "cmd/asm/internal/lex" + "cmd/internal/bio" "cmd/internal/obj" ) @@ -33,7 +34,7 @@ func testEndToEnd(t *testing.T, goarch, file 
string) { pList := obj.Linknewplist(ctxt) var ok bool testOut = new(bytes.Buffer) // The assembler writes test output to this buffer. - ctxt.Bso = obj.Binitw(os.Stdout) + ctxt.Bso = bio.BufWriter(os.Stdout) defer ctxt.Bso.Flush() failed := false ctxt.DiagFunc = func(format string, args ...interface{}) { @@ -271,7 +272,7 @@ func testErrors(t *testing.T, goarch, file string) { pList := obj.Linknewplist(ctxt) var ok bool testOut = new(bytes.Buffer) // The assembler writes test output to this buffer. - ctxt.Bso = obj.Binitw(os.Stdout) + ctxt.Bso = bio.BufWriter(os.Stdout) defer ctxt.Bso.Flush() failed := false var errBuf bytes.Buffer diff --git a/src/cmd/asm/main.go b/src/cmd/asm/main.go index 4e450bec98..75cb8f75d3 100644 --- a/src/cmd/asm/main.go +++ b/src/cmd/asm/main.go @@ -15,6 +15,7 @@ import ( "cmd/asm/internal/flags" "cmd/asm/internal/lex" + "cmd/internal/bio" "cmd/internal/obj" ) @@ -45,9 +46,9 @@ func main() { if *flags.Shared || *flags.Dynlink { ctxt.Flag_shared = 1 } - ctxt.Bso = obj.Binitw(os.Stdout) + ctxt.Bso = bio.BufWriter(os.Stdout) defer ctxt.Bso.Flush() - output := obj.Binitw(fd) + output := bio.BufWriter(fd) fmt.Fprintf(output, "go object %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion()) fmt.Fprintf(output, "!\n") diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 092cdac2f6..702090280f 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -92,7 +92,7 @@ package gc import ( "bytes" "cmd/compile/internal/big" - "cmd/internal/obj" + "cmd/internal/bio" "encoding/binary" "fmt" "sort" @@ -124,7 +124,7 @@ const exportVersion = "v0" const exportInlined = true // default: true type exporter struct { - out *obj.Biobuf + out *bio.Buf pkgIndex map[*Pkg]int typIndex map[*Type]int inlined []*Func @@ -136,7 +136,7 @@ type exporter struct { } // Export writes the exportlist for localpkg to out and returns the number of bytes written. 
-func Export(out *obj.Biobuf, trace bool) int { +func Export(out *bio.Buf, trace bool) int { p := exporter{ out: out, pkgIndex: make(map[*Pkg]int), @@ -1531,10 +1531,10 @@ func (p *exporter) byte(b byte) { fallthrough case '|': // write '|' as '|' '|' - obj.Bputc(p.out, '|') + p.out.WriteByte('|') p.written++ } - obj.Bputc(p.out, b) + p.out.WriteByte(b) p.written++ } diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 17311cf6af..5d4add8ff4 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -7,7 +7,7 @@ package gc import ( "bufio" "bytes" - "cmd/internal/obj" + "cmd/internal/bio" "fmt" "sort" "unicode" @@ -384,7 +384,7 @@ func dumpexport() { if debugFormat { // save a copy of the export data var copy bytes.Buffer - bcopy := obj.Binitw(©) + bcopy := bio.BufWriter(©) size = Export(bcopy, Debug_export != 0) bcopy.Flush() // flushing to bytes.Buffer cannot fail if n, err := bout.Write(copy.Bytes()); n != size || err != nil { @@ -577,7 +577,7 @@ func importtype(pt *Type, t *Type) { } func dumpasmhdr() { - b, err := obj.Bopenw(asmhdr) + b, err := bio.Create(asmhdr) if err != nil { Fatalf("%v", err) } @@ -604,5 +604,5 @@ func dumpasmhdr() { } } - obj.Bterm(b) + b.Close() } diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index ef8b516ea5..cd9db38fb4 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -6,6 +6,7 @@ package gc import ( "cmd/compile/internal/ssa" + "cmd/internal/bio" "cmd/internal/obj" ) @@ -132,7 +133,7 @@ var infile string var outfile string -var bout *obj.Biobuf +var bout *bio.Buf var nerrors int @@ -287,7 +288,7 @@ var Ctxt *obj.Link var writearchive int -var bstdout obj.Biobuf +var bstdout *bio.Buf var Nacl bool diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 079f4916c7..c8a778c34a 100644 --- a/src/cmd/compile/internal/gc/main.go +++ 
b/src/cmd/compile/internal/gc/main.go @@ -9,6 +9,7 @@ package gc import ( "bufio" "cmd/compile/internal/ssa" + "cmd/internal/bio" "cmd/internal/obj" "cmd/internal/sys" "flag" @@ -97,8 +98,8 @@ func Main() { Ctxt = obj.Linknew(Thearch.LinkArch) Ctxt.DiagFunc = Yyerror - Ctxt.Bso = &bstdout - bstdout = *obj.Binitw(os.Stdout) + bstdout = bio.BufWriter(os.Stdout) + Ctxt.Bso = bstdout localpkg = mkpkg("") localpkg.Prefix = "\"\"" diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 99eb73bd94..3920e25224 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -5,6 +5,7 @@ package gc import ( + "cmd/internal/bio" "cmd/internal/obj" "crypto/sha256" "fmt" @@ -23,7 +24,7 @@ func formathdr(arhdr []byte, name string, size int64) { func dumpobj() { var err error - bout, err = obj.Bopenw(outfile) + bout, err = bio.Create(outfile) if err != nil { Flusherrors() fmt.Printf("can't create %s: %v\n", outfile, err) @@ -33,10 +34,10 @@ func dumpobj() { startobj := int64(0) var arhdr [ArhdrSize]byte if writearchive != 0 { - obj.Bwritestring(bout, "!\n") + bout.WriteString("!\n") arhdr = [ArhdrSize]byte{} bout.Write(arhdr[:]) - startobj = obj.Boffset(bout) + startobj = bio.Boffset(bout) } fmt.Fprintf(bout, "go object %s %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring()) @@ -44,19 +45,19 @@ func dumpobj() { if writearchive != 0 { bout.Flush() - size := obj.Boffset(bout) - startobj + size := bio.Boffset(bout) - startobj if size&1 != 0 { - obj.Bputc(bout, 0) + bout.WriteByte(0) } - obj.Bseek(bout, startobj-ArhdrSize, 0) + bio.Bseek(bout, startobj-ArhdrSize, 0) formathdr(arhdr[:], "__.PKGDEF", size) bout.Write(arhdr[:]) bout.Flush() - obj.Bseek(bout, startobj+size+(size&1), 0) + bio.Bseek(bout, startobj+size+(size&1), 0) arhdr = [ArhdrSize]byte{} bout.Write(arhdr[:]) - startobj = obj.Boffset(bout) + startobj = bio.Boffset(bout) fmt.Fprintf(bout, "go object %s %s %s %s\n", obj.Getgoos(), 
obj.Getgoarch(), obj.Getgoversion(), obj.Expstring()) } @@ -91,16 +92,16 @@ func dumpobj() { if writearchive != 0 { bout.Flush() - size := obj.Boffset(bout) - startobj + size := bio.Boffset(bout) - startobj if size&1 != 0 { - obj.Bputc(bout, 0) + bout.WriteByte(0) } - obj.Bseek(bout, startobj-ArhdrSize, 0) + bio.Bseek(bout, startobj-ArhdrSize, 0) formathdr(arhdr[:], "_go_.o", size) bout.Write(arhdr[:]) } - obj.Bterm(bout) + bout.Close() } func dumpglobls() { @@ -132,9 +133,9 @@ func dumpglobls() { funcsyms = nil } -func Bputname(b *obj.Biobuf, s *obj.LSym) { - obj.Bwritestring(b, s.Name) - obj.Bputc(b, 0) +func Bputname(b *bio.Buf, s *obj.LSym) { + b.WriteString(s.Name) + b.WriteByte(0) } func Linksym(s *Sym) *obj.LSym { diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go index 123d5ccf82..777c92c726 100644 --- a/src/cmd/dist/buildtool.go +++ b/src/cmd/dist/buildtool.go @@ -38,6 +38,7 @@ var bootstrapDirs = []string{ "compile/internal/ppc64", "compile/internal/ssa", "compile/internal/x86", + "internal/bio", "internal/gcprog", "internal/obj", "internal/obj/arm", diff --git a/src/cmd/internal/bio/buf.go b/src/cmd/internal/bio/buf.go new file mode 100644 index 0000000000..a1df26ca9c --- /dev/null +++ b/src/cmd/internal/bio/buf.go @@ -0,0 +1,150 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bio implements seekable buffered I/O. +package bio + +import ( + "bufio" + "io" + "log" + "os" +) + +const EOF = -1 + +// Buf implements a seekable buffered I/O abstraction. 
+type Buf struct { + f *os.File + r *bufio.Reader + w *bufio.Writer +} + +func (b *Buf) Reader() *bufio.Reader { return b.r } +func (b *Buf) Writer() *bufio.Writer { return b.w } + +func Create(name string) (*Buf, error) { + f, err := os.Create(name) + if err != nil { + return nil, err + } + return &Buf{f: f, w: bufio.NewWriter(f)}, nil +} + +func Open(name string) (*Buf, error) { + f, err := os.Open(name) + if err != nil { + return nil, err + } + return &Buf{f: f, r: bufio.NewReader(f)}, nil +} + +func BufWriter(w io.Writer) *Buf { + return &Buf{w: bufio.NewWriter(w)} +} + +func BufReader(r io.Reader) *Buf { + return &Buf{r: bufio.NewReader(r)} +} + +func (b *Buf) Write(p []byte) (int, error) { + return b.w.Write(p) +} + +func (b *Buf) WriteString(p string) (int, error) { + return b.w.WriteString(p) +} + +func Bseek(b *Buf, offset int64, whence int) int64 { + if b.w != nil { + if err := b.w.Flush(); err != nil { + log.Fatalf("writing output: %v", err) + } + } else if b.r != nil { + if whence == 1 { + offset -= int64(b.r.Buffered()) + } + } + off, err := b.f.Seek(offset, whence) + if err != nil { + log.Fatalf("seeking in output: %v", err) + } + if b.r != nil { + b.r.Reset(b.f) + } + return off +} + +func Boffset(b *Buf) int64 { + if b.w != nil { + if err := b.w.Flush(); err != nil { + log.Fatalf("writing output: %v", err) + } + } + off, err := b.f.Seek(0, 1) + if err != nil { + log.Fatalf("seeking in output [0, 1]: %v", err) + } + if b.r != nil { + off -= int64(b.r.Buffered()) + } + return off +} + +func (b *Buf) Flush() error { + return b.w.Flush() +} + +func (b *Buf) WriteByte(c byte) error { + return b.w.WriteByte(c) +} + +func Bread(b *Buf, p []byte) int { + n, err := io.ReadFull(b.r, p) + if n == 0 { + if err != nil && err != io.EOF { + n = -1 + } + } + return n +} + +func Bgetc(b *Buf) int { + c, err := b.r.ReadByte() + if err != nil { + if err != io.EOF { + log.Fatalf("reading input: %v", err) + } + return EOF + } + return int(c) +} + +func (b *Buf) Read(p 
[]byte) (int, error) { + return b.r.Read(p) +} + +func (b *Buf) Peek(n int) ([]byte, error) { + return b.r.Peek(n) +} + +func Brdline(b *Buf, delim int) string { + s, err := b.r.ReadBytes(byte(delim)) + if err != nil { + log.Fatalf("reading input: %v", err) + } + return string(s) +} + +func (b *Buf) Close() error { + var err error + if b.w != nil { + err = b.w.Flush() + } + err1 := b.f.Close() + if err == nil { + err = err1 + } + return err +} diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 81a5689aef..2c81ca2f08 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -30,7 +30,10 @@ package obj -import "cmd/internal/sys" +import ( + "cmd/internal/bio" + "cmd/internal/sys" +) // An Addr is an argument to an instruction. // The general forms and their encodings are: @@ -626,7 +629,7 @@ type Link struct { Flag_shared int32 Flag_dynlink bool Flag_optimize bool - Bso *Biobuf + Bso *bio.Buf Pathname string Goroot string Goroot_final string diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go index bdd3bfc826..405cbf446a 100644 --- a/src/cmd/internal/obj/objfile.go +++ b/src/cmd/internal/obj/objfile.go @@ -109,6 +109,7 @@ package obj import ( "bufio" + "cmd/internal/bio" "cmd/internal/sys" "fmt" "log" @@ -120,7 +121,7 @@ import ( // The Go and C compilers, and the assembler, call writeobj to write // out a Go object file. The linker does not call this; the linker // does not write out object files. 
-func Writeobjdirect(ctxt *Link, b *Biobuf) { +func Writeobjdirect(ctxt *Link, b *bio.Buf) { Flushplist(ctxt) WriteObjFile(ctxt, b) } @@ -373,16 +374,16 @@ func (w *objWriter) writeLengths() { w.writeInt(int64(w.nFile)) } -func newObjWriter(ctxt *Link, b *Biobuf) *objWriter { +func newObjWriter(ctxt *Link, b *bio.Buf) *objWriter { return &objWriter{ ctxt: ctxt, - wr: b.w, + wr: b.Writer(), vrefIdx: make(map[string]int), refIdx: make(map[string]int), } } -func WriteObjFile(ctxt *Link, b *Biobuf) { +func WriteObjFile(ctxt *Link, b *bio.Buf) { w := newObjWriter(ctxt, b) // Magic header diff --git a/src/cmd/internal/obj/util.go b/src/cmd/internal/obj/util.go index 245fab9690..04e6a76e1a 100644 --- a/src/cmd/internal/obj/util.go +++ b/src/cmd/internal/obj/util.go @@ -5,10 +5,8 @@ package obj import ( - "bufio" "bytes" "fmt" - "io" "log" "os" "strings" @@ -26,144 +24,6 @@ func Cputime() float64 { return time.Since(start).Seconds() } -type Biobuf struct { - f *os.File - r *bufio.Reader - w *bufio.Writer - linelen int -} - -func (b *Biobuf) Reader() *bufio.Reader { return b.r } - -func Bopenw(name string) (*Biobuf, error) { - f, err := os.Create(name) - if err != nil { - return nil, err - } - return &Biobuf{f: f, w: bufio.NewWriter(f)}, nil -} - -func Bopenr(name string) (*Biobuf, error) { - f, err := os.Open(name) - if err != nil { - return nil, err - } - return &Biobuf{f: f, r: bufio.NewReader(f)}, nil -} - -func Binitw(w io.Writer) *Biobuf { - return &Biobuf{w: bufio.NewWriter(w)} -} - -func Binitr(r io.Reader) *Biobuf { - return &Biobuf{r: bufio.NewReader(r)} -} - -func (b *Biobuf) Write(p []byte) (int, error) { - return b.w.Write(p) -} - -func Bwritestring(b *Biobuf, p string) (int, error) { - return b.w.WriteString(p) -} - -func Bseek(b *Biobuf, offset int64, whence int) int64 { - if b.w != nil { - if err := b.w.Flush(); err != nil { - log.Fatalf("writing output: %v", err) - } - } else if b.r != nil { - if whence == 1 { - offset -= int64(b.r.Buffered()) - } - } - 
off, err := b.f.Seek(offset, whence) - if err != nil { - log.Fatalf("seeking in output: %v", err) - } - if b.r != nil { - b.r.Reset(b.f) - } - return off -} - -func Boffset(b *Biobuf) int64 { - if b.w != nil { - if err := b.w.Flush(); err != nil { - log.Fatalf("writing output: %v", err) - } - } - off, err := b.f.Seek(0, 1) - if err != nil { - log.Fatalf("seeking in output [0, 1]: %v", err) - } - if b.r != nil { - off -= int64(b.r.Buffered()) - } - return off -} - -func (b *Biobuf) Flush() error { - return b.w.Flush() -} - -func Bputc(b *Biobuf, c byte) { - b.w.WriteByte(c) -} - -const Beof = -1 - -func Bread(b *Biobuf, p []byte) int { - n, err := io.ReadFull(b.r, p) - if n == 0 { - if err != nil && err != io.EOF { - n = -1 - } - } - return n -} - -func Bgetc(b *Biobuf) int { - c, err := b.r.ReadByte() - if err != nil { - return -1 - } - return int(c) -} - -func (b *Biobuf) Read(p []byte) (int, error) { - return b.r.Read(p) -} - -func (b *Biobuf) Peek(n int) ([]byte, error) { - return b.r.Peek(n) -} - -func Brdline(b *Biobuf, delim int) string { - s, err := b.r.ReadBytes(byte(delim)) - if err != nil { - log.Fatalf("reading input: %v", err) - } - b.linelen = len(s) - return string(s) -} - -func Blinelen(b *Biobuf) int { - return b.linelen -} - -func Bterm(b *Biobuf) error { - var err error - if b.w != nil { - err = b.w.Flush() - } - err1 := b.f.Close() - if err == nil { - err = err1 - } - return err -} - func envOr(key, value string) string { if x := os.Getenv(key); x != "" { return x diff --git a/src/cmd/link/internal/ld/ar.go b/src/cmd/link/internal/ld/ar.go index d07756071d..205773c7f8 100644 --- a/src/cmd/link/internal/ld/ar.go +++ b/src/cmd/link/internal/ld/ar.go @@ -31,6 +31,7 @@ package ld import ( + "cmd/internal/bio" "cmd/internal/obj" "encoding/binary" "fmt" @@ -62,7 +63,7 @@ type ArHdr struct { // define them. This is used for the compiler support library // libgcc.a. 
func hostArchive(name string) { - f, err := obj.Bopenr(name) + f, err := bio.Open(name) if err != nil { if os.IsNotExist(err) { // It's OK if we don't have a libgcc file at all. @@ -73,15 +74,15 @@ func hostArchive(name string) { } Exitf("cannot open file %s: %v", name, err) } - defer obj.Bterm(f) + defer f.Close() magbuf := make([]byte, len(ARMAG)) - if obj.Bread(f, magbuf) != len(magbuf) { + if bio.Bread(f, magbuf) != len(magbuf) { Exitf("file %s too short", name) } var arhdr ArHdr - l := nextar(f, obj.Boffset(f), &arhdr) + l := nextar(f, bio.Boffset(f), &arhdr) if l <= 0 { Exitf("%s missing armap", name) } @@ -117,7 +118,7 @@ func hostArchive(name string) { l = atolwhex(arhdr.size) h := ldobj(f, "libgcc", l, pname, name, ArchiveObj) - obj.Bseek(f, h.off, 0) + bio.Bseek(f, h.off, 0) h.ld(f, h.pkg, h.length, h.pn) } @@ -130,7 +131,7 @@ func hostArchive(name string) { type archiveMap map[string]uint64 // readArmap reads the archive symbol map. -func readArmap(filename string, f *obj.Biobuf, arhdr ArHdr) archiveMap { +func readArmap(filename string, f *bio.Buf, arhdr ArHdr) archiveMap { is64 := arhdr.name == "/SYM64/" wordSize := 4 if is64 { @@ -139,7 +140,7 @@ func readArmap(filename string, f *obj.Biobuf, arhdr ArHdr) archiveMap { l := atolwhex(arhdr.size) contents := make([]byte, l) - if obj.Bread(f, contents) != int(l) { + if bio.Bread(f, contents) != int(l) { Exitf("short read from %s", filename) } diff --git a/src/cmd/link/internal/ld/go.go b/src/cmd/link/internal/ld/go.go index 027e05d845..8bafaffd7c 100644 --- a/src/cmd/link/internal/ld/go.go +++ b/src/cmd/link/internal/ld/go.go @@ -8,6 +8,7 @@ package ld import ( "bytes" + "cmd/internal/bio" "cmd/internal/obj" "fmt" "os" @@ -26,7 +27,7 @@ func expandpkg(t0 string, pkg string) string { // once the dust settles, try to move some code to // libmach, so that other linkers and ar can share. 
-func ldpkg(f *obj.Biobuf, pkg string, length int64, filename string, whence int) { +func ldpkg(f *bio.Buf, pkg string, length int64, filename string, whence int) { var p0, p1 int if Debug['g'] != 0 { @@ -48,7 +49,7 @@ func ldpkg(f *obj.Biobuf, pkg string, length int64, filename string, whence int) } bdata := make([]byte, length) - if int64(obj.Bread(f, bdata)) != length { + if int64(bio.Bread(f, bdata)) != length { fmt.Fprintf(os.Stderr, "%s: short pkg read %s\n", os.Args[0], filename) if Debug['u'] != 0 { errorexit() diff --git a/src/cmd/link/internal/ld/ldelf.go b/src/cmd/link/internal/ld/ldelf.go index 485599be62..eafc6930d5 100644 --- a/src/cmd/link/internal/ld/ldelf.go +++ b/src/cmd/link/internal/ld/ldelf.go @@ -2,6 +2,7 @@ package ld import ( "bytes" + "cmd/internal/bio" "cmd/internal/obj" "cmd/internal/sys" "encoding/binary" @@ -267,7 +268,7 @@ type ElfSect struct { } type ElfObj struct { - f *obj.Biobuf + f *bio.Buf base int64 // offset in f where ELF begins length int64 // length of ELF is64 int @@ -446,13 +447,13 @@ func parseArmAttributes(e binary.ByteOrder, data []byte) { } } -func ldelf(f *obj.Biobuf, pkg string, length int64, pn string) { +func ldelf(f *bio.Buf, pkg string, length int64, pn string) { if Debug['v'] != 0 { fmt.Fprintf(&Bso, "%5.2f ldelf %s\n", obj.Cputime(), pn) } Ctxt.IncVersion() - base := int32(obj.Boffset(f)) + base := int32(bio.Boffset(f)) var add uint64 var e binary.ByteOrder @@ -475,7 +476,7 @@ func ldelf(f *obj.Biobuf, pkg string, length int64, pn string) { var sect *ElfSect var sym ElfSym var symbols []*LSym - if obj.Bread(f, hdrbuf[:]) != len(hdrbuf) { + if bio.Bread(f, hdrbuf[:]) != len(hdrbuf) { goto bad } hdr = new(ElfHdrBytes) @@ -600,7 +601,7 @@ func ldelf(f *obj.Biobuf, pkg string, length int64, pn string) { elfobj.nsect = uint(elfobj.shnum) for i := 0; uint(i) < elfobj.nsect; i++ { - if obj.Bseek(f, int64(uint64(base)+elfobj.shoff+uint64(int64(i)*int64(elfobj.shentsize))), 0) < 0 { + if bio.Bseek(f, 
int64(uint64(base)+elfobj.shoff+uint64(int64(i)*int64(elfobj.shentsize))), 0) < 0 { goto bad } sect = &elfobj.sect[i] @@ -986,7 +987,7 @@ func elfmap(elfobj *ElfObj, sect *ElfSect) (err error) { sect.base = make([]byte, sect.size) err = fmt.Errorf("short read") - if obj.Bseek(elfobj.f, int64(uint64(elfobj.base)+sect.off), 0) < 0 || obj.Bread(elfobj.f, sect.base) != len(sect.base) { + if bio.Bseek(elfobj.f, int64(uint64(elfobj.base)+sect.off), 0) < 0 || bio.Bread(elfobj.f, sect.base) != len(sect.base) { return err } diff --git a/src/cmd/link/internal/ld/ldmacho.go b/src/cmd/link/internal/ld/ldmacho.go index 9fbb2123af..6376116d04 100644 --- a/src/cmd/link/internal/ld/ldmacho.go +++ b/src/cmd/link/internal/ld/ldmacho.go @@ -1,6 +1,7 @@ package ld import ( + "cmd/internal/bio" "cmd/internal/obj" "cmd/internal/sys" "encoding/binary" @@ -42,7 +43,7 @@ const ( ) type LdMachoObj struct { - f *obj.Biobuf + f *bio.Buf base int64 // off in f where Mach-O begins length int64 // length of Mach-O is64 bool @@ -298,7 +299,7 @@ func macholoadrel(m *LdMachoObj, sect *LdMachoSect) int { rel := make([]LdMachoRel, sect.nreloc) n := int(sect.nreloc * 8) buf := make([]byte, n) - if obj.Bseek(m.f, m.base+int64(sect.reloff), 0) < 0 || obj.Bread(m.f, buf) != n { + if bio.Bseek(m.f, m.base+int64(sect.reloff), 0) < 0 || bio.Bread(m.f, buf) != n { return -1 } var p []byte @@ -344,7 +345,7 @@ func macholoaddsym(m *LdMachoObj, d *LdMachoDysymtab) int { n := int(d.nindirectsyms) p := make([]byte, n*4) - if obj.Bseek(m.f, m.base+int64(d.indirectsymoff), 0) < 0 || obj.Bread(m.f, p) != len(p) { + if bio.Bseek(m.f, m.base+int64(d.indirectsymoff), 0) < 0 || bio.Bread(m.f, p) != len(p) { return -1 } @@ -361,7 +362,7 @@ func macholoadsym(m *LdMachoObj, symtab *LdMachoSymtab) int { } strbuf := make([]byte, symtab.strsize) - if obj.Bseek(m.f, m.base+int64(symtab.stroff), 0) < 0 || obj.Bread(m.f, strbuf) != len(strbuf) { + if bio.Bseek(m.f, m.base+int64(symtab.stroff), 0) < 0 || bio.Bread(m.f, strbuf) != 
len(strbuf) { return -1 } @@ -371,7 +372,7 @@ func macholoadsym(m *LdMachoObj, symtab *LdMachoSymtab) int { } n := int(symtab.nsym * uint32(symsize)) symbuf := make([]byte, n) - if obj.Bseek(m.f, m.base+int64(symtab.symoff), 0) < 0 || obj.Bread(m.f, symbuf) != len(symbuf) { + if bio.Bseek(m.f, m.base+int64(symtab.symoff), 0) < 0 || bio.Bread(m.f, symbuf) != len(symbuf) { return -1 } sym := make([]LdMachoSym, symtab.nsym) @@ -401,7 +402,7 @@ func macholoadsym(m *LdMachoObj, symtab *LdMachoSymtab) int { return 0 } -func ldmacho(f *obj.Biobuf, pkg string, length int64, pn string) { +func ldmacho(f *bio.Buf, pkg string, length int64, pn string) { var err error var j int var is64 bool @@ -431,8 +432,8 @@ func ldmacho(f *obj.Biobuf, pkg string, length int64, pn string) { var name string Ctxt.IncVersion() - base := obj.Boffset(f) - if obj.Bread(f, hdr[:]) != len(hdr) { + base := bio.Boffset(f) + if bio.Bread(f, hdr[:]) != len(hdr) { goto bad } @@ -455,7 +456,7 @@ func ldmacho(f *obj.Biobuf, pkg string, length int64, pn string) { if is64 { var tmp [4]uint8 - obj.Bread(f, tmp[:4]) // skip reserved word in header + bio.Bread(f, tmp[:4]) // skip reserved word in header } m = new(LdMachoObj) @@ -493,7 +494,7 @@ func ldmacho(f *obj.Biobuf, pkg string, length int64, pn string) { m.cmd = make([]LdMachoCmd, ncmd) off = uint32(len(hdr)) cmdp = make([]byte, cmdsz) - if obj.Bread(f, cmdp) != len(cmdp) { + if bio.Bread(f, cmdp) != len(cmdp) { err = fmt.Errorf("reading cmds: %v", err) goto bad } @@ -556,7 +557,7 @@ func ldmacho(f *obj.Biobuf, pkg string, length int64, pn string) { } dat = make([]byte, c.seg.filesz) - if obj.Bseek(f, m.base+int64(c.seg.fileoff), 0) < 0 || obj.Bread(f, dat) != len(dat) { + if bio.Bseek(f, m.base+int64(c.seg.fileoff), 0) < 0 || bio.Bread(f, dat) != len(dat) { err = fmt.Errorf("cannot load object data: %v", err) goto bad } diff --git a/src/cmd/link/internal/ld/ldpe.go b/src/cmd/link/internal/ld/ldpe.go index ea0c482838..e97e842e7f 100644 --- 
a/src/cmd/link/internal/ld/ldpe.go +++ b/src/cmd/link/internal/ld/ldpe.go @@ -5,6 +5,7 @@ package ld import ( + "cmd/internal/bio" "cmd/internal/obj" "cmd/internal/sys" "encoding/binary" @@ -117,7 +118,7 @@ type PeSect struct { } type PeObj struct { - f *obj.Biobuf + f *bio.Buf name string base uint32 sect []PeSect @@ -128,14 +129,14 @@ type PeObj struct { snames []byte } -func ldpe(f *obj.Biobuf, pkg string, length int64, pn string) { +func ldpe(f *bio.Buf, pkg string, length int64, pn string) { if Debug['v'] != 0 { fmt.Fprintf(&Bso, "%5.2f ldpe %s\n", obj.Cputime(), pn) } var sect *PeSect Ctxt.IncVersion() - base := int32(obj.Boffset(f)) + base := int32(bio.Boffset(f)) peobj := new(PeObj) peobj.f = f @@ -173,15 +174,15 @@ func ldpe(f *obj.Biobuf, pkg string, length int64, pn string) { // TODO return error if found .cormeta // load string table - obj.Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(peobj.fh.NumberOfSymbols), 0) + bio.Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(peobj.fh.NumberOfSymbols), 0) - if obj.Bread(f, symbuf[:4]) != 4 { + if bio.Bread(f, symbuf[:4]) != 4 { goto bad } l = Le32(symbuf[:]) peobj.snames = make([]byte, l) - obj.Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(peobj.fh.NumberOfSymbols), 0) - if obj.Bread(f, peobj.snames) != len(peobj.snames) { + bio.Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(peobj.fh.NumberOfSymbols), 0) + if bio.Bread(f, peobj.snames) != len(peobj.snames) { goto bad } @@ -201,10 +202,10 @@ func ldpe(f *obj.Biobuf, pkg string, length int64, pn string) { peobj.pesym = make([]PeSym, peobj.fh.NumberOfSymbols) peobj.npesym = uint(peobj.fh.NumberOfSymbols) - obj.Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable), 0) + bio.Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable), 0) for i := 0; uint32(i) < peobj.fh.NumberOfSymbols; i += numaux + 1 { - obj.Bseek(f, 
int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(i), 0) - if obj.Bread(f, symbuf[:]) != len(symbuf) { + bio.Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(i), 0) + if bio.Bread(f, symbuf[:]) != len(symbuf) { goto bad } @@ -289,10 +290,10 @@ func ldpe(f *obj.Biobuf, pkg string, length int64, pn string) { } r = make([]Reloc, rsect.sh.NumberOfRelocations) - obj.Bseek(f, int64(peobj.base)+int64(rsect.sh.PointerToRelocations), 0) + bio.Bseek(f, int64(peobj.base)+int64(rsect.sh.PointerToRelocations), 0) for j = 0; j < int(rsect.sh.NumberOfRelocations); j++ { rp = &r[j] - if obj.Bread(f, symbuf[:10]) != 10 { + if bio.Bread(f, symbuf[:10]) != 10 { goto bad } rva := Le32(symbuf[0:]) @@ -465,7 +466,7 @@ func pemap(peobj *PeObj, sect *PeSect) int { if sect.sh.PointerToRawData == 0 { // .bss doesn't have data in object file return 0 } - if obj.Bseek(peobj.f, int64(peobj.base)+int64(sect.sh.PointerToRawData), 0) < 0 || obj.Bread(peobj.f, sect.base) != len(sect.base) { + if bio.Bseek(peobj.f, int64(peobj.base)+int64(sect.sh.PointerToRawData), 0) < 0 || bio.Bread(peobj.f, sect.base) != len(sect.base) { return -1 } diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 305a3bc0db..789eaef1a5 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -33,6 +33,7 @@ package ld import ( "bufio" "bytes" + "cmd/internal/bio" "cmd/internal/obj" "cmd/internal/sys" "crypto/sha1" @@ -240,7 +241,7 @@ const ( var ( headstring string // buffered output - Bso obj.Biobuf + Bso bio.Buf ) type outBuf struct { @@ -738,13 +739,13 @@ func loadlib() { * look for the next file in an archive. * adapted from libmach. 
*/ -func nextar(bp *obj.Biobuf, off int64, a *ArHdr) int64 { +func nextar(bp *bio.Buf, off int64, a *ArHdr) int64 { if off&1 != 0 { off++ } - obj.Bseek(bp, off, 0) + bio.Bseek(bp, off, 0) buf := make([]byte, SAR_HDR) - if n := obj.Bread(bp, buf); n < len(buf) { + if n := bio.Bread(bp, buf); n < len(buf) { if n >= 0 { return 0 } @@ -773,25 +774,25 @@ func objfile(lib *Library) { fmt.Fprintf(&Bso, "%5.2f ldobj: %s (%s)\n", obj.Cputime(), lib.File, pkg) } Bso.Flush() - f, err := obj.Bopenr(lib.File) + f, err := bio.Open(lib.File) if err != nil { Exitf("cannot open file %s: %v", lib.File, err) } magbuf := make([]byte, len(ARMAG)) - if obj.Bread(f, magbuf) != len(magbuf) || !strings.HasPrefix(string(magbuf), ARMAG) { + if bio.Bread(f, magbuf) != len(magbuf) || !strings.HasPrefix(string(magbuf), ARMAG) { /* load it as a regular file */ - l := obj.Bseek(f, 0, 2) + l := bio.Bseek(f, 0, 2) - obj.Bseek(f, 0, 0) + bio.Bseek(f, 0, 0) ldobj(f, pkg, l, lib.File, lib.File, FileObj) - obj.Bterm(f) + f.Close() return } /* process __.PKGDEF */ - off := obj.Boffset(f) + off := bio.Boffset(f) var arhdr ArHdr l := nextar(f, off, &arhdr) @@ -807,12 +808,12 @@ func objfile(lib *Library) { } if Buildmode == BuildmodeShared { - before := obj.Boffset(f) + before := bio.Boffset(f) pkgdefBytes := make([]byte, atolwhex(arhdr.size)) - obj.Bread(f, pkgdefBytes) + bio.Bread(f, pkgdefBytes) hash := sha1.Sum(pkgdefBytes) lib.hash = hash[:] - obj.Bseek(f, before, 0) + bio.Bseek(f, before, 0) } off += l @@ -848,11 +849,11 @@ func objfile(lib *Library) { } out: - obj.Bterm(f) + f.Close() } type Hostobj struct { - ld func(*obj.Biobuf, string, int64, string) + ld func(*bio.Buf, string, int64, string) pkg string pn string file string @@ -873,7 +874,7 @@ var internalpkg = []string{ "runtime/msan", } -func ldhostobj(ld func(*obj.Biobuf, string, int64, string), f *obj.Biobuf, pkg string, length int64, pn string, file string) *Hostobj { +func ldhostobj(ld func(*bio.Buf, string, int64, string), f *bio.Buf, 
pkg string, length int64, pn string, file string) *Hostobj { isinternal := false for i := 0; i < len(internalpkg); i++ { if pkg == internalpkg[i] { @@ -904,26 +905,26 @@ func ldhostobj(ld func(*obj.Biobuf, string, int64, string), f *obj.Biobuf, pkg s h.pkg = pkg h.pn = pn h.file = file - h.off = obj.Boffset(f) + h.off = bio.Boffset(f) h.length = length return h } func hostobjs() { - var f *obj.Biobuf + var f *bio.Buf var h *Hostobj for i := 0; i < len(hostobj); i++ { h = &hostobj[i] var err error - f, err = obj.Bopenr(h.file) + f, err = bio.Open(h.file) if f == nil { Exitf("cannot reopen %s: %v", h.pn, err) } - obj.Bseek(f, h.off, 0) + bio.Bseek(f, h.off, 0) h.ld(f, h.pkg, h.length, h.pn) - obj.Bterm(f) + f.Close() } } @@ -1265,15 +1266,15 @@ func hostlinkArchArgs() []string { // ldobj loads an input object. If it is a host object (an object // compiled by a non-Go compiler) it returns the Hostobj pointer. If // it is a Go object, it returns nil. -func ldobj(f *obj.Biobuf, pkg string, length int64, pn string, file string, whence int) *Hostobj { - eof := obj.Boffset(f) + length +func ldobj(f *bio.Buf, pkg string, length int64, pn string, file string, whence int) *Hostobj { + eof := bio.Boffset(f) + length - start := obj.Boffset(f) - c1 := obj.Bgetc(f) - c2 := obj.Bgetc(f) - c3 := obj.Bgetc(f) - c4 := obj.Bgetc(f) - obj.Bseek(f, start, 0) + start := bio.Boffset(f) + c1 := bio.Bgetc(f) + c2 := bio.Bgetc(f) + c3 := bio.Bgetc(f) + c4 := bio.Bgetc(f) + bio.Bseek(f, start, 0) magic := uint32(c1)<<24 | uint32(c2)<<16 | uint32(c3)<<8 | uint32(c4) if magic == 0x7f454c46 { // \x7F E L F @@ -1289,12 +1290,8 @@ func ldobj(f *obj.Biobuf, pkg string, length int64, pn string, file string, when } /* check the header */ - line := obj.Brdline(f, '\n') + line := bio.Brdline(f, '\n') if line == "" { - if obj.Blinelen(f) > 0 { - Diag("%s: not an object file", pn) - return nil - } Diag("truncated object file: %s", pn) return nil } @@ -1337,28 +1334,28 @@ func ldobj(f *obj.Biobuf, pkg 
string, length int64, pn string, file string, when } /* skip over exports and other info -- ends with \n!\n */ - import0 := obj.Boffset(f) + import0 := bio.Boffset(f) c1 = '\n' // the last line ended in \n - c2 = obj.Bgetc(f) - c3 = obj.Bgetc(f) + c2 = bio.Bgetc(f) + c3 = bio.Bgetc(f) for c1 != '\n' || c2 != '!' || c3 != '\n' { c1 = c2 c2 = c3 - c3 = obj.Bgetc(f) - if c3 == obj.Beof { + c3 = bio.Bgetc(f) + if c3 == bio.EOF { Diag("truncated object file: %s", pn) return nil } } - import1 := obj.Boffset(f) + import1 := bio.Boffset(f) - obj.Bseek(f, import0, 0) + bio.Bseek(f, import0, 0) ldpkg(f, pkg, import1-import0-2, pn, whence) // -2 for !\n - obj.Bseek(f, import1, 0) + bio.Bseek(f, import1, 0) - LoadObjFile(Ctxt, f, pkg, eof-obj.Boffset(f), pn) + LoadObjFile(Ctxt, f, pkg, eof-bio.Boffset(f), pn) return nil } diff --git a/src/cmd/link/internal/ld/link.go b/src/cmd/link/internal/ld/link.go index f0811389d2..d3f9ed3703 100644 --- a/src/cmd/link/internal/ld/link.go +++ b/src/cmd/link/internal/ld/link.go @@ -31,7 +31,7 @@ package ld import ( - "cmd/internal/obj" + "cmd/internal/bio" "cmd/internal/sys" "debug/elf" "fmt" @@ -165,7 +165,7 @@ type Link struct { Headtype int Arch *sys.Arch Debugvlog int32 - Bso *obj.Biobuf + Bso *bio.Buf Windows int32 Goroot string diff --git a/src/cmd/link/internal/ld/objfile.go b/src/cmd/link/internal/ld/objfile.go index 8a406d17a6..6f177861f0 100644 --- a/src/cmd/link/internal/ld/objfile.go +++ b/src/cmd/link/internal/ld/objfile.go @@ -110,6 +110,7 @@ package ld import ( "bufio" "bytes" + "cmd/internal/bio" "cmd/internal/obj" "io" "log" @@ -146,8 +147,8 @@ type objReader struct { file []*LSym } -func LoadObjFile(ctxt *Link, f *obj.Biobuf, pkg string, length int64, pn string) { - start := obj.Boffset(f) +func LoadObjFile(ctxt *Link, f *bio.Buf, pkg string, length int64, pn string) { + start := bio.Boffset(f) r := &objReader{ rd: f.Reader(), pkg: pkg, @@ -156,8 +157,8 @@ func LoadObjFile(ctxt *Link, f *obj.Biobuf, pkg string, length 
int64, pn string) dupSym: &LSym{Name: ".dup"}, } r.loadObjFile() - if obj.Boffset(f) != start+length { - log.Fatalf("%s: unexpected end at %d, want %d", pn, int64(obj.Boffset(f)), int64(start+length)) + if bio.Boffset(f) != start+length { + log.Fatalf("%s: unexpected end at %d, want %d", pn, int64(bio.Boffset(f)), int64(start+length)) } } diff --git a/src/cmd/link/internal/ld/pobj.go b/src/cmd/link/internal/ld/pobj.go index b9902a5e5e..bb48f13185 100644 --- a/src/cmd/link/internal/ld/pobj.go +++ b/src/cmd/link/internal/ld/pobj.go @@ -31,6 +31,7 @@ package ld import ( + "cmd/internal/bio" "cmd/internal/obj" "cmd/internal/sys" "flag" @@ -49,7 +50,7 @@ func Ldmain() { Ctxt.Diag = Diag Ctxt.Bso = &Bso - Bso = *obj.Binitw(os.Stdout) + Bso = *bio.BufWriter(os.Stdout) Debug = [128]int{} nerrors = 0 outfile = "" -- cgit v1.3 From 8448d3aace7f26bd6eca14e8b89c5a981c2ab9d3 Mon Sep 17 00:00:00 2001 From: Alexandru Moșoi Date: Tue, 5 Apr 2016 23:32:49 +0200 Subject: cmd/compile: fold CMPconst and SHR MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fold the comparison when the SHR result is small. Useful for: - murmur mix like hashing where higher bits are desirable, i.e. 
hash = uint32(i * C) >> 18 - integer log2 via DeBruijn sequence: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogDeBruijn Change-Id: If70ae18cb86f4cc83ab6213f88ced03cc4986156 Reviewed-on: https://go-review.googlesource.com/21514 Run-TryBot: Alexandru Moșoi TryBot-Result: Gobot Gobot Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/gen/AMD64.rules | 2 ++ src/cmd/compile/internal/ssa/rewriteAMD64.go | 32 ++++++++++++++++++++++++++++ test/checkbce.go | 12 +++++++++++ 3 files changed, 46 insertions(+) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index b37720eb39..d7f361dc2e 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -1101,6 +1101,8 @@ (CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT) (CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c -> (FlagLT_ULT) (CMPQconst (MOVLQZX _) [c]) && 0xFFFFFFFF < c -> (FlagLT_ULT) +(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1< (FlagLT_ULT) +(CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1< (FlagLT_ULT) (CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT) (CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT) (CMPWconst (ANDWconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index a1d1e4edd9..34a393bbc5 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -2869,6 +2869,22 @@ func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagGT_UGT) return true } + // match: (CMPLconst (SHRLconst _ [c]) [n]) + // cond: 0 <= n && 0 < c && c <= 32 && (1<>27]) + useInt(b[uint64(i*0x07C4ACDD)>>58]) + useInt(a[uint(i*0x07C4ACDD)>>59]) + + // The following bounds 
should removed as they can overflow. + useInt(a[uint32(i*0x106297f105d0cc86)>>26]) // ERROR "Found IsInBounds$" + useInt(b[uint64(i*0x106297f105d0cc86)>>57]) // ERROR "Found IsInBounds$" + useInt(a[int32(i*0x106297f105d0cc86)>>26]) // ERROR "Found IsInBounds$" + useInt(b[int64(i*0x106297f105d0cc86)>>57]) // ERROR "Found IsInBounds$" +} + func g1(a []int) { for i := range a { a[i] = i -- cgit v1.3 From 68ac1f774624faf99e7f6ec6592acb50f23b7874 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 7 Apr 2016 10:21:35 -0700 Subject: cmd/compile: Fix constant-folding of unsigned shifts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make sure the results of unsigned constant-folded shifts are sign-extended into the AuxInt field. Fixes #15175 Change-Id: I3490d1bc3d9b2e1578ed30964645508577894f58 Reviewed-on: https://go-review.googlesource.com/21586 Reviewed-by: Alexandru Moșoi Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/ssa/gen/generic.rules | 6 +-- src/cmd/compile/internal/ssa/rewritegeneric.go | 12 ++--- test/fixedbugs/issue15175.go | 66 ++++++++++++++++++++++++++ 3 files changed, 75 insertions(+), 9 deletions(-) create mode 100644 test/fixedbugs/issue15175.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules index b56e3f1b2d..dacc2007c8 100644 --- a/src/cmd/compile/internal/ssa/gen/generic.rules +++ b/src/cmd/compile/internal/ssa/gen/generic.rules @@ -81,13 +81,13 @@ (Rsh64Ux64 (Const64 [c]) (Const64 [d])) -> (Const64 [int64(uint64(c) >> uint64(d))]) (Lsh32x64 (Const32 [c]) (Const64 [d])) -> (Const32 [int64(int32(c) << uint64(d))]) (Rsh32x64 (Const32 [c]) (Const64 [d])) -> (Const32 [int64(int32(c) >> uint64(d))]) -(Rsh32Ux64 (Const32 [c]) (Const64 [d])) -> (Const32 [int64(uint32(c) >> uint64(d))]) +(Rsh32Ux64 (Const32 [c]) (Const64 [d])) -> (Const32 [int64(int32(uint32(c) >> uint64(d)))]) (Lsh16x64 
(Const16 [c]) (Const64 [d])) -> (Const16 [int64(int16(c) << uint64(d))]) (Rsh16x64 (Const16 [c]) (Const64 [d])) -> (Const16 [int64(int16(c) >> uint64(d))]) -(Rsh16Ux64 (Const16 [c]) (Const64 [d])) -> (Const16 [int64(uint16(c) >> uint64(d))]) +(Rsh16Ux64 (Const16 [c]) (Const64 [d])) -> (Const16 [int64(int16(uint16(c) >> uint64(d)))]) (Lsh8x64 (Const8 [c]) (Const64 [d])) -> (Const8 [int64(int8(c) << uint64(d))]) (Rsh8x64 (Const8 [c]) (Const64 [d])) -> (Const8 [int64(int8(c) >> uint64(d))]) -(Rsh8Ux64 (Const8 [c]) (Const64 [d])) -> (Const8 [int64(uint8(c) >> uint64(d))]) +(Rsh8Ux64 (Const8 [c]) (Const64 [d])) -> (Const8 [int64(int8(uint8(c) >> uint64(d)))]) (Lsh64x64 (Const64 [0]) _) -> (Const64 [0]) (Rsh64x64 (Const64 [0]) _) -> (Const64 [0]) diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 932cb42235..9b0f43c414 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -6185,7 +6185,7 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value, config *Config) bool { _ = b // match: (Rsh16Ux64 (Const16 [c]) (Const64 [d])) // cond: - // result: (Const16 [int64(uint16(c) >> uint64(d))]) + // result: (Const16 [int64(int16(uint16(c) >> uint64(d)))]) for { v_0 := v.Args[0] if v_0.Op != OpConst16 { @@ -6198,7 +6198,7 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value, config *Config) bool { } d := v_1.AuxInt v.reset(OpConst16) - v.AuxInt = int64(uint16(c) >> uint64(d)) + v.AuxInt = int64(int16(uint16(c) >> uint64(d))) return true } // match: (Rsh16Ux64 (Const16 [0]) _) @@ -6547,7 +6547,7 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value, config *Config) bool { _ = b // match: (Rsh32Ux64 (Const32 [c]) (Const64 [d])) // cond: - // result: (Const32 [int64(uint32(c) >> uint64(d))]) + // result: (Const32 [int64(int32(uint32(c) >> uint64(d)))]) for { v_0 := v.Args[0] if v_0.Op != OpConst32 { @@ -6560,7 +6560,7 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value, config 
*Config) bool { } d := v_1.AuxInt v.reset(OpConst32) - v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) return true } // match: (Rsh32Ux64 (Const32 [0]) _) @@ -7353,7 +7353,7 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value, config *Config) bool { _ = b // match: (Rsh8Ux64 (Const8 [c]) (Const64 [d])) // cond: - // result: (Const8 [int64(uint8(c) >> uint64(d))]) + // result: (Const8 [int64(int8(uint8(c) >> uint64(d)))]) for { v_0 := v.Args[0] if v_0.Op != OpConst8 { @@ -7366,7 +7366,7 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value, config *Config) bool { } d := v_1.AuxInt v.reset(OpConst8) - v.AuxInt = int64(uint8(c) >> uint64(d)) + v.AuxInt = int64(int8(uint8(c) >> uint64(d))) return true } // match: (Rsh8Ux64 (Const8 [0]) _) diff --git a/test/fixedbugs/issue15175.go b/test/fixedbugs/issue15175.go new file mode 100644 index 0000000000..c6cab532f8 --- /dev/null +++ b/test/fixedbugs/issue15175.go @@ -0,0 +1,66 @@ +// run + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure unsigned shift results get sign-extended correctly. +package main + +import "fmt" + +func main() { + failed := false + a6 := uint8(253) + if got := a6 >> 0; got != 253 { + fmt.Printf("uint8(253)>>0 = %v, wanted 253\n", got) + failed = true + } + if got := f1(0, 2, 1, 0, 0, 1, true); got != 255 { + fmt.Printf("f1(...) = %v, wanted 255\n", got) + failed = true + } + if got := f2(1); got != 242 { + fmt.Printf("f2(...) = %v, wanted 242\n", got) + failed = true + } + if got := f3(false, 0, 0); got != 254 { + fmt.Printf("f3(...) 
= %v, wanted 254\n", got) + failed = true + } + if failed { + panic("bad") + } +} + +func f1(a1 uint, a2 int8, a3 int8, a4 int8, a5 uint8, a6 int, a7 bool) uint8 { + a5-- + a4 += (a2 << a1 << 2) | (a4 ^ a4<<(a1&a1)) - a3 // int8 + a6 -= a6 >> (2 + uint32(a2)>>3) // int + a1 += a1 // uint + a3 *= a4 << (a1 | a1) << (uint16(3) >> 2 & (1 - 0) & (uint16(1) << a5 << 3)) // int8 + a7 = a7 || ((a2 == a4) || (a7 && a7) || ((a5 == a5) || (a7 || a7))) // bool + return a5 >> a1 +} + +func f2(a1 uint8) uint8 { + a1-- + a1-- + a1 -= a1 + (a1 << 1) - (a1*a1*a1)<<(2-0+(3|3)-1) // uint8 + v1 := 0 * ((2 * 1) ^ 1) & ((uint(0) >> a1) + (2+0)*(uint(2)+0)) // uint + _ = v1 + return a1 >> (((2 ^ 2) >> (v1 | 2)) + 0) +} + +func f3(a1 bool, a2 uint, a3 int64) uint8 { + a3-- + v1 := 1 & (2 & 1 * (1 ^ 2) & (uint8(3*1) >> 0)) // uint8 + _ = v1 + v1 += v1 - (v1 >> a2) + (v1 << (a2 ^ a2) & v1) // uint8 + v1 *= v1 // uint8 + a3-- + v1 += v1 & v1 // uint8 + v1-- + v1 = ((v1 << 0) | v1>>0) + v1 // uint8 + return v1 >> 0 +} -- cgit v1.3 From 49e07f2b7e25a1f7a050f73fbb7807185e09e46b Mon Sep 17 00:00:00 2001 From: Dave Cheney Date: Fri, 8 Apr 2016 20:09:10 +1000 Subject: cmd/compile/internal/gc: unexport Export Export does not need to be exported. Change-Id: I252f0c024732f1d056817cab13e8e3c589b54d13 Reviewed-on: https://go-review.googlesource.com/21721 Run-TryBot: Dave Cheney TryBot-Result: Gobot Gobot Reviewed-by: Brad Fitzpatrick --- src/cmd/compile/internal/gc/bexport.go | 4 ++-- src/cmd/compile/internal/gc/export.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 702090280f..909ff14982 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -135,8 +135,8 @@ type exporter struct { trace bool } -// Export writes the exportlist for localpkg to out and returns the number of bytes written. 
-func Export(out *bio.Buf, trace bool) int { +// export writes the exportlist for localpkg to out and returns the number of bytes written. +func export(out *bio.Buf, trace bool) int { p := exporter{ out: out, pkgIndex: make(map[*Pkg]int), diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 5d4add8ff4..2f94b9c62f 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -385,7 +385,7 @@ func dumpexport() { // save a copy of the export data var copy bytes.Buffer bcopy := bio.BufWriter(©) - size = Export(bcopy, Debug_export != 0) + size = export(bcopy, Debug_export != 0) bcopy.Flush() // flushing to bytes.Buffer cannot fail if n, err := bout.Write(copy.Bytes()); n != size || err != nil { Fatalf("error writing export data: got %d bytes, want %d bytes, err = %v", n, size, err) @@ -407,7 +407,7 @@ func dumpexport() { pkgs = savedPkgs pkgMap = savedPkgMap } else { - size = Export(bout, Debug_export != 0) + size = export(bout, Debug_export != 0) } exportf("\n$$\n") } else { -- cgit v1.3 From d22357ce9dc650a69e78b37a6b25be1ee0b8b26c Mon Sep 17 00:00:00 2001 From: Michael Munday Date: Thu, 7 Apr 2016 15:31:49 -0400 Subject: cmd/compile: cleanup -dynlink/-shared support check Moves the list of architectures that support shared libraries into a function. Also adds s390x to that list. 
Change-Id: I99c8a9f6cd4816ce3d53abaabaf8d002e25e6b28 Reviewed-on: https://go-review.googlesource.com/21661 Reviewed-by: Matthew Dempsky Reviewed-by: Michael Hudson-Doyle Run-TryBot: Michael Munday --- src/cmd/compile/internal/gc/main.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index c8a778c34a..03143f5d0a 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -91,6 +91,12 @@ func doversion() { os.Exit(0) } +// supportsDynlink reports whether or not the code generator for the given +// architecture supports the -shared and -dynlink flags. +func supportsDynlink(arch *sys.Arch) bool { + return arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.S390X) +} + func Main() { defer hidePanic() @@ -195,15 +201,13 @@ func Main() { obj.Flagcount("y", "debug declarations in canned imports (with -d)", &Debug['y']) var flag_shared int var flag_dynlink bool - if Thearch.LinkArch.InFamily(sys.ARM, sys.AMD64, sys.ARM64, sys.I386, sys.PPC64) { + if supportsDynlink(Thearch.LinkArch.Arch) { obj.Flagcount("shared", "generate code that can be linked into a shared library", &flag_shared) + flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries") } if Thearch.LinkArch.Family == sys.AMD64 { obj.Flagcount("largemodel", "generate code that assumes a large memory model", &flag_largemodel) } - if Thearch.LinkArch.InFamily(sys.ARM, sys.AMD64, sys.ARM64, sys.I386, sys.PPC64) { - flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries") - } obj.Flagstr("cpuprofile", "write cpu profile to `file`", &cpuprofile) obj.Flagstr("memprofile", "write memory profile to `file`", &memprofile) obj.Flagint64("memprofilerate", "set runtime.MemProfileRate to `rate`", &memprofilerate) -- cgit v1.3 From 
8f2edf11998a30b497586ac0e9f75036a318280a Mon Sep 17 00:00:00 2001 From: Dave Cheney Date: Fri, 8 Apr 2016 19:14:03 +1000 Subject: cmd: replace bio.Buf with bio.Reader and bio.Writer Replace the bidirectional bio.Buf type with a pair of unidirectional buffered seekable Reader and Writers. Change-Id: I86664a06f93c94595dc67c2cbd21356feb6680ef Reviewed-on: https://go-review.googlesource.com/21720 Reviewed-by: Brad Fitzpatrick Run-TryBot: Dave Cheney TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/bexport.go | 4 +- src/cmd/compile/internal/gc/go.go | 4 +- src/cmd/compile/internal/gc/obj.go | 16 ++-- src/cmd/internal/bio/buf.go | 137 +++++++++++++++++++-------------- src/cmd/internal/obj/link.go | 2 +- src/cmd/internal/obj/objfile.go | 6 +- src/cmd/link/internal/ld/ar.go | 6 +- src/cmd/link/internal/ld/go.go | 2 +- src/cmd/link/internal/ld/ldelf.go | 10 +-- src/cmd/link/internal/ld/ldmacho.go | 16 ++-- src/cmd/link/internal/ld/ldpe.go | 18 ++--- src/cmd/link/internal/ld/lib.go | 49 ++++++------ src/cmd/link/internal/ld/link.go | 7 +- src/cmd/link/internal/ld/objfile.go | 8 +- 14 files changed, 154 insertions(+), 131 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 909ff14982..bb0a34e67b 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -124,7 +124,7 @@ const exportVersion = "v0" const exportInlined = true // default: true type exporter struct { - out *bio.Buf + out *bio.Writer pkgIndex map[*Pkg]int typIndex map[*Type]int inlined []*Func @@ -136,7 +136,7 @@ type exporter struct { } // export writes the exportlist for localpkg to out and returns the number of bytes written. 
-func export(out *bio.Buf, trace bool) int { +func export(out *bio.Writer, trace bool) int { p := exporter{ out: out, pkgIndex: make(map[*Pkg]int), diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index cd9db38fb4..ec7e219d95 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -133,7 +133,7 @@ var infile string var outfile string -var bout *bio.Buf +var bout *bio.Writer var nerrors int @@ -288,7 +288,7 @@ var Ctxt *obj.Link var writearchive int -var bstdout *bio.Buf +var bstdout *bio.Writer var Nacl bool diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 3920e25224..23c8be645c 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -37,7 +37,7 @@ func dumpobj() { bout.WriteString("!\n") arhdr = [ArhdrSize]byte{} bout.Write(arhdr[:]) - startobj = bio.Boffset(bout) + startobj = bout.Offset() } fmt.Fprintf(bout, "go object %s %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring()) @@ -45,19 +45,19 @@ func dumpobj() { if writearchive != 0 { bout.Flush() - size := bio.Boffset(bout) - startobj + size := bout.Offset() - startobj if size&1 != 0 { bout.WriteByte(0) } - bio.Bseek(bout, startobj-ArhdrSize, 0) + bout.Seek(startobj-ArhdrSize, 0) formathdr(arhdr[:], "__.PKGDEF", size) bout.Write(arhdr[:]) bout.Flush() - bio.Bseek(bout, startobj+size+(size&1), 0) + bout.Seek(startobj+size+(size&1), 0) arhdr = [ArhdrSize]byte{} bout.Write(arhdr[:]) - startobj = bio.Boffset(bout) + startobj = bout.Offset() fmt.Fprintf(bout, "go object %s %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring()) } @@ -92,11 +92,11 @@ func dumpobj() { if writearchive != 0 { bout.Flush() - size := bio.Boffset(bout) - startobj + size := bout.Offset() - startobj if size&1 != 0 { bout.WriteByte(0) } - bio.Bseek(bout, startobj-ArhdrSize, 0) + bout.Seek(startobj-ArhdrSize, 0) formathdr(arhdr[:], "_go_.o", size) 
bout.Write(arhdr[:]) } @@ -133,7 +133,7 @@ func dumpglobls() { funcsyms = nil } -func Bputname(b *bio.Buf, s *obj.LSym) { +func Bputname(b *bio.Writer, s *obj.LSym) { b.WriteString(s.Name) b.WriteByte(0) } diff --git a/src/cmd/internal/bio/buf.go b/src/cmd/internal/bio/buf.go index a1df26ca9c..0bd4658cdd 100644 --- a/src/cmd/internal/bio/buf.go +++ b/src/cmd/internal/bio/buf.go @@ -14,94 +14,116 @@ import ( const EOF = -1 -// Buf implements a seekable buffered I/O abstraction. -type Buf struct { +// Reader implements a seekable buffered io.Reader. +type Reader struct { f *os.File r *bufio.Reader +} + +// Writer implements a seekable buffered io.Writer. +type Writer struct { + f *os.File w *bufio.Writer } -func (b *Buf) Reader() *bufio.Reader { return b.r } -func (b *Buf) Writer() *bufio.Writer { return b.w } +// Reader returns this Reader's underlying bufio.Reader. +func (r *Reader) Reader() *bufio.Reader { return r.r } -func Create(name string) (*Buf, error) { +// Writer returns this Writer's underlying bufio.Writer. +func (w *Writer) Writer() *bufio.Writer { return w.w } + +// Create creates the file named name and returns a Writer +// for that file. +func Create(name string) (*Writer, error) { f, err := os.Create(name) if err != nil { return nil, err } - return &Buf{f: f, w: bufio.NewWriter(f)}, nil + return &Writer{f: f, w: bufio.NewWriter(f)}, nil } -func Open(name string) (*Buf, error) { +// Open returns a Reader for the file named name. +func Open(name string) (*Reader, error) { f, err := os.Open(name) if err != nil { return nil, err } - return &Buf{f: f, r: bufio.NewReader(f)}, nil + return &Reader{f: f, r: bufio.NewReader(f)}, nil } -func BufWriter(w io.Writer) *Buf { - return &Buf{w: bufio.NewWriter(w)} +// BufWriter returns a Writer on top of w. +// TODO(dfc) remove this method and replace caller with bufio.Writer. 
+func BufWriter(w io.Writer) *Writer { + return &Writer{w: bufio.NewWriter(w)} } -func BufReader(r io.Reader) *Buf { - return &Buf{r: bufio.NewReader(r)} +// BufWriter returns a Reader on top of r. +// TODO(dfc) remove this method and replace caller with bufio.Reader. +func BufReader(r io.Reader) *Reader { + return &Reader{r: bufio.NewReader(r)} } -func (b *Buf) Write(p []byte) (int, error) { - return b.w.Write(p) +func (w *Writer) Write(p []byte) (int, error) { + return w.w.Write(p) } -func (b *Buf) WriteString(p string) (int, error) { - return b.w.WriteString(p) +func (w *Writer) WriteString(p string) (int, error) { + return w.w.WriteString(p) } -func Bseek(b *Buf, offset int64, whence int) int64 { - if b.w != nil { - if err := b.w.Flush(); err != nil { - log.Fatalf("writing output: %v", err) - } - } else if b.r != nil { - if whence == 1 { - offset -= int64(b.r.Buffered()) - } +func (r *Reader) Seek(offset int64, whence int) int64 { + if whence == 1 { + offset -= int64(r.r.Buffered()) } - off, err := b.f.Seek(offset, whence) + off, err := r.f.Seek(offset, whence) if err != nil { log.Fatalf("seeking in output: %v", err) } - if b.r != nil { - b.r.Reset(b.f) - } + r.r.Reset(r.f) return off } -func Boffset(b *Buf) int64 { - if b.w != nil { - if err := b.w.Flush(); err != nil { - log.Fatalf("writing output: %v", err) - } +func (w *Writer) Seek(offset int64, whence int) int64 { + if err := w.w.Flush(); err != nil { + log.Fatalf("writing output: %v", err) } - off, err := b.f.Seek(0, 1) + off, err := w.f.Seek(offset, whence) + if err != nil { + log.Fatalf("seeking in output: %v", err) + } + return off +} + +func (r *Reader) Offset() int64 { + off, err := r.f.Seek(0, 1) if err != nil { log.Fatalf("seeking in output [0, 1]: %v", err) } - if b.r != nil { - off -= int64(b.r.Buffered()) + off -= int64(r.r.Buffered()) + return off +} + +func (w *Writer) Offset() int64 { + if err := w.w.Flush(); err != nil { + log.Fatalf("writing output: %v", err) + } + off, err := w.f.Seek(0, 
1) + if err != nil { + log.Fatalf("seeking in output [0, 1]: %v", err) } return off } -func (b *Buf) Flush() error { - return b.w.Flush() +func (w *Writer) Flush() error { + return w.w.Flush() } -func (b *Buf) WriteByte(c byte) error { - return b.w.WriteByte(c) +func (w *Writer) WriteByte(c byte) error { + return w.w.WriteByte(c) } -func Bread(b *Buf, p []byte) int { - n, err := io.ReadFull(b.r, p) +func Bread(r *Reader, p []byte) int { + n, err := io.ReadFull(r.r, p) if n == 0 { if err != nil && err != io.EOF { n = -1 @@ -110,8 +132,8 @@ func Bread(b *Buf, p []byte) int { return n } -func Bgetc(b *Buf) int { - c, err := b.r.ReadByte() +func Bgetc(r *Reader) int { + c, err := r.r.ReadByte() if err != nil { if err != io.EOF { log.Fatalf("reading input: %v", err) @@ -121,28 +143,29 @@ func Bgetc(b *Buf) int { return int(c) } -func (b *Buf) Read(p []byte) (int, error) { - return b.r.Read(p) +func (r *Reader) Read(p []byte) (int, error) { + return r.r.Read(p) } -func (b *Buf) Peek(n int) ([]byte, error) { - return b.r.Peek(n) +func (r *Reader) Peek(n int) ([]byte, error) { + return r.r.Peek(n) } -func Brdline(b *Buf, delim int) string { - s, err := b.r.ReadBytes(byte(delim)) +func Brdline(r *Reader, delim int) string { + s, err := r.r.ReadBytes(byte(delim)) if err != nil { log.Fatalf("reading input: %v", err) } return string(s) } -func (b *Buf) Close() error { - var err error - if b.w != nil { - err = b.w.Flush() - } - err1 := b.f.Close() +func (r *Reader) Close() error { + return r.f.Close() +} + +func (w *Writer) Close() error { + err := w.w.Flush() + err1 := w.f.Close() if err == nil { err = err1 } diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 2c81ca2f08..c48c3d807f 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -629,7 +629,7 @@ type Link struct { Flag_shared int32 Flag_dynlink bool Flag_optimize bool - Bso *bio.Buf + Bso *bio.Writer Pathname string Goroot string Goroot_final string diff --git 
a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go index 405cbf446a..ed6d75eba3 100644 --- a/src/cmd/internal/obj/objfile.go +++ b/src/cmd/internal/obj/objfile.go @@ -121,7 +121,7 @@ import ( // The Go and C compilers, and the assembler, call writeobj to write // out a Go object file. The linker does not call this; the linker // does not write out object files. -func Writeobjdirect(ctxt *Link, b *bio.Buf) { +func Writeobjdirect(ctxt *Link, b *bio.Writer) { Flushplist(ctxt) WriteObjFile(ctxt, b) } @@ -374,7 +374,7 @@ func (w *objWriter) writeLengths() { w.writeInt(int64(w.nFile)) } -func newObjWriter(ctxt *Link, b *bio.Buf) *objWriter { +func newObjWriter(ctxt *Link, b *bio.Writer) *objWriter { return &objWriter{ ctxt: ctxt, wr: b.Writer(), @@ -383,7 +383,7 @@ func newObjWriter(ctxt *Link, b *bio.Buf) *objWriter { } } -func WriteObjFile(ctxt *Link, b *bio.Buf) { +func WriteObjFile(ctxt *Link, b *bio.Writer) { w := newObjWriter(ctxt, b) // Magic header diff --git a/src/cmd/link/internal/ld/ar.go b/src/cmd/link/internal/ld/ar.go index 205773c7f8..6a0aeb121f 100644 --- a/src/cmd/link/internal/ld/ar.go +++ b/src/cmd/link/internal/ld/ar.go @@ -82,7 +82,7 @@ func hostArchive(name string) { } var arhdr ArHdr - l := nextar(f, bio.Boffset(f), &arhdr) + l := nextar(f, f.Offset(), &arhdr) if l <= 0 { Exitf("%s missing armap", name) } @@ -118,7 +118,7 @@ func hostArchive(name string) { l = atolwhex(arhdr.size) h := ldobj(f, "libgcc", l, pname, name, ArchiveObj) - bio.Bseek(f, h.off, 0) + f.Seek(h.off, 0) h.ld(f, h.pkg, h.length, h.pn) } @@ -131,7 +131,7 @@ func hostArchive(name string) { type archiveMap map[string]uint64 // readArmap reads the archive symbol map. 
-func readArmap(filename string, f *bio.Buf, arhdr ArHdr) archiveMap { +func readArmap(filename string, f *bio.Reader, arhdr ArHdr) archiveMap { is64 := arhdr.name == "/SYM64/" wordSize := 4 if is64 { diff --git a/src/cmd/link/internal/ld/go.go b/src/cmd/link/internal/ld/go.go index 8bafaffd7c..5dad90dae6 100644 --- a/src/cmd/link/internal/ld/go.go +++ b/src/cmd/link/internal/ld/go.go @@ -27,7 +27,7 @@ func expandpkg(t0 string, pkg string) string { // once the dust settles, try to move some code to // libmach, so that other linkers and ar can share. -func ldpkg(f *bio.Buf, pkg string, length int64, filename string, whence int) { +func ldpkg(f *bio.Reader, pkg string, length int64, filename string, whence int) { var p0, p1 int if Debug['g'] != 0 { diff --git a/src/cmd/link/internal/ld/ldelf.go b/src/cmd/link/internal/ld/ldelf.go index eafc6930d5..55884c07a2 100644 --- a/src/cmd/link/internal/ld/ldelf.go +++ b/src/cmd/link/internal/ld/ldelf.go @@ -268,7 +268,7 @@ type ElfSect struct { } type ElfObj struct { - f *bio.Buf + f *bio.Reader base int64 // offset in f where ELF begins length int64 // length of ELF is64 int @@ -447,13 +447,13 @@ func parseArmAttributes(e binary.ByteOrder, data []byte) { } } -func ldelf(f *bio.Buf, pkg string, length int64, pn string) { +func ldelf(f *bio.Reader, pkg string, length int64, pn string) { if Debug['v'] != 0 { fmt.Fprintf(&Bso, "%5.2f ldelf %s\n", obj.Cputime(), pn) } Ctxt.IncVersion() - base := int32(bio.Boffset(f)) + base := f.Offset() var add uint64 var e binary.ByteOrder @@ -601,7 +601,7 @@ func ldelf(f *bio.Buf, pkg string, length int64, pn string) { elfobj.nsect = uint(elfobj.shnum) for i := 0; uint(i) < elfobj.nsect; i++ { - if bio.Bseek(f, int64(uint64(base)+elfobj.shoff+uint64(int64(i)*int64(elfobj.shentsize))), 0) < 0 { + if f.Seek(int64(uint64(base)+elfobj.shoff+uint64(int64(i)*int64(elfobj.shentsize))), 0) < 0 { goto bad } sect = &elfobj.sect[i] @@ -987,7 +987,7 @@ func elfmap(elfobj *ElfObj, sect *ElfSect) (err error) 
{ sect.base = make([]byte, sect.size) err = fmt.Errorf("short read") - if bio.Bseek(elfobj.f, int64(uint64(elfobj.base)+sect.off), 0) < 0 || bio.Bread(elfobj.f, sect.base) != len(sect.base) { + if elfobj.f.Seek(int64(uint64(elfobj.base)+sect.off), 0) < 0 || bio.Bread(elfobj.f, sect.base) != len(sect.base) { return err } diff --git a/src/cmd/link/internal/ld/ldmacho.go b/src/cmd/link/internal/ld/ldmacho.go index 6376116d04..dffe6f69ce 100644 --- a/src/cmd/link/internal/ld/ldmacho.go +++ b/src/cmd/link/internal/ld/ldmacho.go @@ -43,7 +43,7 @@ const ( ) type LdMachoObj struct { - f *bio.Buf + f *bio.Reader base int64 // off in f where Mach-O begins length int64 // length of Mach-O is64 bool @@ -299,7 +299,7 @@ func macholoadrel(m *LdMachoObj, sect *LdMachoSect) int { rel := make([]LdMachoRel, sect.nreloc) n := int(sect.nreloc * 8) buf := make([]byte, n) - if bio.Bseek(m.f, m.base+int64(sect.reloff), 0) < 0 || bio.Bread(m.f, buf) != n { + if m.f.Seek(m.base+int64(sect.reloff), 0) < 0 || bio.Bread(m.f, buf) != n { return -1 } var p []byte @@ -345,7 +345,7 @@ func macholoaddsym(m *LdMachoObj, d *LdMachoDysymtab) int { n := int(d.nindirectsyms) p := make([]byte, n*4) - if bio.Bseek(m.f, m.base+int64(d.indirectsymoff), 0) < 0 || bio.Bread(m.f, p) != len(p) { + if m.f.Seek(m.base+int64(d.indirectsymoff), 0) < 0 || bio.Bread(m.f, p) != len(p) { return -1 } @@ -362,7 +362,7 @@ func macholoadsym(m *LdMachoObj, symtab *LdMachoSymtab) int { } strbuf := make([]byte, symtab.strsize) - if bio.Bseek(m.f, m.base+int64(symtab.stroff), 0) < 0 || bio.Bread(m.f, strbuf) != len(strbuf) { + if m.f.Seek(m.base+int64(symtab.stroff), 0) < 0 || bio.Bread(m.f, strbuf) != len(strbuf) { return -1 } @@ -372,7 +372,7 @@ func macholoadsym(m *LdMachoObj, symtab *LdMachoSymtab) int { } n := int(symtab.nsym * uint32(symsize)) symbuf := make([]byte, n) - if bio.Bseek(m.f, m.base+int64(symtab.symoff), 0) < 0 || bio.Bread(m.f, symbuf) != len(symbuf) { + if m.f.Seek(m.base+int64(symtab.symoff), 0) < 0 || 
bio.Bread(m.f, symbuf) != len(symbuf) { return -1 } sym := make([]LdMachoSym, symtab.nsym) @@ -402,7 +402,7 @@ func macholoadsym(m *LdMachoObj, symtab *LdMachoSymtab) int { return 0 } -func ldmacho(f *bio.Buf, pkg string, length int64, pn string) { +func ldmacho(f *bio.Reader, pkg string, length int64, pn string) { var err error var j int var is64 bool @@ -432,7 +432,7 @@ func ldmacho(f *bio.Buf, pkg string, length int64, pn string) { var name string Ctxt.IncVersion() - base := bio.Boffset(f) + base := f.Offset() if bio.Bread(f, hdr[:]) != len(hdr) { goto bad } @@ -557,7 +557,7 @@ func ldmacho(f *bio.Buf, pkg string, length int64, pn string) { } dat = make([]byte, c.seg.filesz) - if bio.Bseek(f, m.base+int64(c.seg.fileoff), 0) < 0 || bio.Bread(f, dat) != len(dat) { + if f.Seek(m.base+int64(c.seg.fileoff), 0) < 0 || bio.Bread(f, dat) != len(dat) { err = fmt.Errorf("cannot load object data: %v", err) goto bad } diff --git a/src/cmd/link/internal/ld/ldpe.go b/src/cmd/link/internal/ld/ldpe.go index e97e842e7f..ba5b928ea0 100644 --- a/src/cmd/link/internal/ld/ldpe.go +++ b/src/cmd/link/internal/ld/ldpe.go @@ -118,7 +118,7 @@ type PeSect struct { } type PeObj struct { - f *bio.Buf + f *bio.Reader name string base uint32 sect []PeSect @@ -129,14 +129,14 @@ type PeObj struct { snames []byte } -func ldpe(f *bio.Buf, pkg string, length int64, pn string) { +func ldpe(f *bio.Reader, pkg string, length int64, pn string) { if Debug['v'] != 0 { fmt.Fprintf(&Bso, "%5.2f ldpe %s\n", obj.Cputime(), pn) } var sect *PeSect Ctxt.IncVersion() - base := int32(bio.Boffset(f)) + base := f.Offset() peobj := new(PeObj) peobj.f = f @@ -174,14 +174,14 @@ func ldpe(f *bio.Buf, pkg string, length int64, pn string) { // TODO return error if found .cormeta // load string table - bio.Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(peobj.fh.NumberOfSymbols), 0) + 
f.Seek(int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(peobj.fh.NumberOfSymbols), 0) if bio.Bread(f, symbuf[:4]) != 4 { goto bad } l = Le32(symbuf[:]) peobj.snames = make([]byte, l) - bio.Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(peobj.fh.NumberOfSymbols), 0) + f.Seek(int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(peobj.fh.NumberOfSymbols), 0) if bio.Bread(f, peobj.snames) != len(peobj.snames) { goto bad } @@ -202,9 +202,9 @@ func ldpe(f *bio.Buf, pkg string, length int64, pn string) { peobj.pesym = make([]PeSym, peobj.fh.NumberOfSymbols) peobj.npesym = uint(peobj.fh.NumberOfSymbols) - bio.Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable), 0) + f.Seek(int64(base)+int64(peobj.fh.PointerToSymbolTable), 0) for i := 0; uint32(i) < peobj.fh.NumberOfSymbols; i += numaux + 1 { - bio.Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(i), 0) + f.Seek(int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(i), 0) if bio.Bread(f, symbuf[:]) != len(symbuf) { goto bad } @@ -290,7 +290,7 @@ func ldpe(f *bio.Buf, pkg string, length int64, pn string) { } r = make([]Reloc, rsect.sh.NumberOfRelocations) - bio.Bseek(f, int64(peobj.base)+int64(rsect.sh.PointerToRelocations), 0) + f.Seek(int64(peobj.base)+int64(rsect.sh.PointerToRelocations), 0) for j = 0; j < int(rsect.sh.NumberOfRelocations); j++ { rp = &r[j] if bio.Bread(f, symbuf[:10]) != 10 { @@ -466,7 +466,7 @@ func pemap(peobj *PeObj, sect *PeSect) int { if sect.sh.PointerToRawData == 0 { // .bss doesn't have data in object file return 0 } - if bio.Bseek(peobj.f, int64(peobj.base)+int64(sect.sh.PointerToRawData), 0) < 0 || bio.Bread(peobj.f, sect.base) != len(sect.base) { + if peobj.f.Seek(int64(peobj.base)+int64(sect.sh.PointerToRawData), 0) < 0 || bio.Bread(peobj.f, sect.base) != len(sect.base) { return -1 } diff --git a/src/cmd/link/internal/ld/lib.go 
b/src/cmd/link/internal/ld/lib.go index 789eaef1a5..f8cc995c30 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -241,9 +241,10 @@ const ( var ( headstring string // buffered output - Bso bio.Buf + Bso bio.Writer ) +// TODO(dfc) outBuf duplicates bio.Writer type outBuf struct { w *bufio.Writer f *os.File @@ -739,11 +740,11 @@ func loadlib() { * look for the next file in an archive. * adapted from libmach. */ -func nextar(bp *bio.Buf, off int64, a *ArHdr) int64 { +func nextar(bp *bio.Reader, off int64, a *ArHdr) int64 { if off&1 != 0 { off++ } - bio.Bseek(bp, off, 0) + bp.Seek(off, 0) buf := make([]byte, SAR_HDR) if n := bio.Bread(bp, buf); n < len(buf) { if n >= 0 { @@ -782,9 +783,9 @@ func objfile(lib *Library) { magbuf := make([]byte, len(ARMAG)) if bio.Bread(f, magbuf) != len(magbuf) || !strings.HasPrefix(string(magbuf), ARMAG) { /* load it as a regular file */ - l := bio.Bseek(f, 0, 2) + l := f.Seek(0, 2) - bio.Bseek(f, 0, 0) + f.Seek(0, 0) ldobj(f, pkg, l, lib.File, lib.File, FileObj) f.Close() @@ -792,7 +793,7 @@ func objfile(lib *Library) { } /* process __.PKGDEF */ - off := bio.Boffset(f) + off := f.Offset() var arhdr ArHdr l := nextar(f, off, &arhdr) @@ -808,12 +809,12 @@ func objfile(lib *Library) { } if Buildmode == BuildmodeShared { - before := bio.Boffset(f) + before := f.Offset() pkgdefBytes := make([]byte, atolwhex(arhdr.size)) bio.Bread(f, pkgdefBytes) hash := sha1.Sum(pkgdefBytes) lib.hash = hash[:] - bio.Bseek(f, before, 0) + f.Seek(before, 0) } off += l @@ -853,7 +854,7 @@ out: } type Hostobj struct { - ld func(*bio.Buf, string, int64, string) + ld func(*bio.Reader, string, int64, string) pkg string pn string file string @@ -874,7 +875,7 @@ var internalpkg = []string{ "runtime/msan", } -func ldhostobj(ld func(*bio.Buf, string, int64, string), f *bio.Buf, pkg string, length int64, pn string, file string) *Hostobj { +func ldhostobj(ld func(*bio.Reader, string, int64, string), f *bio.Reader, pkg string, length 
int64, pn string, file string) *Hostobj { isinternal := false for i := 0; i < len(internalpkg); i++ { if pkg == internalpkg[i] { @@ -905,24 +906,22 @@ func ldhostobj(ld func(*bio.Buf, string, int64, string), f *bio.Buf, pkg string, h.pkg = pkg h.pn = pn h.file = file - h.off = bio.Boffset(f) + h.off = f.Offset() h.length = length return h } func hostobjs() { - var f *bio.Buf var h *Hostobj for i := 0; i < len(hostobj); i++ { h = &hostobj[i] - var err error - f, err = bio.Open(h.file) - if f == nil { + f, err := bio.Open(h.file) + if err != nil { Exitf("cannot reopen %s: %v", h.pn, err) } - bio.Bseek(f, h.off, 0) + f.Seek(h.off, 0) h.ld(f, h.pkg, h.length, h.pn) f.Close() } @@ -1266,15 +1265,15 @@ func hostlinkArchArgs() []string { // ldobj loads an input object. If it is a host object (an object // compiled by a non-Go compiler) it returns the Hostobj pointer. If // it is a Go object, it returns nil. -func ldobj(f *bio.Buf, pkg string, length int64, pn string, file string, whence int) *Hostobj { - eof := bio.Boffset(f) + length +func ldobj(f *bio.Reader, pkg string, length int64, pn string, file string, whence int) *Hostobj { + eof := f.Offset() + length - start := bio.Boffset(f) + start := f.Offset() c1 := bio.Bgetc(f) c2 := bio.Bgetc(f) c3 := bio.Bgetc(f) c4 := bio.Bgetc(f) - bio.Bseek(f, start, 0) + f.Seek(start, 0) magic := uint32(c1)<<24 | uint32(c2)<<16 | uint32(c3)<<8 | uint32(c4) if magic == 0x7f454c46 { // \x7F E L F @@ -1334,7 +1333,7 @@ func ldobj(f *bio.Buf, pkg string, length int64, pn string, file string, whence } /* skip over exports and other info -- ends with \n!\n */ - import0 := bio.Boffset(f) + import0 := f.Offset() c1 = '\n' // the last line ended in \n c2 = bio.Bgetc(f) @@ -1349,13 +1348,13 @@ func ldobj(f *bio.Buf, pkg string, length int64, pn string, file string, whence } } - import1 := bio.Boffset(f) + import1 := f.Offset() - bio.Bseek(f, import0, 0) + f.Seek(import0, 0) ldpkg(f, pkg, import1-import0-2, pn, whence) // -2 for !\n - 
bio.Bseek(f, import1, 0) + f.Seek(import1, 0) - LoadObjFile(Ctxt, f, pkg, eof-bio.Boffset(f), pn) + LoadObjFile(Ctxt, f, pkg, eof-f.Offset(), pn) return nil } diff --git a/src/cmd/link/internal/ld/link.go b/src/cmd/link/internal/ld/link.go index d3f9ed3703..cbcc979c85 100644 --- a/src/cmd/link/internal/ld/link.go +++ b/src/cmd/link/internal/ld/link.go @@ -165,9 +165,10 @@ type Link struct { Headtype int Arch *sys.Arch Debugvlog int32 - Bso *bio.Buf - Windows int32 - Goroot string + + Bso *bio.Writer + Windows int32 + Goroot string // Symbol lookup based on name and indexed by version. Hash []map[string]*LSym diff --git a/src/cmd/link/internal/ld/objfile.go b/src/cmd/link/internal/ld/objfile.go index 6f177861f0..61a67cf94c 100644 --- a/src/cmd/link/internal/ld/objfile.go +++ b/src/cmd/link/internal/ld/objfile.go @@ -147,8 +147,8 @@ type objReader struct { file []*LSym } -func LoadObjFile(ctxt *Link, f *bio.Buf, pkg string, length int64, pn string) { - start := bio.Boffset(f) +func LoadObjFile(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { + start := f.Offset() r := &objReader{ rd: f.Reader(), pkg: pkg, @@ -157,8 +157,8 @@ func LoadObjFile(ctxt *Link, f *bio.Buf, pkg string, length int64, pn string) { dupSym: &LSym{Name: ".dup"}, } r.loadObjFile() - if bio.Boffset(f) != start+length { - log.Fatalf("%s: unexpected end at %d, want %d", pn, int64(bio.Boffset(f)), int64(start+length)) + if f.Offset() != start+length { + log.Fatalf("%s: unexpected end at %d, want %d", pn, f.Offset(), start+length) } } -- cgit v1.3 From c3b3e7b4ef9dff1fc0cc504f81465ded5663b4e4 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 8 Apr 2016 13:33:43 -0400 Subject: cmd/compile: insert instrumentation more carefully in racewalk Be more careful about inserting instrumentation in racewalk. 
If the node being instrumented is an OAS, and it has a non- empty Ninit, then append instrumentation to the Ninit list rather than letting it be inserted before the OAS (and the compilation of its init list). This deals with the case that the Ninit list defines a variable used in the RHS of the OAS. Fixes #15091. Change-Id: Iac91696d9104d07f0bf1bd3499bbf56b2e1ef073 Reviewed-on: https://go-review.googlesource.com/21771 Reviewed-by: Josh Bleecher Snyder Run-TryBot: David Chase --- src/cmd/compile/internal/gc/fmt.go | 3 +++ src/cmd/compile/internal/gc/racewalk.go | 8 +++++++- src/cmd/compile/internal/gc/ssa.go | 4 ++++ src/cmd/compile/internal/ssa/regalloc.go | 4 ++-- test/fixedbugs/issue15091.go | 25 +++++++++++++++++++++++++ 5 files changed, 41 insertions(+), 3 deletions(-) create mode 100644 test/fixedbugs/issue15091.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index 5c5503619f..19f109055d 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -737,6 +737,9 @@ func typefmt(t *Type, flag FmtFlag) string { Fatalf("cannot use TDDDFIELD with old exporter") } return fmt.Sprintf("%v <%v> %v", Econv(t.Etype), t.Sym, t.DDDField()) + + case Txxx: + return "Txxx" } if fmtmode == FExp { diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index 09889a40f3..f6e65146d6 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -164,7 +164,13 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { var outn Nodes outn.Set(out) instrumentnode(&ls[i], &outn, 0, 0) - out = append(outn.Slice(), ls[i]) + if ls[i].Op != OAS || ls[i].Ninit.Len() == 0 { + out = append(outn.Slice(), ls[i]) + } else { + // Splice outn onto end of ls[i].Ninit + ls[i].Ninit.AppendNodes(&outn) + out = append(out, ls[i]) + } } } n.List.Set(out) diff --git a/src/cmd/compile/internal/gc/ssa.go 
b/src/cmd/compile/internal/gc/ssa.go index 90c4d4e95e..7c5f906d76 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -3699,6 +3699,10 @@ func (s *state) resolveFwdRef(v *ssa.Value) { if b == s.f.Entry { // Live variable at start of function. if s.canSSA(name) { + if strings.HasPrefix(name.Sym.Name, "autotmp_") { + // It's likely that this is an uninitialized variable in the entry block. + s.Fatalf("Treating auto as if it were arg, func %s, node %v, value %v", b.Func.Name, name, v) + } v.Op = ssa.OpArg v.Aux = name return diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 22b9d12c19..aec23a1368 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -417,7 +417,7 @@ func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, line // Load v from its spill location. case vi.spill != nil: if s.f.pass.debug > logSpills { - s.f.Config.Warnl(vi.spill.Line, "load spill") + s.f.Config.Warnl(vi.spill.Line, "load spill for %v from %v", v, vi.spill) } c = s.curBlock.NewValue1(line, OpLoadReg, v.Type, vi.spill) vi.spillUsed = true @@ -1078,7 +1078,7 @@ func (s *regAllocState) regalloc(f *Func) { vi := s.values[i] if vi.spillUsed { if s.f.pass.debug > logSpills { - s.f.Config.Warnl(vi.spill.Line, "spilled value") + s.f.Config.Warnl(vi.spill.Line, "spilled value at %v remains", vi.spill) } continue } diff --git a/test/fixedbugs/issue15091.go b/test/fixedbugs/issue15091.go new file mode 100644 index 0000000000..346e906171 --- /dev/null +++ b/test/fixedbugs/issue15091.go @@ -0,0 +1,25 @@ +// errorcheck -0 -race + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sample + +type Html struct { + headerIDs map[string]int +} + +// We don't want to see: +// internal error: (*Html).xyzzy autotmp_3 (type *int) recorded as live on entry, p.Pc=0 +// or (now, with the error caught earlier) +// Treating auto as if it were arg, func (*Html).xyzzy, node ... +// caused by racewalker inserting instrumentation before an OAS where the Ninit +// of the OAS defines part of its right-hand-side. (I.e., the race instrumentation +// references a variable before it is defined.) +func (options *Html) xyzzy(id string) string { + for count, found := options.headerIDs[id]; found; count, found = options.headerIDs[id] { + _ = count + } + return "" +} -- cgit v1.3 From ca397bb68e4b548843d2886e374f96ec3bb0f9c0 Mon Sep 17 00:00:00 2001 From: Dave Cheney Date: Fri, 8 Apr 2016 19:30:41 +1000 Subject: cmd: remove bio.BufReader and bio.BufWriter bio.BufReader was never used. bio.BufWriter was used to wrap an existing io.Writer, but the bio.Writer returned would not be seekable, so replace all occurences with bufio.Reader instead. 
Change-Id: I9c6779e35c63178aa4e104c17bb5bb8b52de0359 Reviewed-on: https://go-review.googlesource.com/21722 Reviewed-by: Brad Fitzpatrick Run-TryBot: Dave Cheney TryBot-Result: Gobot Gobot --- src/cmd/asm/internal/asm/endtoend_test.go | 6 +++--- src/cmd/asm/main.go | 15 ++++++++------- src/cmd/compile/internal/gc/bexport.go | 6 +++--- src/cmd/compile/internal/gc/export.go | 4 ++-- src/cmd/compile/internal/gc/go.go | 3 ++- src/cmd/compile/internal/gc/main.go | 3 +-- src/cmd/internal/bio/buf.go | 12 ------------ src/cmd/internal/obj/link.go | 4 ++-- src/cmd/link/internal/ld/lib.go | 2 +- src/cmd/link/internal/ld/link.go | 9 ++++----- src/cmd/link/internal/ld/pobj.go | 4 ++-- 11 files changed, 28 insertions(+), 40 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/asm/internal/asm/endtoend_test.go b/src/cmd/asm/internal/asm/endtoend_test.go index 8986281f10..bc992a7c99 100644 --- a/src/cmd/asm/internal/asm/endtoend_test.go +++ b/src/cmd/asm/internal/asm/endtoend_test.go @@ -5,6 +5,7 @@ package asm import ( + "bufio" "bytes" "fmt" "io/ioutil" @@ -17,7 +18,6 @@ import ( "testing" "cmd/asm/internal/lex" - "cmd/internal/bio" "cmd/internal/obj" ) @@ -34,7 +34,7 @@ func testEndToEnd(t *testing.T, goarch, file string) { pList := obj.Linknewplist(ctxt) var ok bool testOut = new(bytes.Buffer) // The assembler writes test output to this buffer. - ctxt.Bso = bio.BufWriter(os.Stdout) + ctxt.Bso = bufio.NewWriter(os.Stdout) defer ctxt.Bso.Flush() failed := false ctxt.DiagFunc = func(format string, args ...interface{}) { @@ -272,7 +272,7 @@ func testErrors(t *testing.T, goarch, file string) { pList := obj.Linknewplist(ctxt) var ok bool testOut = new(bytes.Buffer) // The assembler writes test output to this buffer. 
- ctxt.Bso = bio.BufWriter(os.Stdout) + ctxt.Bso = bufio.NewWriter(os.Stdout) defer ctxt.Bso.Flush() failed := false var errBuf bytes.Buffer diff --git a/src/cmd/asm/main.go b/src/cmd/asm/main.go index 75cb8f75d3..f010ca93f1 100644 --- a/src/cmd/asm/main.go +++ b/src/cmd/asm/main.go @@ -5,6 +5,7 @@ package main import ( + "bufio" "flag" "fmt" "log" @@ -32,11 +33,6 @@ func main() { flags.Parse() - // Create object file, write header. - fd, err := os.Create(*flags.OutputFile) - if err != nil { - log.Fatal(err) - } ctxt := obj.Linknew(architecture.LinkArch) if *flags.PrintOut { ctxt.Debugasm = 1 @@ -46,9 +42,14 @@ func main() { if *flags.Shared || *flags.Dynlink { ctxt.Flag_shared = 1 } - ctxt.Bso = bio.BufWriter(os.Stdout) + ctxt.Bso = bufio.NewWriter(os.Stdout) defer ctxt.Bso.Flush() - output := bio.BufWriter(fd) + + // Create object file, write header. + output, err := bio.Create(*flags.OutputFile) + if err != nil { + log.Fatal(err) + } fmt.Fprintf(output, "go object %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion()) fmt.Fprintf(output, "!\n") diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index bb0a34e67b..15e5e3ada6 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -90,9 +90,9 @@ importer. package gc import ( + "bufio" "bytes" "cmd/compile/internal/big" - "cmd/internal/bio" "encoding/binary" "fmt" "sort" @@ -124,7 +124,7 @@ const exportVersion = "v0" const exportInlined = true // default: true type exporter struct { - out *bio.Writer + out *bufio.Writer pkgIndex map[*Pkg]int typIndex map[*Type]int inlined []*Func @@ -136,7 +136,7 @@ type exporter struct { } // export writes the exportlist for localpkg to out and returns the number of bytes written. 
-func export(out *bio.Writer, trace bool) int { +func export(out *bufio.Writer, trace bool) int { p := exporter{ out: out, pkgIndex: make(map[*Pkg]int), diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 2f94b9c62f..dc7c0869bf 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -384,7 +384,7 @@ func dumpexport() { if debugFormat { // save a copy of the export data var copy bytes.Buffer - bcopy := bio.BufWriter(©) + bcopy := bufio.NewWriter(©) size = export(bcopy, Debug_export != 0) bcopy.Flush() // flushing to bytes.Buffer cannot fail if n, err := bout.Write(copy.Bytes()); n != size || err != nil { @@ -407,7 +407,7 @@ func dumpexport() { pkgs = savedPkgs pkgMap = savedPkgMap } else { - size = export(bout, Debug_export != 0) + size = export(bout.Writer(), Debug_export != 0) } exportf("\n$$\n") } else { diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index ec7e219d95..d9b28ff8e6 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -5,6 +5,7 @@ package gc import ( + "bufio" "cmd/compile/internal/ssa" "cmd/internal/bio" "cmd/internal/obj" @@ -288,7 +289,7 @@ var Ctxt *obj.Link var writearchive int -var bstdout *bio.Writer +var bstdout *bufio.Writer var Nacl bool diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 03143f5d0a..26acf8861f 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -9,7 +9,6 @@ package gc import ( "bufio" "cmd/compile/internal/ssa" - "cmd/internal/bio" "cmd/internal/obj" "cmd/internal/sys" "flag" @@ -104,7 +103,7 @@ func Main() { Ctxt = obj.Linknew(Thearch.LinkArch) Ctxt.DiagFunc = Yyerror - bstdout = bio.BufWriter(os.Stdout) + bstdout = bufio.NewWriter(os.Stdout) Ctxt.Bso = bstdout localpkg = mkpkg("") diff --git a/src/cmd/internal/bio/buf.go b/src/cmd/internal/bio/buf.go index 0bd4658cdd..983ce46627 
100644 --- a/src/cmd/internal/bio/buf.go +++ b/src/cmd/internal/bio/buf.go @@ -51,18 +51,6 @@ func Open(name string) (*Reader, error) { return &Reader{f: f, r: bufio.NewReader(f)}, nil } -// BufWriter returns a Writer on top of w. -// TODO(dfc) remove this method and replace caller with bufio.Writer. -func BufWriter(w io.Writer) *Writer { - return &Writer{w: bufio.NewWriter(w)} -} - -// BufWriter returns a Reader on top of r. -// TODO(dfc) remove this method and replace caller with bufio.Reader. -func BufReader(r io.Reader) *Reader { - return &Reader{r: bufio.NewReader(r)} -} - func (w *Writer) Write(p []byte) (int, error) { return w.w.Write(p) } diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index c48c3d807f..62175f9ed8 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -31,7 +31,7 @@ package obj import ( - "cmd/internal/bio" + "bufio" "cmd/internal/sys" ) @@ -629,7 +629,7 @@ type Link struct { Flag_shared int32 Flag_dynlink bool Flag_optimize bool - Bso *bio.Writer + Bso *bufio.Writer Pathname string Goroot string Goroot_final string diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index e35306dd0e..01dca9fc31 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -241,7 +241,7 @@ const ( var ( headstring string // buffered output - Bso *bio.Writer + Bso *bufio.Writer ) // TODO(dfc) outBuf duplicates bio.Writer diff --git a/src/cmd/link/internal/ld/link.go b/src/cmd/link/internal/ld/link.go index cbcc979c85..52b52f1cc0 100644 --- a/src/cmd/link/internal/ld/link.go +++ b/src/cmd/link/internal/ld/link.go @@ -31,7 +31,7 @@ package ld import ( - "cmd/internal/bio" + "bufio" "cmd/internal/sys" "debug/elf" "fmt" @@ -165,10 +165,9 @@ type Link struct { Headtype int Arch *sys.Arch Debugvlog int32 - - Bso *bio.Writer - Windows int32 - Goroot string + Bso *bufio.Writer + Windows int32 + Goroot string // Symbol lookup based on name and indexed by version. 
Hash []map[string]*LSym diff --git a/src/cmd/link/internal/ld/pobj.go b/src/cmd/link/internal/ld/pobj.go index 50066d32d7..f4fb4d4845 100644 --- a/src/cmd/link/internal/ld/pobj.go +++ b/src/cmd/link/internal/ld/pobj.go @@ -31,7 +31,7 @@ package ld import ( - "cmd/internal/bio" + "bufio" "cmd/internal/obj" "cmd/internal/sys" "flag" @@ -46,7 +46,7 @@ var ( ) func Ldmain() { - Bso = bio.BufWriter(os.Stdout) + Bso = bufio.NewWriter(os.Stdout) Ctxt = linknew(SysArch) Ctxt.Diag = Diag -- cgit v1.3 From 93368be61ebaf8069d0d70034097de580441c412 Mon Sep 17 00:00:00 2001 From: Dave Cheney Date: Fri, 8 Apr 2016 20:37:54 +1000 Subject: cmd/internal/bio: embed bufio.{Reader,Writer} in bio.{Reader,Writer} Change-Id: Ie95b0b0d4f724f4769cf2d4f8063cb5019fa9bc9 Reviewed-on: https://go-review.googlesource.com/21781 Reviewed-by: Brad Fitzpatrick --- src/cmd/compile/internal/gc/export.go | 2 +- src/cmd/internal/bio/buf.go | 56 ++++++++--------------------------- src/cmd/internal/obj/objfile.go | 2 +- src/cmd/link/internal/ld/objfile.go | 2 +- 4 files changed, 16 insertions(+), 46 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index dc7c0869bf..ae36657a65 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -407,7 +407,7 @@ func dumpexport() { pkgs = savedPkgs pkgMap = savedPkgMap } else { - size = export(bout.Writer(), Debug_export != 0) + size = export(bout.Writer, Debug_export != 0) } exportf("\n$$\n") } else { diff --git a/src/cmd/internal/bio/buf.go b/src/cmd/internal/bio/buf.go index 983ce46627..564ac77cbf 100644 --- a/src/cmd/internal/bio/buf.go +++ b/src/cmd/internal/bio/buf.go @@ -17,21 +17,15 @@ const EOF = -1 // Reader implements a seekable buffered io.Reader. type Reader struct { f *os.File - r *bufio.Reader + *bufio.Reader } // Writer implements a seekable buffered io.Writer. 
type Writer struct { f *os.File - w *bufio.Writer + *bufio.Writer } -// Reader returns this Reader's underlying bufio.Reader. -func (r *Reader) Reader() *bufio.Reader { return r.r } - -// Writer returns this Writer's underlying bufio.Writer. -func (w *Writer) Writer() *bufio.Writer { return w.w } - // Create creates the file named name and returns a Writer // for that file. func Create(name string) (*Writer, error) { @@ -39,7 +33,7 @@ func Create(name string) (*Writer, error) { if err != nil { return nil, err } - return &Writer{f: f, w: bufio.NewWriter(f)}, nil + return &Writer{f: f, Writer: bufio.NewWriter(f)}, nil } // Open returns a Reader for the file named name. @@ -48,31 +42,23 @@ func Open(name string) (*Reader, error) { if err != nil { return nil, err } - return &Reader{f: f, r: bufio.NewReader(f)}, nil -} - -func (w *Writer) Write(p []byte) (int, error) { - return w.w.Write(p) -} - -func (w *Writer) WriteString(p string) (int, error) { - return w.w.WriteString(p) + return &Reader{f: f, Reader: bufio.NewReader(f)}, nil } func (r *Reader) Seek(offset int64, whence int) int64 { if whence == 1 { - offset -= int64(r.r.Buffered()) + offset -= int64(r.Buffered()) } off, err := r.f.Seek(offset, whence) if err != nil { log.Fatalf("seeking in output: %v", err) } - r.r.Reset(r.f) + r.Reset(r.f) return off } func (w *Writer) Seek(offset int64, whence int) int64 { - if err := w.w.Flush(); err != nil { + if err := w.Flush(); err != nil { log.Fatalf("writing output: %v", err) } off, err := w.f.Seek(offset, whence) @@ -87,12 +73,12 @@ func (r *Reader) Offset() int64 { if err != nil { log.Fatalf("seeking in output [0, 1]: %v", err) } - off -= int64(r.r.Buffered()) + off -= int64(r.Buffered()) return off } func (w *Writer) Offset() int64 { - if err := w.w.Flush(); err != nil { + if err := w.Flush(); err != nil { log.Fatalf("writing output: %v", err) } off, err := w.f.Seek(0, 1) @@ -102,16 +88,8 @@ func (w *Writer) Offset() int64 { return off } -func (w *Writer) Flush() 
error { - return w.w.Flush() -} - -func (w *Writer) WriteByte(c byte) error { - return w.w.WriteByte(c) -} - func Bread(r *Reader, p []byte) int { - n, err := io.ReadFull(r.r, p) + n, err := io.ReadFull(r, p) if n == 0 { if err != nil && err != io.EOF { n = -1 @@ -121,7 +99,7 @@ func Bread(r *Reader, p []byte) int { } func Bgetc(r *Reader) int { - c, err := r.r.ReadByte() + c, err := r.ReadByte() if err != nil { if err != io.EOF { log.Fatalf("reading input: %v", err) @@ -131,16 +109,8 @@ func Bgetc(r *Reader) int { return int(c) } -func (r *Reader) Read(p []byte) (int, error) { - return r.r.Read(p) -} - -func (r *Reader) Peek(n int) ([]byte, error) { - return r.r.Peek(n) -} - func Brdline(r *Reader, delim int) string { - s, err := r.r.ReadBytes(byte(delim)) + s, err := r.ReadBytes(byte(delim)) if err != nil { log.Fatalf("reading input: %v", err) } @@ -152,7 +122,7 @@ func (r *Reader) Close() error { } func (w *Writer) Close() error { - err := w.w.Flush() + err := w.Flush() err1 := w.f.Close() if err == nil { err = err1 diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go index ed6d75eba3..ee21f39d10 100644 --- a/src/cmd/internal/obj/objfile.go +++ b/src/cmd/internal/obj/objfile.go @@ -377,7 +377,7 @@ func (w *objWriter) writeLengths() { func newObjWriter(ctxt *Link, b *bio.Writer) *objWriter { return &objWriter{ ctxt: ctxt, - wr: b.Writer(), + wr: b.Writer, vrefIdx: make(map[string]int), refIdx: make(map[string]int), } diff --git a/src/cmd/link/internal/ld/objfile.go b/src/cmd/link/internal/ld/objfile.go index 61a67cf94c..578afd4c74 100644 --- a/src/cmd/link/internal/ld/objfile.go +++ b/src/cmd/link/internal/ld/objfile.go @@ -150,7 +150,7 @@ type objReader struct { func LoadObjFile(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { start := f.Offset() r := &objReader{ - rd: f.Reader(), + rd: f.Reader, pkg: pkg, ctxt: ctxt, pn: pn, -- cgit v1.3 From 012557b3769f9286b9488fbfd4bddfeee66b6a55 Mon Sep 17 00:00:00 2001 From: 
Martin Möhrmann Date: Sun, 10 Apr 2016 08:48:55 +0200 Subject: all: replace magic 0x80 with named constant utf8.RuneSelf Change-Id: Id1c2e8e9d60588de866e8b6ca59cc83dd28f848f Reviewed-on: https://go-review.googlesource.com/21756 Reviewed-by: Brad Fitzpatrick Run-TryBot: Brad Fitzpatrick TryBot-Result: Gobot Gobot --- src/bufio/bufio.go | 2 +- src/cmd/compile/internal/gc/fmt.go | 2 +- src/encoding/asn1/asn1.go | 2 +- src/go/build/build.go | 2 +- src/go/build/read.go | 3 ++- src/go/scanner/scanner.go | 6 +++--- src/html/template/css.go | 2 +- src/net/http/cookiejar/punycode.go | 2 +- 8 files changed, 11 insertions(+), 10 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/bufio/bufio.go b/src/bufio/bufio.go index d2ccc74f52..3b30b8b80c 100644 --- a/src/bufio/bufio.go +++ b/src/bufio/bufio.go @@ -266,7 +266,7 @@ func (b *Reader) ReadRune() (r rune, size int, err error) { return 0, 0, b.readErr() } r, size = rune(b.buf[b.r]), 1 - if r >= 0x80 { + if r >= utf8.RuneSelf { r, size = utf8.DecodeRune(b.buf[b.r:b.w]) } b.r += size diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index 19f109055d..41d696574c 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -337,7 +337,7 @@ func Vconv(v Val, flag FmtFlag) string { case CTRUNE: x := v.U.(*Mpint).Int64() - if ' ' <= x && x < 0x80 && x != '\\' && x != '\'' { + if ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'' { return fmt.Sprintf("'%c'", int(x)) } if 0 <= x && x < 1<<16 { diff --git a/src/encoding/asn1/asn1.go b/src/encoding/asn1/asn1.go index bd2c96d887..2b5ad08551 100644 --- a/src/encoding/asn1/asn1.go +++ b/src/encoding/asn1/asn1.go @@ -393,7 +393,7 @@ func isPrintable(b byte) bool { // byte slice and returns it. 
func parseIA5String(bytes []byte) (ret string, err error) { for _, b := range bytes { - if b >= 0x80 { + if b >= utf8.RuneSelf { err = SyntaxError{"IA5String contains invalid character"} return } diff --git a/src/go/build/build.go b/src/go/build/build.go index e61d564fa3..04a41a6c2e 100644 --- a/src/go/build/build.go +++ b/src/go/build/build.go @@ -1266,7 +1266,7 @@ func safeCgoName(s string, spaces bool) bool { safe = safe[len(safeSpaces):] } for i := 0; i < len(s); i++ { - if c := s[i]; c < 0x80 && bytes.IndexByte(safe, c) < 0 { + if c := s[i]; c < utf8.RuneSelf && bytes.IndexByte(safe, c) < 0 { return false } } diff --git a/src/go/build/read.go b/src/go/build/read.go index d411c1980e..29b8cdc786 100644 --- a/src/go/build/read.go +++ b/src/go/build/read.go @@ -8,6 +8,7 @@ import ( "bufio" "errors" "io" + "unicode/utf8" ) type importReader struct { @@ -20,7 +21,7 @@ type importReader struct { } func isIdent(c byte) bool { - return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= 0x80 + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= utf8.RuneSelf } var ( diff --git a/src/go/scanner/scanner.go b/src/go/scanner/scanner.go index 4041d9aa47..ce660c71d5 100644 --- a/src/go/scanner/scanner.go +++ b/src/go/scanner/scanner.go @@ -64,7 +64,7 @@ func (s *Scanner) next() { switch { case r == 0: s.error(s.offset, "illegal character NUL") - case r >= 0x80: + case r >= utf8.RuneSelf: // not ASCII r, w = utf8.DecodeRune(s.src[s.rdOffset:]) if r == utf8.RuneError && w == 1 { @@ -255,11 +255,11 @@ func (s *Scanner) findLineEnd() bool { } func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch) } func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) + 
return '0' <= ch && ch <= '9' || ch >= utf8.RuneSelf && unicode.IsDigit(ch) } func (s *Scanner) scanIdentifier() string { diff --git a/src/html/template/css.go b/src/html/template/css.go index 4c27cce85a..9154d8636d 100644 --- a/src/html/template/css.go +++ b/src/html/template/css.go @@ -243,7 +243,7 @@ func cssValueFilter(args ...interface{}) string { return filterFailsafe } default: - if c < 0x80 && isCSSNmchar(rune(c)) { + if c < utf8.RuneSelf && isCSSNmchar(rune(c)) { id = append(id, c) } } diff --git a/src/net/http/cookiejar/punycode.go b/src/net/http/cookiejar/punycode.go index ea7ceb5ef3..a9cc666e8c 100644 --- a/src/net/http/cookiejar/punycode.go +++ b/src/net/http/cookiejar/punycode.go @@ -37,7 +37,7 @@ func encode(prefix, s string) (string, error) { delta, n, bias := int32(0), initialN, initialBias b, remaining := int32(0), int32(0) for _, r := range s { - if r < 0x80 { + if r < utf8.RuneSelf { b++ output = append(output, byte(r)) } else { -- cgit v1.3 From 6b33b0e98e9be77d98b026ae2adf10dd71be5a1b Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Sun, 10 Apr 2016 09:08:00 -0700 Subject: cmd/compile: avoid a spill in append fast path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of spilling newlen, recalculate it. This removes a spill from the fast path, at the cost of a cheap recalculation on the (rare) growth path. This uses 8 bytes less of stack space. It generates two more bytes of code, but that is due to suboptimal register allocation; see far below. Runtime append microbenchmarks are all over the map, presumably due to incidental code movement. 
Sample code: func s(b []byte) []byte { b = append(b, 1, 2, 3) return b } Before: "".s t=1 size=160 args=0x30 locals=0x48 0x0000 00000 (append.go:8) TEXT "".s(SB), $72-48 0x0000 00000 (append.go:8) MOVQ (TLS), CX 0x0009 00009 (append.go:8) CMPQ SP, 16(CX) 0x000d 00013 (append.go:8) JLS 149 0x0013 00019 (append.go:8) SUBQ $72, SP 0x0017 00023 (append.go:8) FUNCDATA $0, gclocals·6432f8c6a0d23fa7bee6c5d96f21a92a(SB) 0x0017 00023 (append.go:8) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB) 0x0017 00023 (append.go:9) MOVQ "".b+88(FP), CX 0x001c 00028 (append.go:9) LEAQ 3(CX), DX 0x0020 00032 (append.go:9) MOVQ DX, "".autotmp_0+64(SP) 0x0025 00037 (append.go:9) MOVQ "".b+96(FP), BX 0x002a 00042 (append.go:9) CMPQ DX, BX 0x002d 00045 (append.go:9) JGT $0, 86 0x002f 00047 (append.go:8) MOVQ "".b+80(FP), AX 0x0034 00052 (append.go:9) MOVB $1, (AX)(CX*1) 0x0038 00056 (append.go:9) MOVB $2, 1(AX)(CX*1) 0x003d 00061 (append.go:9) MOVB $3, 2(AX)(CX*1) 0x0042 00066 (append.go:10) MOVQ AX, "".~r1+104(FP) 0x0047 00071 (append.go:10) MOVQ DX, "".~r1+112(FP) 0x004c 00076 (append.go:10) MOVQ BX, "".~r1+120(FP) 0x0051 00081 (append.go:10) ADDQ $72, SP 0x0055 00085 (append.go:10) RET 0x0056 00086 (append.go:9) LEAQ type.[]uint8(SB), AX 0x005d 00093 (append.go:9) MOVQ AX, (SP) 0x0061 00097 (append.go:9) MOVQ "".b+80(FP), BP 0x0066 00102 (append.go:9) MOVQ BP, 8(SP) 0x006b 00107 (append.go:9) MOVQ CX, 16(SP) 0x0070 00112 (append.go:9) MOVQ BX, 24(SP) 0x0075 00117 (append.go:9) MOVQ DX, 32(SP) 0x007a 00122 (append.go:9) PCDATA $0, $0 0x007a 00122 (append.go:9) CALL runtime.growslice(SB) 0x007f 00127 (append.go:9) MOVQ 40(SP), AX 0x0084 00132 (append.go:9) MOVQ 56(SP), BX 0x0089 00137 (append.go:8) MOVQ "".b+88(FP), CX 0x008e 00142 (append.go:9) MOVQ "".autotmp_0+64(SP), DX 0x0093 00147 (append.go:9) JMP 52 0x0095 00149 (append.go:9) NOP 0x0095 00149 (append.go:8) CALL runtime.morestack_noctxt(SB) 0x009a 00154 (append.go:8) JMP 0 After: "".s t=1 size=176 args=0x30 locals=0x40 
0x0000 00000 (append.go:8) TEXT "".s(SB), $64-48 0x0000 00000 (append.go:8) MOVQ (TLS), CX 0x0009 00009 (append.go:8) CMPQ SP, 16(CX) 0x000d 00013 (append.go:8) JLS 151 0x0013 00019 (append.go:8) SUBQ $64, SP 0x0017 00023 (append.go:8) FUNCDATA $0, gclocals·6432f8c6a0d23fa7bee6c5d96f21a92a(SB) 0x0017 00023 (append.go:8) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB) 0x0017 00023 (append.go:9) MOVQ "".b+80(FP), CX 0x001c 00028 (append.go:9) LEAQ 3(CX), DX 0x0020 00032 (append.go:9) MOVQ "".b+88(FP), BX 0x0025 00037 (append.go:9) CMPQ DX, BX 0x0028 00040 (append.go:9) JGT $0, 81 0x002a 00042 (append.go:8) MOVQ "".b+72(FP), AX 0x002f 00047 (append.go:9) MOVB $1, (AX)(CX*1) 0x0033 00051 (append.go:9) MOVB $2, 1(AX)(CX*1) 0x0038 00056 (append.go:9) MOVB $3, 2(AX)(CX*1) 0x003d 00061 (append.go:10) MOVQ AX, "".~r1+96(FP) 0x0042 00066 (append.go:10) MOVQ DX, "".~r1+104(FP) 0x0047 00071 (append.go:10) MOVQ BX, "".~r1+112(FP) 0x004c 00076 (append.go:10) ADDQ $64, SP 0x0050 00080 (append.go:10) RET 0x0051 00081 (append.go:9) LEAQ type.[]uint8(SB), AX 0x0058 00088 (append.go:9) MOVQ AX, (SP) 0x005c 00092 (append.go:9) MOVQ "".b+72(FP), BP 0x0061 00097 (append.go:9) MOVQ BP, 8(SP) 0x0066 00102 (append.go:9) MOVQ CX, 16(SP) 0x006b 00107 (append.go:9) MOVQ BX, 24(SP) 0x0070 00112 (append.go:9) MOVQ DX, 32(SP) 0x0075 00117 (append.go:9) PCDATA $0, $0 0x0075 00117 (append.go:9) CALL runtime.growslice(SB) 0x007a 00122 (append.go:9) MOVQ 40(SP), AX 0x007f 00127 (append.go:9) MOVQ 48(SP), CX 0x0084 00132 (append.go:9) MOVQ 56(SP), BX 0x0089 00137 (append.go:9) ADDQ $3, CX 0x008d 00141 (append.go:9) MOVQ CX, DX 0x0090 00144 (append.go:8) MOVQ "".b+80(FP), CX 0x0095 00149 (append.go:9) JMP 47 0x0097 00151 (append.go:9) NOP 0x0097 00151 (append.go:8) CALL runtime.morestack_noctxt(SB) 0x009c 00156 (append.go:8) JMP 0 Observe that in the following sequence, we should use DX directly instead of using CX as a temporary register, which would make the new code a strict improvement 
on the old: 0x007f 00127 (append.go:9) MOVQ 48(SP), CX 0x0084 00132 (append.go:9) MOVQ 56(SP), BX 0x0089 00137 (append.go:9) ADDQ $3, CX 0x008d 00141 (append.go:9) MOVQ CX, DX 0x0090 00144 (append.go:8) MOVQ "".b+80(FP), CX Change-Id: I4ee50b18fa53865901d2d7f86c2cbb54c6fa6924 Reviewed-on: https://go-review.googlesource.com/21812 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/ssa.go | 30 +++++++++++++++++------------- src/runtime/slice.go | 6 ++++++ 2 files changed, 23 insertions(+), 13 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 7c5f906d76..d69559d945 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -337,12 +337,13 @@ var ( memVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "mem"}} // dummy nodes for temporary variables - ptrVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}} - capVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}} - typVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}} - idataVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "idata"}} - okVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}} - deltaVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "delta"}} + ptrVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}} + newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}} + capVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}} + typVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}} + idataVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "idata"}} + okVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ok"}} + deltaVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "delta"}} ) // startBlock sets the current block we're generating code in to b. 
@@ -2089,15 +2090,16 @@ func (s *state) expr(n *Node) *ssa.Value { // exprAppend converts an OAPPEND node n to an ssa.Value, adds it to s, and returns the Value. func (s *state) exprAppend(n *Node) *ssa.Value { // append(s, e1, e2, e3). Compile like: - // ptr,len,cap := s + // ptr, len, cap := s // newlen := len + 3 // if newlen > s.cap { - // ptr,_,cap = growslice(s, newlen) + // ptr, len, cap = growslice(s, newlen) + // newlen = len + 3 // recalculate to avoid a spill // } // *(ptr+len) = e1 // *(ptr+len+1) = e2 // *(ptr+len+2) = e3 - // makeslice(ptr,newlen,cap) + // makeslice(ptr, newlen, cap) et := n.Type.Elem() pt := Ptrto(et) @@ -2117,6 +2119,7 @@ func (s *state) exprAppend(n *Node) *ssa.Value { nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs)) cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c) s.vars[&ptrVar] = p + s.vars[&newlenVar] = nl s.vars[&capVar] = c b := s.endBlock() b.Kind = ssa.BlockIf @@ -2132,8 +2135,7 @@ func (s *state) exprAppend(n *Node) *ssa.Value { r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl) s.vars[&ptrVar] = r[0] - // Note: we don't need to read r[1], the result's length. It will be nl. - // (or maybe we should, we just have to spill/restore nl otherwise?) + s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs)) s.vars[&capVar] = r[2] b = s.endBlock() b.AddEdgeTo(assign) @@ -2154,8 +2156,9 @@ func (s *state) exprAppend(n *Node) *ssa.Value { } } - p = s.variable(&ptrVar, pt) // generates phi for ptr - c = s.variable(&capVar, Types[TINT]) // generates phi for cap + p = s.variable(&ptrVar, pt) // generates phi for ptr + nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl + c = s.variable(&capVar, Types[TINT]) // generates phi for cap p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) // TODO: just one write barrier call for all of these writes? 
// TODO: maybe just one writeBarrier.enabled check? @@ -2178,6 +2181,7 @@ func (s *state) exprAppend(n *Node) *ssa.Value { // make result delete(s.vars, &ptrVar) + delete(s.vars, &newlenVar) delete(s.vars, &capVar) return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) } diff --git a/src/runtime/slice.go b/src/runtime/slice.go index 0bc0299f72..4ab221056c 100644 --- a/src/runtime/slice.go +++ b/src/runtime/slice.go @@ -37,6 +37,12 @@ func makeslice(t *slicetype, len64, cap64 int64) slice { // It is passed the slice type, the old slice, and the desired new minimum capacity, // and it returns a new slice with at least that capacity, with the old data // copied into it. +// The new slice's length is set to the old slice's length, +// NOT to the new requested capacity. +// This is for codegen convenience. The old slice's length is used immediately +// to calculate where to write new values during an append. +// TODO: When the old backend is gone, reconsider this decision. +// The SSA backend might prefer the new length or to return only ptr/cap and save stack space. func growslice(t *slicetype, old slice, cap int) slice { if raceenabled { callerpc := getcallerpc(unsafe.Pointer(&t)) -- cgit v1.3 From 0004f34cefcdaad13a5131e3494fb2ff04877cd2 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Sun, 10 Apr 2016 08:26:43 -0700 Subject: cmd/compile: regalloc enforces 2-address instructions Instead of being a hint, resultInArg0 is now enforced by regalloc. This allows us to delete all the code from amd64/ssa.go which deals with converting from a semantically three-address instruction into some copies plus a two-address instruction. 
Change-Id: Id4f39a80be4b678718bfd42a229f9094ab6ecd7c Reviewed-on: https://go-review.googlesource.com/21816 Reviewed-by: Josh Bleecher Snyder --- src/cmd/compile/internal/amd64/ssa.go | 240 ++++---------------- src/cmd/compile/internal/ssa/gen/AMD64.rules | 6 + src/cmd/compile/internal/ssa/gen/AMD64Ops.go | 28 +-- src/cmd/compile/internal/ssa/gen/main.go | 8 +- src/cmd/compile/internal/ssa/op.go | 2 +- src/cmd/compile/internal/ssa/opGen.go | 328 +++++++++++++-------------- src/cmd/compile/internal/ssa/regalloc.go | 47 +++- src/cmd/compile/internal/ssa/rewriteAMD64.go | 47 ++++ 8 files changed, 310 insertions(+), 396 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 3f8e0ece12..723a2ddec5 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -192,74 +192,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = r } - // 2-address opcode arithmetic, symmetric - case ssa.OpAMD64ADDSS, ssa.OpAMD64ADDSD, + // 2-address opcode arithmetic + case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL, ssa.OpAMD64SUBW, ssa.OpAMD64SUBB, + ssa.OpAMD64MULQ, ssa.OpAMD64MULL, ssa.OpAMD64MULW, ssa.OpAMD64MULB, ssa.OpAMD64ANDQ, ssa.OpAMD64ANDL, ssa.OpAMD64ANDW, ssa.OpAMD64ANDB, ssa.OpAMD64ORQ, ssa.OpAMD64ORL, ssa.OpAMD64ORW, ssa.OpAMD64ORB, ssa.OpAMD64XORQ, ssa.OpAMD64XORL, ssa.OpAMD64XORW, ssa.OpAMD64XORB, - ssa.OpAMD64MULQ, ssa.OpAMD64MULL, ssa.OpAMD64MULW, ssa.OpAMD64MULB, - ssa.OpAMD64MULSS, ssa.OpAMD64MULSD, ssa.OpAMD64PXOR: - r := gc.SSARegNum(v) - x := gc.SSARegNum(v.Args[0]) - y := gc.SSARegNum(v.Args[1]) - if x != r && y != r { - opregreg(moveByType(v.Type), r, x) - x = r - } - p := gc.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_REG - p.To.Type = obj.TYPE_REG - p.To.Reg = r - if x == r { - p.From.Reg = y - } else { - p.From.Reg = x - } - // 2-address opcode arithmetic, not symmetric - case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL, 
ssa.OpAMD64SUBW, ssa.OpAMD64SUBB: - r := gc.SSARegNum(v) - x := gc.SSARegNum(v.Args[0]) - y := gc.SSARegNum(v.Args[1]) - var neg bool - if y == r { - // compute -(y-x) instead - x, y = y, x - neg = true - } - if x != r { - opregreg(moveByType(v.Type), r, x) - } - opregreg(v.Op.Asm(), r, y) - - if neg { - if v.Op == ssa.OpAMD64SUBQ { - p := gc.Prog(x86.ANEGQ) - p.To.Type = obj.TYPE_REG - p.To.Reg = r - } else { // Avoids partial registers write - p := gc.Prog(x86.ANEGL) - p.To.Type = obj.TYPE_REG - p.To.Reg = r - } - } - case ssa.OpAMD64SUBSS, ssa.OpAMD64SUBSD, ssa.OpAMD64DIVSS, ssa.OpAMD64DIVSD: + ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL, ssa.OpAMD64SHLW, ssa.OpAMD64SHLB, + ssa.OpAMD64SHRQ, ssa.OpAMD64SHRL, ssa.OpAMD64SHRW, ssa.OpAMD64SHRB, + ssa.OpAMD64SARQ, ssa.OpAMD64SARL, ssa.OpAMD64SARW, ssa.OpAMD64SARB, + ssa.OpAMD64ADDSS, ssa.OpAMD64ADDSD, ssa.OpAMD64SUBSS, ssa.OpAMD64SUBSD, + ssa.OpAMD64MULSS, ssa.OpAMD64MULSD, ssa.OpAMD64DIVSS, ssa.OpAMD64DIVSD, + ssa.OpAMD64PXOR: r := gc.SSARegNum(v) - x := gc.SSARegNum(v.Args[0]) - y := gc.SSARegNum(v.Args[1]) - if y == r && x != r { - // r/y := x op r/y, need to preserve x and rewrite to - // r/y := r/y op x15 - x15 := int16(x86.REG_X15) - // register move y to x15 - // register move x to y - // rename y with x15 - opregreg(moveByType(v.Type), x15, y) - opregreg(moveByType(v.Type), r, x) - y = x15 - } else if x != r { - opregreg(moveByType(v.Type), r, x) + if r != gc.SSARegNum(v.Args[0]) { + v.Fatalf("input[0] and output not in same register %s", v.LongString()) } - opregreg(v.Op.Asm(), r, y) + opregreg(v.Op.Asm(), r, gc.SSARegNum(v.Args[1])) case ssa.OpAMD64DIVQ, ssa.OpAMD64DIVL, ssa.OpAMD64DIVW, ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU, @@ -372,47 +321,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // Do a 64-bit add, the overflow goes into the carry. // Shift right once and pull the carry back into the 63rd bit. 
r := gc.SSARegNum(v) - x := gc.SSARegNum(v.Args[0]) - y := gc.SSARegNum(v.Args[1]) - if x != r && y != r { - opregreg(moveByType(v.Type), r, x) - x = r + if r != gc.SSARegNum(v.Args[0]) { + v.Fatalf("input[0] and output not in same register %s", v.LongString()) } p := gc.Prog(x86.AADDQ) p.From.Type = obj.TYPE_REG p.To.Type = obj.TYPE_REG p.To.Reg = r - if x == r { - p.From.Reg = y - } else { - p.From.Reg = x - } + p.From.Reg = gc.SSARegNum(v.Args[1]) p = gc.Prog(x86.ARCRQ) p.From.Type = obj.TYPE_CONST p.From.Offset = 1 p.To.Type = obj.TYPE_REG p.To.Reg = r - case ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL, ssa.OpAMD64SHLW, ssa.OpAMD64SHLB, - ssa.OpAMD64SHRQ, ssa.OpAMD64SHRL, ssa.OpAMD64SHRW, ssa.OpAMD64SHRB, - ssa.OpAMD64SARQ, ssa.OpAMD64SARL, ssa.OpAMD64SARW, ssa.OpAMD64SARB: - x := gc.SSARegNum(v.Args[0]) - r := gc.SSARegNum(v) - if x != r { - if r == x86.REG_CX { - v.Fatalf("can't implement %s, target and shift both in CX", v.LongString()) - } - p := gc.Prog(moveByType(v.Type)) - p.From.Type = obj.TYPE_REG - p.From.Reg = x - p.To.Type = obj.TYPE_REG - p.To.Reg = r - } - p := gc.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_REG - p.From.Reg = gc.SSARegNum(v.Args[1]) // should be CX - p.To.Type = obj.TYPE_REG - p.To.Reg = r case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst, ssa.OpAMD64ADDWconst, ssa.OpAMD64ADDBconst: r := gc.SSARegNum(v) a := gc.SSARegNum(v.Args[0]) @@ -433,7 +355,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = r return - } else if v.AuxInt == -1 { + } + if v.AuxInt == -1 { var asm obj.As if v.Op == ssa.OpAMD64ADDQconst { asm = x86.ADECQ @@ -444,14 +367,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = r return - } else { - p := gc.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_CONST - p.From.Offset = v.AuxInt - p.To.Type = obj.TYPE_REG - p.To.Reg = r - return } + p := gc.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + 
p.To.Reg = r + return } var asm obj.As if v.Op == ssa.OpAMD64ADDQconst { @@ -469,17 +391,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpAMD64CMOVQEQconst, ssa.OpAMD64CMOVLEQconst, ssa.OpAMD64CMOVWEQconst, ssa.OpAMD64CMOVQNEconst, ssa.OpAMD64CMOVLNEconst, ssa.OpAMD64CMOVWNEconst: r := gc.SSARegNum(v) - x := gc.SSARegNum(v.Args[0]) - // Arg0 is in/out, move in to out if not already same - if r != x { - p := gc.Prog(moveByType(v.Type)) - p.From.Type = obj.TYPE_REG - p.From.Reg = x - p.To.Type = obj.TYPE_REG - p.To.Reg = r + if r != gc.SSARegNum(v.Args[0]) { + v.Fatalf("input[0] and output not in same register %s", v.LongString()) } - // Constant into AX, after arg0 movement in case arg0 is in AX + // Constant into AX p := gc.Prog(moveByType(v.Type)) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt @@ -494,13 +410,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst, ssa.OpAMD64MULWconst, ssa.OpAMD64MULBconst: r := gc.SSARegNum(v) - x := gc.SSARegNum(v.Args[0]) - if r != x { - p := gc.Prog(moveByType(v.Type)) - p.From.Type = obj.TYPE_REG - p.From.Reg = x - p.To.Type = obj.TYPE_REG - p.To.Reg = r + if r != gc.SSARegNum(v.Args[0]) { + v.Fatalf("input[0] and output not in same register %s", v.LongString()) } p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST @@ -508,87 +419,22 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = r // TODO: Teach doasm to compile the three-address multiply imul $c, r1, r2 - // instead of using the MOVQ above. + // then we don't need to use resultInArg0 for these ops. //p.From3 = new(obj.Addr) //p.From3.Type = obj.TYPE_REG //p.From3.Reg = gc.SSARegNum(v.Args[0]) - case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst, ssa.OpAMD64SUBWconst, ssa.OpAMD64SUBBconst: - x := gc.SSARegNum(v.Args[0]) - r := gc.SSARegNum(v) - // We have 3-op add (lea), so transforming a = b - const into - // a = b + (- const), saves us 1 instruction. 
We can't fit - // - (-1 << 31) into 4 bytes offset in lea. - // We handle 2-address just fine below. - if v.AuxInt == -1<<31 || x == r { - if x != r { - // This code compensates for the fact that the register allocator - // doesn't understand 2-address instructions yet. TODO: fix that. - p := gc.Prog(moveByType(v.Type)) - p.From.Type = obj.TYPE_REG - p.From.Reg = x - p.To.Type = obj.TYPE_REG - p.To.Reg = r - } - p := gc.Prog(v.Op.Asm()) - p.From.Type = obj.TYPE_CONST - p.From.Offset = v.AuxInt - p.To.Type = obj.TYPE_REG - p.To.Reg = r - } else if x == r && v.AuxInt == -1 { - var asm obj.As - // x = x - (-1) is the same as x++ - // See OpAMD64ADDQconst comments about inc vs add $1,reg - if v.Op == ssa.OpAMD64SUBQconst { - asm = x86.AINCQ - } else { - asm = x86.AINCL - } - p := gc.Prog(asm) - p.To.Type = obj.TYPE_REG - p.To.Reg = r - } else if x == r && v.AuxInt == 1 { - var asm obj.As - if v.Op == ssa.OpAMD64SUBQconst { - asm = x86.ADECQ - } else { - asm = x86.ADECL - } - p := gc.Prog(asm) - p.To.Type = obj.TYPE_REG - p.To.Reg = r - } else { - var asm obj.As - if v.Op == ssa.OpAMD64SUBQconst { - asm = x86.ALEAQ - } else { - asm = x86.ALEAL - } - p := gc.Prog(asm) - p.From.Type = obj.TYPE_MEM - p.From.Reg = x - p.From.Offset = -v.AuxInt - p.To.Type = obj.TYPE_REG - p.To.Reg = r - } - case ssa.OpAMD64ANDQconst, ssa.OpAMD64ANDLconst, ssa.OpAMD64ANDWconst, ssa.OpAMD64ANDBconst, + case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst, ssa.OpAMD64SUBWconst, ssa.OpAMD64SUBBconst, + ssa.OpAMD64ANDQconst, ssa.OpAMD64ANDLconst, ssa.OpAMD64ANDWconst, ssa.OpAMD64ANDBconst, ssa.OpAMD64ORQconst, ssa.OpAMD64ORLconst, ssa.OpAMD64ORWconst, ssa.OpAMD64ORBconst, ssa.OpAMD64XORQconst, ssa.OpAMD64XORLconst, ssa.OpAMD64XORWconst, ssa.OpAMD64XORBconst, - ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst, ssa.OpAMD64SHLWconst, - ssa.OpAMD64SHLBconst, ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst, ssa.OpAMD64SHRWconst, - ssa.OpAMD64SHRBconst, ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst, 
ssa.OpAMD64SARWconst, - ssa.OpAMD64SARBconst, ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst, - ssa.OpAMD64ROLBconst: - // This code compensates for the fact that the register allocator - // doesn't understand 2-address instructions yet. TODO: fix that. - x := gc.SSARegNum(v.Args[0]) + ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst, ssa.OpAMD64SHLWconst, ssa.OpAMD64SHLBconst, + ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst, ssa.OpAMD64SHRWconst, ssa.OpAMD64SHRBconst, + ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst, ssa.OpAMD64SARWconst, ssa.OpAMD64SARBconst, + ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst, ssa.OpAMD64ROLBconst: r := gc.SSARegNum(v) - if x != r { - p := gc.Prog(moveByType(v.Type)) - p.From.Type = obj.TYPE_REG - p.From.Reg = x - p.To.Type = obj.TYPE_REG - p.To.Reg = r + if r != gc.SSARegNum(v.Args[0]) { + v.Fatalf("input[0] and output not in same register %s", v.LongString()) } p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST @@ -821,9 +667,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Offset = v.AuxInt case ssa.OpCopy, ssa.OpAMD64MOVQconvert: // TODO: use MOVQreg for reg->reg copies instead of OpCopy? 
- if v.Type.IsMemory() { - return - } x := gc.SSARegNum(v.Args[0]) y := gc.SSARegNum(v) if x != y { @@ -969,14 +812,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL, ssa.OpAMD64NEGW, ssa.OpAMD64NEGB, ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL, ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL, ssa.OpAMD64NOTW, ssa.OpAMD64NOTB: - x := gc.SSARegNum(v.Args[0]) r := gc.SSARegNum(v) - if x != r { - p := gc.Prog(moveByType(v.Type)) - p.From.Type = obj.TYPE_REG - p.From.Reg = x - p.To.Type = obj.TYPE_REG - p.To.Reg = r + if r != gc.SSARegNum(v.Args[0]) { + v.Fatalf("input[0] and output not in same register %s", v.LongString()) } p := gc.Prog(v.Op.Asm()) p.To.Type = obj.TYPE_REG diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index d7f361dc2e..dcd5e6a5e1 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -1273,6 +1273,12 @@ (XORWconst [c] x) && int16(c)==0 -> x (XORBconst [c] x) && int8(c)==0 -> x +// Convert constant subtracts to constant adds +(SUBQconst [c] x) && c != -(1<<31) -> (ADDQconst [-c] x) +(SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x) +(SUBWconst [c] x) -> (ADDWconst [int64(int16(-c))] x) +(SUBBconst [c] x) -> (ADDBconst [int64(int8(-c))] x) + // generic constant folding // TODO: more of this (ADDQconst [c] (MOVQconst [d])) -> (MOVQconst [c+d]) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index b1698c0cf1..88bb6bc542 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -111,12 +111,14 @@ func init() { // Common regInfo var ( gp01 = regInfo{inputs: []regMask{}, outputs: gponly} - gp11 = regInfo{inputs: []regMask{gpsp}, outputs: gponly, clobbers: flags} + gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly, clobbers: flags} + gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly, clobbers: 
flags} gp11nf = regInfo{inputs: []regMask{gpsp}, outputs: gponly} // nf: no flags clobbered gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly} - gp21 = regInfo{inputs: []regMask{gpsp, gpsp}, outputs: gponly, clobbers: flags} + gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly, clobbers: flags} + gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly, clobbers: flags} gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly} - gp21shift = regInfo{inputs: []regMask{gpsp, cx}, outputs: []regMask{gp &^ cx}, clobbers: flags} + gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}, clobbers: flags} gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax}, clobbers: dx | flags} gp11hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, @@ -128,8 +130,8 @@ func init() { gp1flags = regInfo{inputs: []regMask{gpsp}, outputs: flagsonly} flagsgp = regInfo{inputs: flagsonly, outputs: gponly} - // for CMOVconst -- uses AX to hold constant temporary. AX input is moved before temp. - gp1flagsgp = regInfo{inputs: []regMask{gp, flags}, clobbers: ax | flags, outputs: []regMask{gp &^ ax}} + // for CMOVconst -- uses AX to hold constant temporary. 
+ gp1flagsgp = regInfo{inputs: []regMask{gp &^ ax, flags}, clobbers: ax | flags, outputs: []regMask{gp &^ ax}} readflags = regInfo{inputs: flagsonly, outputs: gponly} flagsgpax = regInfo{inputs: flagsonly, clobbers: ax | flags, outputs: []regMask{gp &^ ax}} @@ -186,14 +188,14 @@ func init() { {name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff"}, // fp64 indexed by 8i store // binary ops - {name: "ADDQ", argLength: 2, reg: gp21, asm: "ADDQ", commutative: true, resultInArg0: true}, // arg0 + arg1 - {name: "ADDL", argLength: 2, reg: gp21, asm: "ADDL", commutative: true, resultInArg0: true}, // arg0 + arg1 - {name: "ADDW", argLength: 2, reg: gp21, asm: "ADDL", commutative: true, resultInArg0: true}, // arg0 + arg1 - {name: "ADDB", argLength: 2, reg: gp21, asm: "ADDL", commutative: true, resultInArg0: true}, // arg0 + arg1 - {name: "ADDQconst", argLength: 1, reg: gp11, asm: "ADDQ", aux: "Int64", resultInArg0: true, typ: "UInt64"}, // arg0 + auxint - {name: "ADDLconst", argLength: 1, reg: gp11, asm: "ADDL", aux: "Int32", resultInArg0: true}, // arg0 + auxint - {name: "ADDWconst", argLength: 1, reg: gp11, asm: "ADDL", aux: "Int16", resultInArg0: true}, // arg0 + auxint - {name: "ADDBconst", argLength: 1, reg: gp11, asm: "ADDL", aux: "Int8", resultInArg0: true}, // arg0 + auxint + {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true}, // arg0 + arg1 + {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true}, // arg0 + arg1 + {name: "ADDW", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true}, // arg0 + arg1 + {name: "ADDB", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true}, // arg0 + arg1 + {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int64", typ: "UInt64"}, // arg0 + auxint + {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32"}, // arg0 + auxint + {name: "ADDWconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int16"}, // arg0 + auxint + {name: 
"ADDBconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int8"}, // arg0 + auxint {name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true}, // arg0 - arg1 {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true}, // arg0 - arg1 diff --git a/src/cmd/compile/internal/ssa/gen/main.go b/src/cmd/compile/internal/ssa/gen/main.go index db3c43d3a3..2aec4a324b 100644 --- a/src/cmd/compile/internal/ssa/gen/main.go +++ b/src/cmd/compile/internal/ssa/gen/main.go @@ -39,7 +39,7 @@ type opData struct { rematerializeable bool argLength int32 // number of arguments, if -1, then this operation has a variable number of arguments commutative bool // this operation is commutative (e.g. addition) - resultInArg0 bool // prefer v and v.Args[0] to be allocated to the same register + resultInArg0 bool // v and v.Args[0] must be allocated to the same register } type blockData struct { @@ -155,6 +155,12 @@ func genOp() { } if v.resultInArg0 { fmt.Fprintln(w, "resultInArg0: true,") + if v.reg.inputs[0] != v.reg.outputs[0] { + log.Fatalf("input[0] and output registers must be equal for %s", v.name) + } + if v.commutative && v.reg.inputs[1] != v.reg.outputs[0] { + log.Fatalf("input[1] and output registers must be equal for %s", v.name) + } } if a.name == "generic" { fmt.Fprintln(w, "generic:true,") diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index d10ea230ff..64807ec106 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -26,7 +26,7 @@ type opInfo struct { generic bool // this is a generic (arch-independent) opcode rematerializeable bool // this op is rematerializeable commutative bool // this operation is commutative (e.g. 
addition) - resultInArg0 bool // prefer v and v.Args[0] to be allocated to the same register + resultInArg0 bool // v and v.Args[0] must be allocated to the same register } type inputInfo struct { diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 5465d7f5ed..381422adfd 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -971,15 +971,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ADDQ", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AADDQ, + name: "ADDQ", + argLen: 2, + commutative: true, + asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -988,15 +987,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ADDL", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AADDL, + name: "ADDL", + argLen: 2, + commutative: true, + asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1005,15 +1003,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ADDW", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AADDL, + name: "ADDW", + argLen: 2, + commutative: true, + asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1022,15 +1019,14 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "ADDB", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AADDL, + name: "ADDB", + argLen: 2, + commutative: true, + asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1039,11 +1035,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ADDQconst", - auxType: auxInt64, - argLen: 1, - resultInArg0: true, - asm: x86.AADDQ, + name: "ADDQconst", + auxType: auxInt64, + argLen: 1, + asm: x86.AADDQ, reg: regInfo{ inputs: []inputInfo{ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -1055,11 +1050,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ADDLconst", - auxType: auxInt32, - argLen: 1, - resultInArg0: true, - asm: x86.AADDL, + name: "ADDLconst", + auxType: auxInt32, + argLen: 1, + asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -1071,11 +1065,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ADDWconst", - auxType: auxInt16, - argLen: 1, - resultInArg0: true, - asm: x86.AADDL, + name: "ADDWconst", + auxType: auxInt16, + argLen: 1, + asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -1087,11 +1080,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ADDBconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - asm: x86.AADDL, + name: "ADDBconst", + auxType: auxInt8, + argLen: 1, + asm: x86.AADDL, reg: regInfo{ inputs: []inputInfo{ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -1109,8 +1101,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 
R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1125,8 +1117,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1141,8 +1133,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1157,8 +1149,8 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1174,7 +1166,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1190,7 +1182,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX 
BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1206,7 +1198,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1222,7 +1214,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASUBL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1238,8 +1230,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AIMULQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1255,8 +1247,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AIMULL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1272,8 +1264,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AIMULW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 
R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1289,8 +1281,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AIMULW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1306,7 +1298,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AIMULQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1322,7 +1314,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AIMULL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1338,7 +1330,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AIMULW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1354,7 +1346,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AIMULW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1489,8 +1481,8 @@ var opcodeTable = [...]opInfo{ resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 
R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1686,8 +1678,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1703,8 +1695,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1720,8 +1712,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1737,8 +1729,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1754,7 +1746,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 
65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1770,7 +1762,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1786,7 +1778,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1802,7 +1794,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AANDL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1818,8 +1810,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1835,8 +1827,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1852,8 +1844,8 @@ var opcodeTable = 
[...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1869,8 +1861,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1886,7 +1878,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1902,7 +1894,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1918,7 +1910,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1934,7 +1926,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS 
outputs: []regMask{ @@ -1950,8 +1942,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1967,8 +1959,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -1984,8 +1976,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2001,8 +1993,8 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2018,7 +2010,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 
R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2034,7 +2026,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2050,7 +2042,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2066,7 +2058,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AXORL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2334,11 +2326,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ - 65517, // AX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, }, }, @@ -2350,11 +2342,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ - 65517, // AX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, }, }, @@ -2366,11 +2358,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, 
// AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ - 65517, // AX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, }, }, @@ -2382,11 +2374,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ - 65517, // AX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, }, }, @@ -2398,7 +2390,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2414,7 +2406,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHLL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2430,7 +2422,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHLL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2446,7 +2438,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHLL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2462,11 +2454,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65535}, // AX CX DX BX SP BP SI 
DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ - 65517, // AX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, }, }, @@ -2478,11 +2470,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ - 65517, // AX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, }, }, @@ -2494,11 +2486,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ - 65517, // AX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, }, }, @@ -2510,11 +2502,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ - 65517, // AX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, }, }, @@ -2526,7 +2518,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2542,7 +2534,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX 
DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2558,7 +2550,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2574,7 +2566,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASHRB, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2590,11 +2582,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ - 65517, // AX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, }, }, @@ -2606,11 +2598,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ - 65517, // AX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, }, }, @@ -2622,11 +2614,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ - 65517, // AX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + 65519, // AX CX DX 
BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, }, }, @@ -2638,11 +2630,11 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 2}, // CX - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ - 65517, // AX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, }, }, @@ -2654,7 +2646,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2670,7 +2662,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2686,7 +2678,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2702,7 +2694,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ASARB, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2718,7 +2710,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AROLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2734,7 +2726,7 @@ var 
opcodeTable = [...]opInfo{ asm: x86.AROLL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2750,7 +2742,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AROLW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2766,7 +2758,7 @@ var opcodeTable = [...]opInfo{ asm: x86.AROLB, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2781,7 +2773,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ANEGQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2796,7 +2788,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ANEGL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2811,7 +2803,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ANEGL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2826,7 +2818,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ANEGL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 
}, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2841,7 +2833,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ANOTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2856,7 +2848,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ANOTL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2871,7 +2863,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ANOTL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2886,7 +2878,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ANOTL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2900,7 +2892,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABSFQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2914,7 +2906,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABSFL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2928,7 +2920,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABSFW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 
R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2942,7 +2934,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABSRQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2956,7 +2948,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABSRL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2970,7 +2962,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABSRW, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -2987,7 +2979,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 8589934592}, // FLAGS - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934593, // AX FLAGS outputs: []regMask{ @@ -3004,7 +2996,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 8589934592}, // FLAGS - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934593, // AX FLAGS outputs: []regMask{ @@ -3021,7 +3013,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 8589934592}, // FLAGS - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934593, // AX FLAGS outputs: []regMask{ @@ -3038,7 +3030,7 @@ var opcodeTable = [...]opInfo{ reg: 
regInfo{ inputs: []inputInfo{ {1, 8589934592}, // FLAGS - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934593, // AX FLAGS outputs: []regMask{ @@ -3055,7 +3047,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 8589934592}, // FLAGS - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934593, // AX FLAGS outputs: []regMask{ @@ -3072,7 +3064,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {1, 8589934592}, // FLAGS - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65518}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934593, // AX FLAGS outputs: []regMask{ @@ -3087,7 +3079,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABSWAPQ, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ @@ -3102,7 +3094,7 @@ var opcodeTable = [...]opInfo{ asm: x86.ABSWAPL, reg: regInfo{ inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 }, clobbers: 8589934592, // FLAGS outputs: []regMask{ diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index aec23a1368..dfae8612d6 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -527,6 +527,18 @@ func (s *regAllocState) advanceUses(v *Value) { } } +// liveAfterCurrentInstruction reports whether v is live after +// the current instruction is completed. v must be used by the +// current instruction. 
+func (s *regAllocState) liveAfterCurrentInstruction(v *Value) bool { + u := s.values[v.ID].uses + d := u.dist + for u != nil && u.dist == d { + u = u.next + } + return u != nil && u.dist > d +} + // Sets the state of the registers to that encoded in regs. func (s *regAllocState) setState(regs []endReg) { s.freeRegs(s.used) @@ -891,6 +903,27 @@ func (s *regAllocState) regalloc(f *Func) { args[i.idx] = s.allocValToReg(v.Args[i.idx], i.regs, true, v.Line) } + // If the output clobbers the input register, and the input register is + // live beyond the instruction, make another copy of the input register so + // we don't have to reload the value from the spill location. + if opcodeTable[v.Op].resultInArg0 && + s.liveAfterCurrentInstruction(v.Args[0]) && + countRegs(s.values[v.Args[0].ID].regs) == 1 { + + if opcodeTable[v.Op].commutative && + (!s.liveAfterCurrentInstruction(v.Args[1]) || + countRegs(s.values[v.Args[1].ID].regs) > 1) { + // Input #1 is dead after the instruction, or we have + // more than one copy of it in a register. Either way, + // use that input as the one that is clobbered. + args[0], args[1] = args[1], args[0] + } else { + m := s.compatRegs(v.Args[0].Type) + m &^= s.values[v.Args[0].ID].regs // a register not already holding v.Args[0] + s.allocValToReg(v.Args[0], m, true, v.Line) + } + } + // Now that all args are in regs, we're ready to issue the value itself. // Before we pick a register for the output value, allow input registers // to be deallocated. We do this here so that the output can use the @@ -908,19 +941,9 @@ func (s *regAllocState) regalloc(f *Func) { s.f.Fatalf("bad mask %s\n", v.LongString()) } if opcodeTable[v.Op].resultInArg0 { + // Output must use the same register as input 0. 
r := register(s.f.getHome(args[0].ID).(*Register).Num) - if (mask&^s.used)>>r&1 != 0 { - mask = regMask(1) << r - } - if opcodeTable[v.Op].commutative { - r := register(s.f.getHome(args[1].ID).(*Register).Num) - if (mask&^s.used)>>r&1 != 0 { - mask = regMask(1) << r - } - } - // TODO: enforce resultInArg0 always, instead of treating it - // as a hint. Then we don't need the special cases adding - // moves all throughout ssa.go:genValue. + mask = regMask(1) << r } r := s.allocReg(v, mask) s.assignReg(r, v, v) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 34a393bbc5..a6600513fa 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -16653,6 +16653,17 @@ func rewriteValueAMD64_OpAMD64SUBBconst(v *Value, config *Config) bool { v.AddArg(x) return true } + // match: (SUBBconst [c] x) + // cond: + // result: (ADDBconst [int64(int8(-c))] x) + for { + c := v.AuxInt + x := v.Args[0] + v.reset(OpAMD64ADDBconst) + v.AuxInt = int64(int8(-c)) + v.AddArg(x) + return true + } // match: (SUBBconst (MOVBconst [d]) [c]) // cond: // result: (MOVBconst [int64(int8(d-c))]) @@ -16751,6 +16762,17 @@ func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool { v.AddArg(x) return true } + // match: (SUBLconst [c] x) + // cond: + // result: (ADDLconst [int64(int32(-c))] x) + for { + c := v.AuxInt + x := v.Args[0] + v.reset(OpAMD64ADDLconst) + v.AuxInt = int64(int32(-c)) + v.AddArg(x) + return true + } // match: (SUBLconst (MOVLconst [d]) [c]) // cond: // result: (MOVLconst [int64(int32(d-c))]) @@ -16854,6 +16876,20 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool { v.AddArg(x) return true } + // match: (SUBQconst [c] x) + // cond: c != -(1<<31) + // result: (ADDQconst [-c] x) + for { + c := v.AuxInt + x := v.Args[0] + if !(c != -(1 << 31)) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = -c + v.AddArg(x) + return true + } // 
match: (SUBQconst (MOVQconst [d]) [c]) // cond: // result: (MOVQconst [d-c]) @@ -16955,6 +16991,17 @@ func rewriteValueAMD64_OpAMD64SUBWconst(v *Value, config *Config) bool { v.AddArg(x) return true } + // match: (SUBWconst [c] x) + // cond: + // result: (ADDWconst [int64(int16(-c))] x) + for { + c := v.AuxInt + x := v.Args[0] + v.reset(OpAMD64ADDWconst) + v.AuxInt = int64(int16(-c)) + v.AddArg(x) + return true + } // match: (SUBWconst (MOVWconst [d]) [c]) // cond: // result: (MOVWconst [int64(int16(d-c))]) -- cgit v1.3 From 2a4158207edb499f8b210aaa7a9af103b93b5ac7 Mon Sep 17 00:00:00 2001 From: Michael Munday Date: Sun, 10 Apr 2016 21:58:37 -0400 Subject: cmd/compile/internal/gc: refactor cgen_div This commit adds two new functions to cgen.go: hasHMUL64 and hasRROTC64. These are used to determine whether or not an architecture supports the instructions needed to perform an optimization in cgen_div. This commit should not affect existing architectures (although it does add s390x to the new functions). However, since most architectures support HMUL the hasHMUL64 function could be modified to enable most of the optimizations in cgen_div on those platforms. Change-Id: I33bf329ddeb6cf2954bd17b7c161012de352fb62 Reviewed-on: https://go-review.googlesource.com/21775 Reviewed-by: Matthew Dempsky Run-TryBot: Matthew Dempsky Reviewed-by: Josh Bleecher Snyder TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/cgen.go | 68 ++++++++++++++++++++++++++----------- 1 file changed, 49 insertions(+), 19 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go index a9cedf7cfc..eacbc30f87 100644 --- a/src/cmd/compile/internal/gc/cgen.go +++ b/src/cmd/compile/internal/gc/cgen.go @@ -2622,24 +2622,48 @@ func cgen_ret(n *Node) { } } +// hasHMUL64 reports whether the architecture supports 64-bit +// signed and unsigned high multiplication (OHMUL). 
+func hasHMUL64() bool { + switch Ctxt.Arch.Family { + case sys.AMD64, sys.S390X: + return true + case sys.ARM, sys.ARM64, sys.I386, sys.MIPS64, sys.PPC64: + return false + } + Fatalf("unknown architecture") + return false +} + +// hasRROTC64 reports whether the architecture supports 64-bit +// rotate through carry instructions (ORROTC). +func hasRROTC64() bool { + switch Ctxt.Arch.Family { + case sys.AMD64: + return true + case sys.ARM, sys.ARM64, sys.I386, sys.MIPS64, sys.PPC64, sys.S390X: + return false + } + Fatalf("unknown architecture") + return false +} + // generate division according to op, one of: // res = nl / nr // res = nl % nr func cgen_div(op Op, nl *Node, nr *Node, res *Node) { var w int - // TODO(rsc): arm64 needs to support the relevant instructions - // in peep and optoas in order to enable this. - // TODO(rsc): ppc64 needs to support the relevant instructions - // in peep and optoas in order to enable this. - if nr.Op != OLITERAL || Ctxt.Arch.Family == sys.MIPS64 || Ctxt.Arch.Family == sys.ARM64 || Ctxt.Arch.Family == sys.PPC64 { + // Architectures need to support 64-bit high multiplications + // (OHMUL) in order to perform divide by constant optimizations. + if nr.Op != OLITERAL || !hasHMUL64() { goto longdiv } w = int(nl.Type.Width * 8) // Front end handled 32-bit division. We only need to handle 64-bit. - // try to do division by multiply by (2^w)/d - // see hacker's delight chapter 10 + // Try to do division using multiplication: (2^w)/d. + // See Hacker's Delight, chapter 10. switch Simtype[nl.Type.Etype] { default: goto longdiv @@ -2652,6 +2676,17 @@ func cgen_div(op Op, nl *Node, nr *Node, res *Node) { if m.Bad != 0 { break } + + // In order to add the numerator we need to be able to + // avoid overflow. This is done by shifting the result of the + // addition right by 1 and inserting the carry bit into + // the MSB. For now this needs the RROTC instruction. + // TODO(mundaym): Hacker's Delight 2nd ed. 
chapter 10 proposes + // an alternative sequence of instructions for architectures + // that do not have a shift right with carry instruction. + if m.Ua != 0 && !hasRROTC64() { + goto longdiv + } if op == OMOD { goto longmod } @@ -2665,7 +2700,7 @@ func cgen_div(op Op, nl *Node, nr *Node, res *Node) { Thearch.Cgen_hmul(&n1, &n2, &n3) if m.Ua != 0 { - // need to add numerator accounting for overflow + // Need to add numerator accounting for overflow. Thearch.Gins(Thearch.Optoas(OADD, nl.Type), &n1, &n3) Nodconst(&n2, nl.Type, 1) @@ -2703,7 +2738,7 @@ func cgen_div(op Op, nl *Node, nr *Node, res *Node) { Thearch.Cgen_hmul(&n1, &n2, &n3) if m.Sm < 0 { - // need to add numerator + // Need to add numerator (cannot overflow). Thearch.Gins(Thearch.Optoas(OADD, nl.Type), &n1, &n3) } @@ -2716,8 +2751,8 @@ func cgen_div(op Op, nl *Node, nr *Node, res *Node) { Thearch.Gins(Thearch.Optoas(OSUB, nl.Type), &n1, &n3) // added if m.Sd < 0 { - // this could probably be removed - // by factoring it into the multiplier + // This could probably be removed by factoring it into + // the multiplier. Thearch.Gins(Thearch.Optoas(OMINUS, nl.Type), nil, &n3) } @@ -2729,14 +2764,14 @@ func cgen_div(op Op, nl *Node, nr *Node, res *Node) { goto longdiv - // division and mod using (slow) hardware instruction + // Division and mod using (slow) hardware instruction. longdiv: Thearch.Dodiv(op, nl, nr, res) return - // mod using formula A%B = A-(A/B*B) but - // we know that there is a fast algorithm for A/B + // Mod using formula A%B = A-(A/B*B) but + // we know that there is a fast algorithm for A/B. 
longmod: var n1 Node Regalloc(&n1, nl.Type, res) @@ -2746,11 +2781,6 @@ longmod: Regalloc(&n2, nl.Type, nil) cgen_div(ODIV, &n1, nr, &n2) a := Thearch.Optoas(OMUL, nl.Type) - if w == 8 { - // use 2-operand 16-bit multiply - // because there is no 2-operand 8-bit multiply - a = Thearch.Optoas(OMUL, Types[TINT16]) // XXX was IMULW - } if !Smallintconst(nr) { var n3 Node -- cgit v1.3 From 6c6089b3fdba9eb0cff863a03074dbac47c92f63 Mon Sep 17 00:00:00 2001 From: Alexandru Moșoi Date: Fri, 1 Apr 2016 15:09:19 +0200 Subject: cmd/compile: bce when max and limit are consts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Removes 49 more bound checks in make.bash. For example: var a[100]int for i := 0; i < 50; i++ { use a[i+25] } Change-Id: I85e0130ee5d07f0ece9b17044bba1a2047414ce7 Reviewed-on: https://go-review.googlesource.com/21379 Reviewed-by: David Chase Run-TryBot: Alexandru Moșoi TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/ssa/loopbce.go | 41 ++++++++++++++++++ test/checkbce.go | 15 ++++++- test/loopbce.go | 77 +++++++++++++++++++++++++++++++++ 3 files changed, 132 insertions(+), 1 deletion(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/loopbce.go b/src/cmd/compile/internal/ssa/loopbce.go index 17486ac49f..c937ead1b2 100644 --- a/src/cmd/compile/internal/ssa/loopbce.go +++ b/src/cmd/compile/internal/ssa/loopbce.go @@ -240,6 +240,37 @@ func removeBoundsChecks(f *Func, sdom sparseTree, m map[*Value]indVar) { } skip2: + // Simplify + // (IsInBounds (Add64 ind) (Const64 [c])) where 0 <= min <= ind < max <= (Const64 [c]) + // (IsSliceInBounds ind (Const64 [c])) where 0 <= min <= ind < max <= (Const64 [c]) + if v.Op == OpIsInBounds || v.Op == OpIsSliceInBounds { + ind, add := dropAdd64(v.Args[0]) + if ind.Op != OpPhi { + goto skip3 + } + + // ind + add >= 0 <-> min + add >= 0 <-> min >= -add + if iv, has := m[ind]; has && sdom.isAncestorEq(iv.entry, b) && isGreaterOrEqualThan(iv.min, -add) { + 
if !v.Args[1].isGenericIntConst() || !iv.max.isGenericIntConst() { + goto skip3 + } + + limit := v.Args[1].AuxInt + if v.Op == OpIsSliceInBounds { + // If limit++ overflows signed integer then 0 <= max && max <= limit will be false. + limit++ + } + + if max := iv.max.AuxInt + add; 0 <= max && max <= limit { // handle overflow + if f.pass.debug > 0 { + f.Config.Warnl(b.Line, "Found redundant (%s ind %d), ind < %d", v.Op, v.Args[1].AuxInt, iv.max.AuxInt+add) + } + goto simplify + } + } + } + skip3: + continue simplify: @@ -258,3 +289,13 @@ func dropAdd64(v *Value) (*Value, int64) { } return v, 0 } + +func isGreaterOrEqualThan(v *Value, c int64) bool { + if c == 0 { + return isNonNegative(v) + } + if v.isGenericIntConst() && v.AuxInt >= c { + return true + } + return false +} diff --git a/test/checkbce.go b/test/checkbce.go index 988375fcc7..fa0ea12803 100644 --- a/test/checkbce.go +++ b/test/checkbce.go @@ -57,7 +57,7 @@ func f6(a [32]int, b [64]int, i int) { useInt(b[uint64(i*0x07C4ACDD)>>58]) useInt(a[uint(i*0x07C4ACDD)>>59]) - // The following bounds should removed as they can overflow. + // The following bounds should not be removed because they can overflow. useInt(a[uint32(i*0x106297f105d0cc86)>>26]) // ERROR "Found IsInBounds$" useInt(b[uint64(i*0x106297f105d0cc86)>>57]) // ERROR "Found IsInBounds$" useInt(a[int32(i*0x106297f105d0cc86)>>26]) // ERROR "Found IsInBounds$" @@ -89,6 +89,19 @@ func g3(a []int) { } } +func g4(a [100]int) { + for i := 10; i < 50; i++ { + useInt(a[i-10]) + useInt(a[i]) + useInt(a[i+25]) + useInt(a[i+50]) + + // The following are out of bounds. 
+ useInt(a[i-11]) // ERROR "Found IsInBounds$" + useInt(a[i+51]) // ERROR "Found IsInBounds$" + } +} + //go:noinline func useInt(a int) { } diff --git a/test/loopbce.go b/test/loopbce.go index eb44092705..ea195217e6 100644 --- a/test/loopbce.go +++ b/test/loopbce.go @@ -139,6 +139,70 @@ func h2(a []byte) { } } +func k0(a [100]int) [100]int { + for i := 10; i < 90; i++ { // ERROR "Induction variable with minimum 10 and increment 1$" + a[i-11] = i + a[i-10] = i // ERROR "Found redundant \(IsInBounds ind 100\), ind < 80$" + a[i-5] = i // ERROR "Found redundant \(IsInBounds ind 100\), ind < 85$" + a[i] = i // ERROR "Found redundant \(IsInBounds ind 100\), ind < 90$" + a[i+5] = i // ERROR "Found redundant \(IsInBounds ind 100\), ind < 95$" + a[i+10] = i // ERROR "Found redundant \(IsInBounds ind 100\), ind < 100$" + a[i+11] = i + } + return a +} + +func k1(a [100]int) [100]int { + for i := 10; i < 90; i++ { // ERROR "Induction variable with minimum 10 and increment 1$" + useSlice(a[:i-11]) + useSlice(a[:i-10]) // ERROR "Found redundant \(IsSliceInBounds ind 100\), ind < 80$" + useSlice(a[:i-5]) // ERROR "Found redundant \(IsSliceInBounds ind 100\), ind < 85$" + useSlice(a[:i]) // ERROR "Found redundant \(IsSliceInBounds ind 100\), ind < 90$" + useSlice(a[:i+5]) // ERROR "Found redundant \(IsSliceInBounds ind 100\), ind < 95$" + useSlice(a[:i+10]) // ERROR "Found redundant \(IsSliceInBounds ind 100\), ind < 100$" + useSlice(a[:i+11]) // ERROR "Found redundant \(IsSliceInBounds ind 100\), ind < 101$" + + } + return a +} + +func k2(a [100]int) [100]int { + for i := 10; i < 90; i++ { // ERROR "Induction variable with minimum 10 and increment 1$" + useSlice(a[i-11:]) + useSlice(a[i-10:]) // ERROR "Found redundant \(IsSliceInBounds ind 100\), ind < 80$" + useSlice(a[i-5:]) // ERROR "Found redundant \(IsSliceInBounds ind 100\), ind < 85$" + useSlice(a[i:]) // ERROR "Found redundant \(IsSliceInBounds ind 100\), ind < 90$" + useSlice(a[i+5:]) // ERROR "Found redundant 
\(IsSliceInBounds ind 100\), ind < 95$" + useSlice(a[i+10:]) // ERROR "Found redundant \(IsSliceInBounds ind 100\), ind < 100$" + useSlice(a[i+11:]) // ERROR "Found redundant \(IsSliceInBounds ind 100\), ind < 101$" + } + return a +} + +func k3(a [100]int) [100]int { + for i := -10; i < 90; i++ { // ERROR "Induction variable with minimum -10 and increment 1$" + a[i+10] = i // ERROR "Found redundant \(IsInBounds ind 100\), ind < 100$" + } + return a +} + +func k4(a [100]int) [100]int { + min := (-1) << 63 + for i := min; i < min+50; i++ { // ERROR "Induction variable with minimum -9223372036854775808 and increment 1$" + a[i-min] = i // ERROR "Found redundant \(IsInBounds ind 100\), ind < 50$" + } + return a +} + +func k5(a [100]int) [100]int { + max := (1 << 63) - 1 + for i := max - 50; i < max; i++ { // ERROR "Induction variable with minimum 9223372036854775757 and increment 1$" + a[i-max+50] = i + a[i-(max-70)] = i // ERROR "Found redundant \(IsInBounds ind 100\), ind < 70$" + } + return a +} + func nobce1() { // tests overflow of max-min a := int64(9223372036854774057) @@ -168,9 +232,22 @@ func nobce2(a string) { } } +func nobce3(a [100]int64) [100]int64 { + min := int64((-1) << 63) + max := int64((1 << 63) - 1) + for i := min; i < max; i++ { // ERROR "Induction variable with minimum -9223372036854775808 and increment 1$" + a[i] = i + } + return a +} + //go:noinline func useString(a string) { } +//go:noinline +func useSlice(a []int) { +} + func main() { } -- cgit v1.3 From b04e145248d5d3721a41d4bb26704fdb43caaf38 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 31 Mar 2016 21:24:10 -0700 Subject: cmd/compile: fix naming of decomposed structs When a struct is SSAable, we will name its component parts by their field names. For example, type T struct { a, b, c int } If we ever need to spill a variable x of type T, we will spill its individual components to variables named x.a, x.b, and x.c. 
Change-Id: I857286ff1f2597f2c4bbd7b4c0b936386fb37131 Reviewed-on: https://go-review.googlesource.com/21389 Reviewed-by: David Chase --- src/cmd/compile/internal/gc/ssa.go | 16 ++++++++- src/cmd/compile/internal/gc/type.go | 3 ++ src/cmd/compile/internal/ssa/config.go | 1 + src/cmd/compile/internal/ssa/decompose.go | 27 ++++++++++---- src/cmd/compile/internal/ssa/export_test.go | 3 ++ src/cmd/compile/internal/ssa/type.go | 56 +++++++++++++++-------------- src/cmd/compile/internal/ssa/type_test.go | 49 ++++++++++++------------- 7 files changed, 97 insertions(+), 58 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index d69559d945..5ee370395b 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -4227,7 +4227,7 @@ func (e *ssaExport) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.Local func (e *ssaExport) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) { n := name.N.(*Node) - ptrType := Ptrto(n.Type.Elem()) + ptrType := Ptrto(name.Type.ElemType().(*Type)) lenType := Types[TINT] if n.Class == PAUTO && !n.Addrtaken { // Split this slice up into three separate variables. @@ -4261,6 +4261,20 @@ func (e *ssaExport) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSl return ssa.LocalSlot{n, t, name.Off}, ssa.LocalSlot{n, t, name.Off + s} } +func (e *ssaExport) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot { + n := name.N.(*Node) + st := name.Type + ft := st.FieldType(i) + if n.Class == PAUTO && !n.Addrtaken { + // Note: the _ field may appear several times. But + // have no fear, identically-named but distinct Autos are + // ok, albeit maybe confusing for a debugger. + x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft) + return ssa.LocalSlot{x, ft, 0} + } + return ssa.LocalSlot{n, ft, name.Off + st.FieldOff(i)} +} + // namedAuto returns a new AUTO variable with the given name and type. 
func (e *ssaExport) namedAuto(name string, typ ssa.Type) ssa.GCNode { t := typ.(*Type) diff --git a/src/cmd/compile/internal/gc/type.go b/src/cmd/compile/internal/gc/type.go index eee8e0384a..25c1bcc203 100644 --- a/src/cmd/compile/internal/gc/type.go +++ b/src/cmd/compile/internal/gc/type.go @@ -1193,6 +1193,9 @@ func (t *Type) FieldType(i int) ssa.Type { func (t *Type) FieldOff(i int) int64 { return t.Field(i).Offset } +func (t *Type) FieldName(i int) string { + return t.Field(i).Sym.Name +} func (t *Type) NumElem() int64 { t.wantEtype(TARRAY) diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index 33357124fc..2a676e39b3 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -103,6 +103,7 @@ type Frontend interface { SplitInterface(LocalSlot) (LocalSlot, LocalSlot) SplitSlice(LocalSlot) (LocalSlot, LocalSlot, LocalSlot) SplitComplex(LocalSlot) (LocalSlot, LocalSlot) + SplitStruct(LocalSlot, int) LocalSlot // Line returns a string describing the given line number. Line(int32) string diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go index eab9974106..de02885d76 100644 --- a/src/cmd/compile/internal/ssa/decompose.go +++ b/src/cmd/compile/internal/ssa/decompose.go @@ -21,6 +21,7 @@ func decomposeBuiltIn(f *Func) { // NOTE: the component values we are making are dead at this point. // We must do the opt pass before any deadcode elimination or we will // lose the name->value correspondence. 
+ var newNames []LocalSlot for _, name := range f.Names { t := name.Type switch { @@ -32,29 +33,31 @@ func decomposeBuiltIn(f *Func) { elemType = f.Config.fe.TypeFloat32() } rName, iName := f.Config.fe.SplitComplex(name) - f.Names = append(f.Names, rName, iName) + newNames = append(newNames, rName, iName) for _, v := range f.NamedValues[name] { r := v.Block.NewValue1(v.Line, OpComplexReal, elemType, v) i := v.Block.NewValue1(v.Line, OpComplexImag, elemType, v) f.NamedValues[rName] = append(f.NamedValues[rName], r) f.NamedValues[iName] = append(f.NamedValues[iName], i) } + delete(f.NamedValues, name) case t.IsString(): ptrType := f.Config.fe.TypeBytePtr() lenType := f.Config.fe.TypeInt() ptrName, lenName := f.Config.fe.SplitString(name) - f.Names = append(f.Names, ptrName, lenName) + newNames = append(newNames, ptrName, lenName) for _, v := range f.NamedValues[name] { ptr := v.Block.NewValue1(v.Line, OpStringPtr, ptrType, v) len := v.Block.NewValue1(v.Line, OpStringLen, lenType, v) f.NamedValues[ptrName] = append(f.NamedValues[ptrName], ptr) f.NamedValues[lenName] = append(f.NamedValues[lenName], len) } + delete(f.NamedValues, name) case t.IsSlice(): ptrType := f.Config.fe.TypeBytePtr() lenType := f.Config.fe.TypeInt() ptrName, lenName, capName := f.Config.fe.SplitSlice(name) - f.Names = append(f.Names, ptrName, lenName, capName) + newNames = append(newNames, ptrName, lenName, capName) for _, v := range f.NamedValues[name] { ptr := v.Block.NewValue1(v.Line, OpSlicePtr, ptrType, v) len := v.Block.NewValue1(v.Line, OpSliceLen, lenType, v) @@ -63,20 +66,25 @@ func decomposeBuiltIn(f *Func) { f.NamedValues[lenName] = append(f.NamedValues[lenName], len) f.NamedValues[capName] = append(f.NamedValues[capName], cap) } + delete(f.NamedValues, name) case t.IsInterface(): ptrType := f.Config.fe.TypeBytePtr() typeName, dataName := f.Config.fe.SplitInterface(name) - f.Names = append(f.Names, typeName, dataName) + newNames = append(newNames, typeName, dataName) for _, v := range 
f.NamedValues[name] { typ := v.Block.NewValue1(v.Line, OpITab, ptrType, v) data := v.Block.NewValue1(v.Line, OpIData, ptrType, v) f.NamedValues[typeName] = append(f.NamedValues[typeName], typ) f.NamedValues[dataName] = append(f.NamedValues[dataName], data) } + delete(f.NamedValues, name) case t.Size() > f.Config.IntSize: f.Unimplementedf("undecomposed named type %s", t) + default: + newNames = append(newNames, name) } } + f.Names = newNames } func decomposeBuiltInPhi(v *Value) { @@ -181,25 +189,32 @@ func decomposeUser(f *Func) { // We must do the opt pass before any deadcode elimination or we will // lose the name->value correspondence. i := 0 + var fnames []LocalSlot + var newNames []LocalSlot for _, name := range f.Names { t := name.Type switch { case t.IsStruct(): n := t.NumFields() + fnames = fnames[:0] + for i := 0; i < n; i++ { + fnames = append(fnames, f.Config.fe.SplitStruct(name, i)) + } for _, v := range f.NamedValues[name] { for i := 0; i < n; i++ { - fname := LocalSlot{name.N, t.FieldType(i), name.Off + t.FieldOff(i)} // TODO: use actual field name? x := v.Block.NewValue1I(v.Line, OpStructSelect, t.FieldType(i), int64(i), v) - f.NamedValues[fname] = append(f.NamedValues[fname], x) + f.NamedValues[fnames[i]] = append(f.NamedValues[fnames[i]], x) } } delete(f.NamedValues, name) + newNames = append(newNames, fnames...) default: f.Names[i] = name i++ } } f.Names = f.Names[:i] + f.Names = append(f.Names, newNames...) 
} func decomposeUserPhi(v *Value) { diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go index ce577ef055..0a67de9f05 100644 --- a/src/cmd/compile/internal/ssa/export_test.go +++ b/src/cmd/compile/internal/ssa/export_test.go @@ -48,6 +48,9 @@ func (d DummyFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) { } return LocalSlot{s.N, d.TypeFloat32(), s.Off}, LocalSlot{s.N, d.TypeFloat32(), s.Off + 4} } +func (d DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot { + return LocalSlot{s.N, s.Type.FieldType(i), s.Off + s.Type.FieldOff(i)} +} func (DummyFrontend) Line(line int32) string { return "unknown.go:0" } diff --git a/src/cmd/compile/internal/ssa/type.go b/src/cmd/compile/internal/ssa/type.go index 9643b07556..2a3de282cb 100644 --- a/src/cmd/compile/internal/ssa/type.go +++ b/src/cmd/compile/internal/ssa/type.go @@ -31,9 +31,10 @@ type Type interface { ElemType() Type // given []T or *T or [n]T, return T PtrTo() Type // given T, return *T - NumFields() int // # of fields of a struct - FieldType(i int) Type // type of ith field of the struct - FieldOff(i int) int64 // offset of ith field of the struct + NumFields() int // # of fields of a struct + FieldType(i int) Type // type of ith field of the struct + FieldOff(i int) int64 // offset of ith field of the struct + FieldName(i int) string // name of ith field of the struct NumElem() int64 // # of elements of an array @@ -53,30 +54,31 @@ type CompilerType struct { Int128 bool } -func (t *CompilerType) Size() int64 { return t.size } // Size in bytes -func (t *CompilerType) Alignment() int64 { return 0 } -func (t *CompilerType) IsBoolean() bool { return false } -func (t *CompilerType) IsInteger() bool { return false } -func (t *CompilerType) IsSigned() bool { return false } -func (t *CompilerType) IsFloat() bool { return false } -func (t *CompilerType) IsComplex() bool { return false } -func (t *CompilerType) IsPtrShaped() bool { return false } -func (t 
*CompilerType) IsString() bool { return false } -func (t *CompilerType) IsSlice() bool { return false } -func (t *CompilerType) IsArray() bool { return false } -func (t *CompilerType) IsStruct() bool { return false } -func (t *CompilerType) IsInterface() bool { return false } -func (t *CompilerType) IsMemory() bool { return t.Memory } -func (t *CompilerType) IsFlags() bool { return t.Flags } -func (t *CompilerType) IsVoid() bool { return t.Void } -func (t *CompilerType) String() string { return t.Name } -func (t *CompilerType) SimpleString() string { return t.Name } -func (t *CompilerType) ElemType() Type { panic("not implemented") } -func (t *CompilerType) PtrTo() Type { panic("not implemented") } -func (t *CompilerType) NumFields() int { panic("not implemented") } -func (t *CompilerType) FieldType(i int) Type { panic("not implemented") } -func (t *CompilerType) FieldOff(i int) int64 { panic("not implemented") } -func (t *CompilerType) NumElem() int64 { panic("not implemented") } +func (t *CompilerType) Size() int64 { return t.size } // Size in bytes +func (t *CompilerType) Alignment() int64 { return 0 } +func (t *CompilerType) IsBoolean() bool { return false } +func (t *CompilerType) IsInteger() bool { return false } +func (t *CompilerType) IsSigned() bool { return false } +func (t *CompilerType) IsFloat() bool { return false } +func (t *CompilerType) IsComplex() bool { return false } +func (t *CompilerType) IsPtrShaped() bool { return false } +func (t *CompilerType) IsString() bool { return false } +func (t *CompilerType) IsSlice() bool { return false } +func (t *CompilerType) IsArray() bool { return false } +func (t *CompilerType) IsStruct() bool { return false } +func (t *CompilerType) IsInterface() bool { return false } +func (t *CompilerType) IsMemory() bool { return t.Memory } +func (t *CompilerType) IsFlags() bool { return t.Flags } +func (t *CompilerType) IsVoid() bool { return t.Void } +func (t *CompilerType) String() string { return t.Name } +func (t 
*CompilerType) SimpleString() string { return t.Name } +func (t *CompilerType) ElemType() Type { panic("not implemented") } +func (t *CompilerType) PtrTo() Type { panic("not implemented") } +func (t *CompilerType) NumFields() int { panic("not implemented") } +func (t *CompilerType) FieldType(i int) Type { panic("not implemented") } +func (t *CompilerType) FieldOff(i int) int64 { panic("not implemented") } +func (t *CompilerType) FieldName(i int) string { panic("not implemented") } +func (t *CompilerType) NumElem() int64 { panic("not implemented") } // Cmp is a comparison between values a and b. // -1 if a < b diff --git a/src/cmd/compile/internal/ssa/type_test.go b/src/cmd/compile/internal/ssa/type_test.go index cd80abf03f..3b1a892083 100644 --- a/src/cmd/compile/internal/ssa/type_test.go +++ b/src/cmd/compile/internal/ssa/type_test.go @@ -24,30 +24,31 @@ type TypeImpl struct { Name string } -func (t *TypeImpl) Size() int64 { return t.Size_ } -func (t *TypeImpl) Alignment() int64 { return t.Align } -func (t *TypeImpl) IsBoolean() bool { return t.Boolean } -func (t *TypeImpl) IsInteger() bool { return t.Integer } -func (t *TypeImpl) IsSigned() bool { return t.Signed } -func (t *TypeImpl) IsFloat() bool { return t.Float } -func (t *TypeImpl) IsComplex() bool { return t.Complex } -func (t *TypeImpl) IsPtrShaped() bool { return t.Ptr } -func (t *TypeImpl) IsString() bool { return t.string } -func (t *TypeImpl) IsSlice() bool { return t.slice } -func (t *TypeImpl) IsArray() bool { return t.array } -func (t *TypeImpl) IsStruct() bool { return t.struct_ } -func (t *TypeImpl) IsInterface() bool { return t.inter } -func (t *TypeImpl) IsMemory() bool { return false } -func (t *TypeImpl) IsFlags() bool { return false } -func (t *TypeImpl) IsVoid() bool { return false } -func (t *TypeImpl) String() string { return t.Name } -func (t *TypeImpl) SimpleString() string { return t.Name } -func (t *TypeImpl) ElemType() Type { return t.Elem_ } -func (t *TypeImpl) PtrTo() Type { 
panic("not implemented") } -func (t *TypeImpl) NumFields() int { panic("not implemented") } -func (t *TypeImpl) FieldType(i int) Type { panic("not implemented") } -func (t *TypeImpl) FieldOff(i int) int64 { panic("not implemented") } -func (t *TypeImpl) NumElem() int64 { panic("not implemented") } +func (t *TypeImpl) Size() int64 { return t.Size_ } +func (t *TypeImpl) Alignment() int64 { return t.Align } +func (t *TypeImpl) IsBoolean() bool { return t.Boolean } +func (t *TypeImpl) IsInteger() bool { return t.Integer } +func (t *TypeImpl) IsSigned() bool { return t.Signed } +func (t *TypeImpl) IsFloat() bool { return t.Float } +func (t *TypeImpl) IsComplex() bool { return t.Complex } +func (t *TypeImpl) IsPtrShaped() bool { return t.Ptr } +func (t *TypeImpl) IsString() bool { return t.string } +func (t *TypeImpl) IsSlice() bool { return t.slice } +func (t *TypeImpl) IsArray() bool { return t.array } +func (t *TypeImpl) IsStruct() bool { return t.struct_ } +func (t *TypeImpl) IsInterface() bool { return t.inter } +func (t *TypeImpl) IsMemory() bool { return false } +func (t *TypeImpl) IsFlags() bool { return false } +func (t *TypeImpl) IsVoid() bool { return false } +func (t *TypeImpl) String() string { return t.Name } +func (t *TypeImpl) SimpleString() string { return t.Name } +func (t *TypeImpl) ElemType() Type { return t.Elem_ } +func (t *TypeImpl) PtrTo() Type { panic("not implemented") } +func (t *TypeImpl) NumFields() int { panic("not implemented") } +func (t *TypeImpl) FieldType(i int) Type { panic("not implemented") } +func (t *TypeImpl) FieldOff(i int) int64 { panic("not implemented") } +func (t *TypeImpl) FieldName(i int) string { panic("not implemented") } +func (t *TypeImpl) NumElem() int64 { panic("not implemented") } func (t *TypeImpl) Equal(u Type) bool { x, ok := u.(*TypeImpl) -- cgit v1.3 From 7f53391f6b7f2387a5ed00398d34b046c321966f Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Mon, 11 Apr 2016 12:22:26 -0700 Subject: cmd/compile: fix -N build 
The decomposer of builtin types is confused by having structs still around from the user-type decomposer. They're all dead though, so just enabling a deadcode pass fixes things. Change-Id: I2df6bc7e829be03eabfd24c8dda1bff96f3d7091 Reviewed-on: https://go-review.googlesource.com/21839 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/compile.go | 6 +++--- src/cmd/compile/internal/ssa/decompose.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go index d52ae9c6da..b4215f119e 100644 --- a/src/cmd/compile/internal/ssa/compile.go +++ b/src/cmd/compile/internal/ssa/compile.go @@ -230,9 +230,9 @@ var passes = [...]pass{ {name: "early deadcode", fn: deadcode}, // remove generated dead code to avoid doing pointless work during opt {name: "short circuit", fn: shortcircuit}, {name: "decompose user", fn: decomposeUser, required: true}, - {name: "opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules - {name: "zero arg cse", fn: zcse, required: true}, // required to merge OpSB values - {name: "opt deadcode", fn: deadcode}, // remove any blocks orphaned during opt + {name: "opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules + {name: "zero arg cse", fn: zcse, required: true}, // required to merge OpSB values + {name: "opt deadcode", fn: deadcode, required: true}, // remove any blocks orphaned during opt {name: "generic cse", fn: cse}, {name: "phiopt", fn: phiopt}, {name: "nilcheckelim", fn: nilcheckelim}, diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go index de02885d76..53116ba593 100644 --- a/src/cmd/compile/internal/ssa/decompose.go +++ b/src/cmd/compile/internal/ssa/decompose.go @@ -79,7 +79,7 @@ func decomposeBuiltIn(f *Func) { } delete(f.NamedValues, name) case t.Size() > 
f.Config.IntSize: - f.Unimplementedf("undecomposed named type %s", t) + f.Unimplementedf("undecomposed named type %s %s", name, t) default: newNames = append(newNames, name) } -- cgit v1.3 From 7e40627a0e595aa321efaf44f8507b678ee5eb1e Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Mon, 11 Apr 2016 13:17:52 -0700 Subject: cmd/compile: zero all three argstorage slots These changes were missed when going from 2 to 3 argstorage slots. https://go-review.googlesource.com/20296/ Change-Id: I930a307bb0b695bf1ae088030c9bbb6d14ca31d2 Reviewed-on: https://go-review.googlesource.com/21841 Reviewed-by: Brad Fitzpatrick Reviewed-by: Josh Bleecher Snyder --- src/cmd/compile/internal/ssa/func.go | 10 ++++++++-- src/cmd/compile/internal/ssa/value.go | 1 + 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 6e47b7f19c..8dd75f6093 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -284,7 +284,10 @@ func (b *Block) NewValue2I(line int32, op Op, t Type, auxint int64, arg0, arg1 * func (b *Block) NewValue3(line int32, op Op, t Type, arg0, arg1, arg2 *Value) *Value { v := b.Func.newValue(op, t, b, line) v.AuxInt = 0 - v.Args = []*Value{arg0, arg1, arg2} + v.Args = v.argstorage[:3] + v.argstorage[0] = arg0 + v.argstorage[1] = arg1 + v.argstorage[2] = arg2 arg0.Uses++ arg1.Uses++ arg2.Uses++ @@ -295,7 +298,10 @@ func (b *Block) NewValue3(line int32, op Op, t Type, arg0, arg1, arg2 *Value) *V func (b *Block) NewValue3I(line int32, op Op, t Type, auxint int64, arg0, arg1, arg2 *Value) *Value { v := b.Func.newValue(op, t, b, line) v.AuxInt = auxint - v.Args = []*Value{arg0, arg1, arg2} + v.Args = v.argstorage[:3] + v.argstorage[0] = arg0 + v.argstorage[1] = arg1 + v.argstorage[2] = arg2 arg0.Uses++ arg1.Uses++ arg2.Uses++ diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index 
fd4eb64db1..6c364ad932 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -185,6 +185,7 @@ func (v *Value) resetArgs() { } v.argstorage[0] = nil v.argstorage[1] = nil + v.argstorage[2] = nil v.Args = v.argstorage[:0] } -- cgit v1.3 From 32efa16c3d63dd630e2190a8c0f30c0a941f6fd7 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 1 Apr 2016 14:51:29 -0400 Subject: cmd/compile: added stats printing to stackalloc This is controlled by the "regalloc" stats flag, since regalloc calls stackalloc. The plan is for this to allow comparison of cheaper stack allocation algorithms with what we have now. Change-Id: Ibf64a780344c69babfcbb328fd6d053ea2e02cfc Reviewed-on: https://go-review.googlesource.com/21393 Run-TryBot: David Chase Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/stackalloc.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go index 1de22dc96e..e3ef66ab1b 100644 --- a/src/cmd/compile/internal/ssa/stackalloc.go +++ b/src/cmd/compile/internal/ssa/stackalloc.go @@ -22,6 +22,13 @@ type stackAllocState struct { names []LocalSlot slots []int used []bool + + nArgSlot, // Number of Values sourced to arg slot + nNotNeed, // Number of Values not needing a stack slot + nNamedSlot, // Number of Values using a named stack slot + nReuse, // Number of values reusing a stack slot + nAuto, // Number of autos allocated for stack slots. 
+ nSelfInterfere int32 // Number of self-interferences } func newStackAllocState(f *Func) *stackAllocState { @@ -54,6 +61,7 @@ func putStackAllocState(s *stackAllocState) { s.f.Config.stackAllocState = s s.f = nil s.live = nil + s.nArgSlot, s.nNotNeed, s.nNamedSlot, s.nReuse, s.nAuto, s.nSelfInterfere = 0, 0, 0, 0, 0, 0 } type stackValState struct { @@ -75,6 +83,13 @@ func stackalloc(f *Func, spillLive [][]ID) [][]ID { defer putStackAllocState(s) s.stackalloc() + if f.pass.stats > 0 { + f.logStat("stack_alloc_stats", + s.nArgSlot, "arg_slots", s.nNotNeed, "slot_not_needed", + s.nNamedSlot, "named_slots", s.nAuto, "auto_slots", + s.nReuse, "reused_slots", s.nSelfInterfere, "self_interfering") + } + return s.live } @@ -170,9 +185,11 @@ func (s *stackAllocState) stackalloc() { for _, b := range f.Blocks { for _, v := range b.Values { if !s.values[v.ID].needSlot { + s.nNotNeed++ continue } if v.Op == OpArg { + s.nArgSlot++ continue // already picked } @@ -190,12 +207,14 @@ func (s *stackAllocState) stackalloc() { if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off { // A variable can interfere with itself. // It is rare, but but it can happen. + s.nSelfInterfere++ goto noname } } if f.pass.debug > stackDebug { fmt.Printf("stackalloc %s to %s\n", v, name.Name()) } + s.nNamedSlot++ f.setHome(v, name) continue } @@ -217,11 +236,13 @@ func (s *stackAllocState) stackalloc() { var i int for i = 0; i < len(locs); i++ { if !used[i] { + s.nReuse++ break } } // If there is no unused stack slot, allocate a new one. 
if i == len(locs) { + s.nAuto++ locs = append(locs, LocalSlot{N: f.Config.fe.Auto(v.Type), Type: v.Type, Off: 0}) locations[v.Type] = locs } -- cgit v1.3 From a4650a2111b2bb826ca64a13bdad9c96e3095e47 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Sun, 10 Apr 2016 09:44:17 -0700 Subject: cmd/compile: avoid write barrier in append fast path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When we are writing the result of an append back to the same slice, we don’t need a write barrier on the fast path. This re-implements an optimization that was present in the old backend. Updates #14921 Fixes #14969 Sample code: var x []byte func p() { x = append(x, 1, 2, 3) } Before: "".p t=1 size=224 args=0x0 locals=0x48 0x0000 00000 (append.go:21) TEXT "".p(SB), $72-0 0x0000 00000 (append.go:21) MOVQ (TLS), CX 0x0009 00009 (append.go:21) CMPQ SP, 16(CX) 0x000d 00013 (append.go:21) JLS 199 0x0013 00019 (append.go:21) SUBQ $72, SP 0x0017 00023 (append.go:21) FUNCDATA $0, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB) 0x0017 00023 (append.go:21) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB) 0x0017 00023 (append.go:19) MOVQ "".x+16(SB), CX 0x001e 00030 (append.go:19) MOVQ "".x(SB), DX 0x0025 00037 (append.go:19) MOVQ "".x+8(SB), BX 0x002c 00044 (append.go:19) MOVQ BX, "".autotmp_0+64(SP) 0x0031 00049 (append.go:22) LEAQ 3(BX), BP 0x0035 00053 (append.go:22) CMPQ BP, CX 0x0038 00056 (append.go:22) JGT $0, 131 0x003a 00058 (append.go:22) MOVB $1, (DX)(BX*1) 0x003e 00062 (append.go:22) MOVB $2, 1(DX)(BX*1) 0x0043 00067 (append.go:22) MOVB $3, 2(DX)(BX*1) 0x0048 00072 (append.go:22) MOVQ BP, "".x+8(SB) 0x004f 00079 (append.go:22) MOVQ CX, "".x+16(SB) 0x0056 00086 (append.go:22) MOVL runtime.writeBarrier(SB), AX 0x005c 00092 (append.go:22) TESTB AL, AL 0x005e 00094 (append.go:22) JNE $0, 108 0x0060 00096 (append.go:22) MOVQ DX, "".x(SB) 0x0067 00103 (append.go:23) ADDQ $72, SP 0x006b 00107 (append.go:23) RET 0x006c 00108 
(append.go:22) LEAQ "".x(SB), CX 0x0073 00115 (append.go:22) MOVQ CX, (SP) 0x0077 00119 (append.go:22) MOVQ DX, 8(SP) 0x007c 00124 (append.go:22) PCDATA $0, $0 0x007c 00124 (append.go:22) CALL runtime.writebarrierptr(SB) 0x0081 00129 (append.go:23) JMP 103 0x0083 00131 (append.go:22) LEAQ type.[]uint8(SB), AX 0x008a 00138 (append.go:22) MOVQ AX, (SP) 0x008e 00142 (append.go:22) MOVQ DX, 8(SP) 0x0093 00147 (append.go:22) MOVQ BX, 16(SP) 0x0098 00152 (append.go:22) MOVQ CX, 24(SP) 0x009d 00157 (append.go:22) MOVQ BP, 32(SP) 0x00a2 00162 (append.go:22) PCDATA $0, $0 0x00a2 00162 (append.go:22) CALL runtime.growslice(SB) 0x00a7 00167 (append.go:22) MOVQ 40(SP), DX 0x00ac 00172 (append.go:22) MOVQ 48(SP), AX 0x00b1 00177 (append.go:22) MOVQ 56(SP), CX 0x00b6 00182 (append.go:22) ADDQ $3, AX 0x00ba 00186 (append.go:19) MOVQ "".autotmp_0+64(SP), BX 0x00bf 00191 (append.go:22) MOVQ AX, BP 0x00c2 00194 (append.go:22) JMP 58 0x00c7 00199 (append.go:22) NOP 0x00c7 00199 (append.go:21) CALL runtime.morestack_noctxt(SB) 0x00cc 00204 (append.go:21) JMP 0 After: "".p t=1 size=208 args=0x0 locals=0x48 0x0000 00000 (append.go:21) TEXT "".p(SB), $72-0 0x0000 00000 (append.go:21) MOVQ (TLS), CX 0x0009 00009 (append.go:21) CMPQ SP, 16(CX) 0x000d 00013 (append.go:21) JLS 191 0x0013 00019 (append.go:21) SUBQ $72, SP 0x0017 00023 (append.go:21) FUNCDATA $0, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB) 0x0017 00023 (append.go:21) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB) 0x0017 00023 (append.go:19) MOVQ "".x+16(SB), CX 0x001e 00030 (append.go:19) MOVQ "".x+8(SB), DX 0x0025 00037 (append.go:19) MOVQ DX, "".autotmp_0+64(SP) 0x002a 00042 (append.go:19) MOVQ "".x(SB), BX 0x0031 00049 (append.go:22) LEAQ 3(DX), BP 0x0035 00053 (append.go:22) MOVQ BP, "".x+8(SB) 0x003c 00060 (append.go:22) CMPQ BP, CX 0x003f 00063 (append.go:22) JGT $0, 84 0x0041 00065 (append.go:22) MOVB $1, (BX)(DX*1) 0x0045 00069 (append.go:22) MOVB $2, 1(BX)(DX*1) 0x004a 00074 (append.go:22) MOVB $3, 
2(BX)(DX*1) 0x004f 00079 (append.go:23) ADDQ $72, SP 0x0053 00083 (append.go:23) RET 0x0054 00084 (append.go:22) LEAQ type.[]uint8(SB), AX 0x005b 00091 (append.go:22) MOVQ AX, (SP) 0x005f 00095 (append.go:22) MOVQ BX, 8(SP) 0x0064 00100 (append.go:22) MOVQ DX, 16(SP) 0x0069 00105 (append.go:22) MOVQ CX, 24(SP) 0x006e 00110 (append.go:22) MOVQ BP, 32(SP) 0x0073 00115 (append.go:22) PCDATA $0, $0 0x0073 00115 (append.go:22) CALL runtime.growslice(SB) 0x0078 00120 (append.go:22) MOVQ 40(SP), CX 0x007d 00125 (append.go:22) MOVQ 56(SP), AX 0x0082 00130 (append.go:22) MOVQ AX, "".x+16(SB) 0x0089 00137 (append.go:22) MOVL runtime.writeBarrier(SB), AX 0x008f 00143 (append.go:22) TESTB AL, AL 0x0091 00145 (append.go:22) JNE $0, 168 0x0093 00147 (append.go:22) MOVQ CX, "".x(SB) 0x009a 00154 (append.go:22) MOVQ "".x(SB), BX 0x00a1 00161 (append.go:19) MOVQ "".autotmp_0+64(SP), DX 0x00a6 00166 (append.go:22) JMP 65 0x00a8 00168 (append.go:22) LEAQ "".x(SB), DX 0x00af 00175 (append.go:22) MOVQ DX, (SP) 0x00b3 00179 (append.go:22) MOVQ CX, 8(SP) 0x00b8 00184 (append.go:22) PCDATA $0, $0 0x00b8 00184 (append.go:22) CALL runtime.writebarrierptr(SB) 0x00bd 00189 (append.go:22) JMP 154 0x00bf 00191 (append.go:22) NOP 0x00bf 00191 (append.go:21) CALL runtime.morestack_noctxt(SB) 0x00c4 00196 (append.go:21) JMP 0 Change-Id: I77a41ad3a22557a4bb4654de7d6d24a029efe34a Reviewed-on: https://go-review.googlesource.com/21813 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/ssa.go | 123 +++++++++++++++++++++++++++++-------- 1 file changed, 96 insertions(+), 27 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 5ee370395b..beb68b0385 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -683,14 +683,27 @@ func (s *state) stmt(n *Node) { // Evaluate RHS. 
rhs := n.Right - if rhs != nil && (rhs.Op == OSTRUCTLIT || rhs.Op == OARRAYLIT) { - // All literals with nonzero fields have already been - // rewritten during walk. Any that remain are just T{} - // or equivalents. Use the zero value. - if !iszero(rhs) { - Fatalf("literal with nonzero value in SSA: %v", rhs) + if rhs != nil { + switch rhs.Op { + case OSTRUCTLIT, OARRAYLIT: + // All literals with nonzero fields have already been + // rewritten during walk. Any that remain are just T{} + // or equivalents. Use the zero value. + if !iszero(rhs) { + Fatalf("literal with nonzero value in SSA: %v", rhs) + } + rhs = nil + case OAPPEND: + // If we're writing the result of an append back to the same slice, + // handle it specially to avoid write barriers on the fast (non-growth) path. + // If the slice can be SSA'd, it'll be on the stack, + // so there will be no write barriers, + // so there's no need to attempt to prevent them. + if samesafeexpr(n.Left, rhs.List.First()) && !s.canSSA(n.Left) { + s.append(rhs, true) + return + } } - rhs = nil } var r *ssa.Value needwb := n.Op == OASWB && rhs != nil @@ -709,11 +722,11 @@ func (s *state) stmt(n *Node) { } } if rhs != nil && rhs.Op == OAPPEND { - // Yuck! The frontend gets rid of the write barrier, but we need it! - // At least, we need it in the case where growslice is called. - // TODO: Do the write barrier on just the growslice branch. + // The frontend gets rid of the write barrier to enable the special OAPPEND + // handling above, but since this is not a special case, we need it. // TODO: just add a ptr graying to the end of growslice? - // TODO: check whether we need to do this for ODOTTYPE and ORECV also. + // TODO: check whether we need to provide special handling and a write barrier + // for ODOTTYPE and ORECV also. // They get similar wb-removal treatment in walk.go:OAS. 
needwb = true } @@ -2079,7 +2092,7 @@ func (s *state) expr(n *Node) *ssa.Value { return s.newValue1(ssa.OpGetG, n.Type, s.mem()) case OAPPEND: - return s.exprAppend(n) + return s.append(n, false) default: s.Unimplementedf("unhandled expr %s", opnames[n.Op]) @@ -2087,25 +2100,57 @@ func (s *state) expr(n *Node) *ssa.Value { } } -// exprAppend converts an OAPPEND node n to an ssa.Value, adds it to s, and returns the Value. -func (s *state) exprAppend(n *Node) *ssa.Value { - // append(s, e1, e2, e3). Compile like: +// append converts an OAPPEND node to SSA. +// If inplace is false, it converts the OAPPEND expression n to an ssa.Value, +// adds it to s, and returns the Value. +// If inplace is true, it writes the result of the OAPPEND expression n +// back to the slice being appended to, and returns nil. +// inplace MUST be set to false if the slice can be SSA'd. +func (s *state) append(n *Node, inplace bool) *ssa.Value { + // If inplace is false, process as expression "append(s, e1, e2, e3)": + // // ptr, len, cap := s // newlen := len + 3 - // if newlen > s.cap { + // if newlen > cap { // ptr, len, cap = growslice(s, newlen) // newlen = len + 3 // recalculate to avoid a spill // } + // // with write barriers, if needed: + // *(ptr+len) = e1 + // *(ptr+len+1) = e2 + // *(ptr+len+2) = e3 + // return makeslice(ptr, newlen, cap) + // + // + // If inplace is true, process as statement "s = append(s, e1, e2, e3)": + // + // a := &s + // ptr, len, cap := s + // newlen := len + 3 + // *a.len = newlen // store newlen immediately to avoid a spill + // if newlen > cap { + // newptr, _, newcap = growslice(ptr, len, cap, newlen) + // *a.cap = newcap // write before ptr to avoid a spill + // *a.ptr = newptr // with write barrier + // } + // // with write barriers, if needed: // *(ptr+len) = e1 // *(ptr+len+1) = e2 // *(ptr+len+2) = e3 - // makeslice(ptr, newlen, cap) et := n.Type.Elem() pt := Ptrto(et) // Evaluate slice - slice := s.expr(n.List.First()) + sn := n.List.First() // 
the slice node is the first in the list + + var slice, addr *ssa.Value + if inplace { + addr = s.addr(sn, false) + slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) + } else { + slice = s.expr(sn) + } // Allocate new blocks grow := s.f.NewBlock(ssa.BlockPlain) @@ -2117,10 +2162,20 @@ func (s *state) exprAppend(n *Node) *ssa.Value { l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice) c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice) nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs)) + + if inplace { + lenaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_nel), addr) + s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenaddr, nl, s.mem()) + } + cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c) s.vars[&ptrVar] = p - s.vars[&newlenVar] = nl - s.vars[&capVar] = c + + if !inplace { + s.vars[&newlenVar] = nl + s.vars[&capVar] = c + } + b := s.endBlock() b.Kind = ssa.BlockIf b.Likely = ssa.BranchUnlikely @@ -2134,9 +2189,18 @@ func (s *state) exprAppend(n *Node) *ssa.Value { r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl) - s.vars[&ptrVar] = r[0] - s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs)) - s.vars[&capVar] = r[2] + if inplace { + capaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_cap), addr) + s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capaddr, r[2], s.mem()) + s.insertWBstore(pt, addr, r[0], n.Lineno, 0) + // load the value we just stored to avoid having to spill it + s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem()) + } else { + s.vars[&ptrVar] = r[0] + s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs)) + s.vars[&capVar] = r[2] + } + b = s.endBlock() b.AddEdgeTo(assign) @@ -2156,9 +2220,11 @@ func (s *state) exprAppend(n *Node) *ssa.Value { } } - p = 
s.variable(&ptrVar, pt) // generates phi for ptr - nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl - c = s.variable(&capVar, Types[TINT]) // generates phi for cap + p = s.variable(&ptrVar, pt) // generates phi for ptr + if !inplace { + nl = s.variable(&newlenVar, Types[TINT]) // generates phi for nl + c = s.variable(&capVar, Types[TINT]) // generates phi for cap + } p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l) // TODO: just one write barrier call for all of these writes? // TODO: maybe just one writeBarrier.enabled check? @@ -2179,10 +2245,13 @@ func (s *state) exprAppend(n *Node) *ssa.Value { } } - // make result delete(s.vars, &ptrVar) + if inplace { + return nil + } delete(s.vars, &newlenVar) delete(s.vars, &capVar) + // make result return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c) } -- cgit v1.3 From 07669d2737aa51107a4e54b61d6704f6ad8035b5 Mon Sep 17 00:00:00 2001 From: Martin Möhrmann Date: Thu, 7 Apr 2016 08:01:47 +0200 Subject: cmd/compile: cleanup pragcgo Removes dynimport, dynexport, dynlinker cases since they can not be reached due to prefix check for "go:cgo_" in getlinepragma. Replaces the if chains for verb distinction by a switch statement. Replaces fmt.Sprintf by fmt.Sprintln for string concatenation. Removes the more, getimpsym and getquoted functions by introducing a pragmaFields function that partitions a pragma into its components. Adds tests for cgo pragmas. 
Change-Id: I43c7b9550feb3ddccaff7fb02198a3f994444123 Reviewed-on: https://go-review.googlesource.com/21607 Reviewed-by: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/lex.go | 218 ++++++++++++++------------------ src/cmd/compile/internal/gc/lex_test.go | 79 ++++++++++++ 2 files changed, 173 insertions(+), 124 deletions(-) create mode 100644 src/cmd/compile/internal/gc/lex_test.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go index 2dbbd9276b..4b95bb7124 100644 --- a/src/cmd/compile/internal/gc/lex.go +++ b/src/cmd/compile/internal/gc/lex.go @@ -44,6 +44,10 @@ func isDigit(c rune) bool { return '0' <= c && c <= '9' } +func isQuoted(s string) bool { + return len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' +} + func plan9quote(s string) string { if s == "" { return "''" @@ -853,15 +857,6 @@ func internString(b []byte) string { return s } -func more(pp *string) bool { - p := *pp - for p != "" && isSpace(rune(p[0])) { - p = p[1:] - } - *pp = p - return p != "" -} - // read and interpret syntax that looks like // //line parse.y:15 // as a discontinuity in sequential line numbers. 
@@ -887,7 +882,7 @@ func (l *lexer) getlinepragma() rune { text := strings.TrimSuffix(lexbuf.String(), "\r") if strings.HasPrefix(text, "go:cgo_") { - pragcgo(text) + pragcgobuf += pragcgo(text) } verb := text @@ -991,139 +986,114 @@ func (l *lexer) getlinepragma() rune { return c } -func getimpsym(pp *string) string { - more(pp) // skip spaces - p := *pp - if p == "" || p[0] == '"' { - return "" - } - i := 0 - for i < len(p) && !isSpace(rune(p[i])) && p[i] != '"' { - i++ - } - sym := p[:i] - *pp = p[i:] - return sym -} - -func getquoted(pp *string) (string, bool) { - more(pp) // skip spaces - p := *pp - if p == "" || p[0] != '"' { - return "", false - } - p = p[1:] - i := strings.Index(p, `"`) - if i < 0 { - return "", false - } - *pp = p[i+1:] - return p[:i], true -} - -// Copied nearly verbatim from the C compiler's #pragma parser. -// TODO: Rewrite more cleanly once the compiler is written in Go. -func pragcgo(text string) { - var q string +func pragcgo(text string) string { + f := pragmaFields(text) - if i := strings.Index(text, " "); i >= 0 { - text, q = text[:i], text[i:] - } + verb := f[0][3:] // skip "go:" + switch verb { + case "cgo_export_static", "cgo_export_dynamic": + switch { + case len(f) == 2 && !isQuoted(f[1]): + local := plan9quote(f[1]) + return fmt.Sprintln(verb, local) - verb := text[3:] // skip "go:" + case len(f) == 3 && !isQuoted(f[1]) && !isQuoted(f[2]): + local := plan9quote(f[1]) + remote := plan9quote(f[2]) + return fmt.Sprintln(verb, local, remote) - if verb == "cgo_dynamic_linker" || verb == "dynlinker" { - p, ok := getquoted(&q) - if !ok { - Yyerror("usage: //go:cgo_dynamic_linker \"path\"") - return + default: + Yyerror(`usage: //go:%s local [remote]`, verb) } - pragcgobuf += fmt.Sprintf("cgo_dynamic_linker %v\n", plan9quote(p)) - return + case "cgo_import_dynamic": + switch { + case len(f) == 2 && !isQuoted(f[1]): + local := plan9quote(f[1]) + return fmt.Sprintln(verb, local) - } + case len(f) == 3 && !isQuoted(f[1]) && 
!isQuoted(f[2]): + local := plan9quote(f[1]) + remote := plan9quote(f[2]) + return fmt.Sprintln(verb, local, remote) - if verb == "dynexport" { - verb = "cgo_export_dynamic" - } - if verb == "cgo_export_static" || verb == "cgo_export_dynamic" { - local := getimpsym(&q) - var remote string - if local == "" { - goto err2 - } - if !more(&q) { - pragcgobuf += fmt.Sprintf("%s %v\n", verb, plan9quote(local)) - return - } + case len(f) == 4 && !isQuoted(f[1]) && !isQuoted(f[2]) && isQuoted(f[3]): + local := plan9quote(f[1]) + remote := plan9quote(f[2]) + library := plan9quote(strings.Trim(f[3], `"`)) + return fmt.Sprintln(verb, local, remote, library) - remote = getimpsym(&q) - if remote == "" { - goto err2 + default: + Yyerror(`usage: //go:cgo_import_dynamic local [remote ["library"]]`) } - pragcgobuf += fmt.Sprintf("%s %v %v\n", verb, plan9quote(local), plan9quote(remote)) - return - - err2: - Yyerror("usage: //go:%s local [remote]", verb) - return - } + case "cgo_import_static": + switch { + case len(f) == 2 && !isQuoted(f[1]): + local := plan9quote(f[1]) + return fmt.Sprintln(verb, local) - if verb == "cgo_import_dynamic" || verb == "dynimport" { - var ok bool - local := getimpsym(&q) - var p string - var remote string - if local == "" { - goto err3 - } - if !more(&q) { - pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v\n", plan9quote(local)) - return + default: + Yyerror(`usage: //go:cgo_import_static local`) } + case "cgo_dynamic_linker": + switch { + case len(f) == 2 && isQuoted(f[1]): + path := plan9quote(strings.Trim(f[1], `"`)) + return fmt.Sprintln(verb, path) - remote = getimpsym(&q) - if remote == "" { - goto err3 - } - if !more(&q) { - pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v\n", plan9quote(local), plan9quote(remote)) - return + default: + Yyerror(`usage: //go:cgo_dynamic_linker "path"`) } + case "cgo_ldflag": + switch { + case len(f) == 2 && isQuoted(f[1]): + arg := plan9quote(strings.Trim(f[1], `"`)) + return fmt.Sprintln(verb, arg) - p, ok = 
getquoted(&q) - if !ok { - goto err3 + default: + Yyerror(`usage: //go:cgo_ldflag "arg"`) } - pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v %v\n", plan9quote(local), plan9quote(remote), plan9quote(p)) - return - - err3: - Yyerror("usage: //go:cgo_import_dynamic local [remote [\"library\"]]") - return } + return "" +} - if verb == "cgo_import_static" { - local := getimpsym(&q) - if local == "" || more(&q) { - Yyerror("usage: //go:cgo_import_static local") - return +// pragmaFields is similar to strings.FieldsFunc(s, isSpace) +// but does not split when inside double quoted regions and always +// splits before the start and after the end of a double quoted region. +// pragmaFields does not recognize escaped quotes. If a quote in s is not +// closed the part after the opening quote will not be returned as a field. +func pragmaFields(s string) []string { + var a []string + inQuote := false + fieldStart := -1 // Set to -1 when looking for start of field. + for i, c := range s { + switch { + case c == '"': + if inQuote { + inQuote = false + a = append(a, s[fieldStart:i+1]) + fieldStart = -1 + } else { + inQuote = true + if fieldStart >= 0 { + a = append(a, s[fieldStart:i]) + } + fieldStart = i + } + case !inQuote && isSpace(c): + if fieldStart >= 0 { + a = append(a, s[fieldStart:i]) + fieldStart = -1 + } + default: + if fieldStart == -1 { + fieldStart = i + } } - pragcgobuf += fmt.Sprintf("cgo_import_static %v\n", plan9quote(local)) - return - } - - if verb == "cgo_ldflag" { - p, ok := getquoted(&q) - if !ok { - Yyerror("usage: //go:cgo_ldflag \"arg\"") - return - } - pragcgobuf += fmt.Sprintf("cgo_ldflag %v\n", plan9quote(p)) - return - + if !inQuote && fieldStart >= 0 { // Last field might end at the end of the string. 
+ a = append(a, s[fieldStart:]) } + return a } func (l *lexer) getr() rune { diff --git a/src/cmd/compile/internal/gc/lex_test.go b/src/cmd/compile/internal/gc/lex_test.go new file mode 100644 index 0000000000..9230b30dad --- /dev/null +++ b/src/cmd/compile/internal/gc/lex_test.go @@ -0,0 +1,79 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gc + +import "testing" + +func eq(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +func TestPragmaFields(t *testing.T) { + + var tests = []struct { + in string + want []string + }{ + {"", []string{}}, + {" \t ", []string{}}, + {`""""`, []string{`""`, `""`}}, + {" a'b'c ", []string{"a'b'c"}}, + {"1 2 3 4", []string{"1", "2", "3", "4"}}, + {"\n☺\t☹\n", []string{"☺", "☹"}}, + {`"1 2 " 3 " 4 5"`, []string{`"1 2 "`, `3`, `" 4 5"`}}, + {`"1""2 3""4"`, []string{`"1"`, `"2 3"`, `"4"`}}, + {`12"34"`, []string{`12`, `"34"`}}, + {`12"34 `, []string{`12`}}, + } + + for _, tt := range tests { + got := pragmaFields(tt.in) + if !eq(got, tt.want) { + t.Errorf("pragmaFields(%q) = %v; want %v", tt.in, got, tt.want) + continue + } + } +} + +func TestPragcgo(t *testing.T) { + + var tests = []struct { + in string + want string + }{ + {`go:cgo_export_dynamic local`, "cgo_export_dynamic local\n"}, + {`go:cgo_export_dynamic local remote`, "cgo_export_dynamic local remote\n"}, + {`go:cgo_export_dynamic local' remote'`, "cgo_export_dynamic 'local''' 'remote'''\n"}, + {`go:cgo_export_static local`, "cgo_export_static local\n"}, + {`go:cgo_export_static local remote`, "cgo_export_static local remote\n"}, + {`go:cgo_export_static local' remote'`, "cgo_export_static 'local''' 'remote'''\n"}, + {`go:cgo_import_dynamic local`, "cgo_import_dynamic local\n"}, + {`go:cgo_import_dynamic local remote`, "cgo_import_dynamic local 
remote\n"}, + {`go:cgo_import_dynamic local remote "library"`, "cgo_import_dynamic local remote library\n"}, + {`go:cgo_import_dynamic local' remote' "lib rary"`, "cgo_import_dynamic 'local''' 'remote''' 'lib rary'\n"}, + {`go:cgo_import_static local`, "cgo_import_static local\n"}, + {`go:cgo_import_static local'`, "cgo_import_static 'local'''\n"}, + {`go:cgo_dynamic_linker "/path/"`, "cgo_dynamic_linker /path/\n"}, + {`go:cgo_dynamic_linker "/p ath/"`, "cgo_dynamic_linker '/p ath/'\n"}, + {`go:cgo_ldflag "arg"`, "cgo_ldflag arg\n"}, + {`go:cgo_ldflag "a rg"`, "cgo_ldflag 'a rg'\n"}, + } + + for _, tt := range tests { + got := pragcgo(tt.in) + if got != tt.want { + t.Errorf("pragcgo(%q) = %q; want %q", tt.in, got, tt.want) + continue + } + } +} -- cgit v1.3 From a223ccae386449169774597b15a00f2d70addce7 Mon Sep 17 00:00:00 2001 From: Michael Munday Date: Mon, 11 Apr 2016 20:23:19 -0400 Subject: cmd/compile/internal/s390x: add s390x support s390x does not require duffzero/duffcopy since it has storage-to-storage instructions that can copy/clear up to 256 bytes at a time. peep contains several new passes to optimize instruction sequences that match s390x instructions such as the compare-and-branch and load/store multiple instructions. copyprop and subprop have been extended to work with moves that require sign/zero extension. This work could be ported to other architectures that do not used sized math however it does add complexity and will probably be rendered unnecessary by ssa in the near future. 
Change-Id: I1b64b281b452ed82a85655a0df69cb224d2a6941 Reviewed-on: https://go-review.googlesource.com/20873 Run-TryBot: Michael Munday TryBot-Result: Gobot Gobot Reviewed-by: Bill O'Farrell Reviewed-by: Brad Fitzpatrick --- src/cmd/compile/internal/s390x/cgen.go | 178 ++++ src/cmd/compile/internal/s390x/galign.go | 66 ++ src/cmd/compile/internal/s390x/ggen.go | 577 +++++++++++ src/cmd/compile/internal/s390x/gsubr.go | 1115 ++++++++++++++++++++ src/cmd/compile/internal/s390x/peep.go | 1664 ++++++++++++++++++++++++++++++ src/cmd/compile/internal/s390x/prog.go | 179 ++++ src/cmd/compile/internal/s390x/reg.go | 130 +++ src/cmd/compile/main.go | 3 + src/cmd/dist/buildtool.go | 1 + 9 files changed, 3913 insertions(+) create mode 100644 src/cmd/compile/internal/s390x/cgen.go create mode 100644 src/cmd/compile/internal/s390x/galign.go create mode 100644 src/cmd/compile/internal/s390x/ggen.go create mode 100644 src/cmd/compile/internal/s390x/gsubr.go create mode 100644 src/cmd/compile/internal/s390x/peep.go create mode 100644 src/cmd/compile/internal/s390x/prog.go create mode 100644 src/cmd/compile/internal/s390x/reg.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/s390x/cgen.go b/src/cmd/compile/internal/s390x/cgen.go new file mode 100644 index 0000000000..28bb34e0ef --- /dev/null +++ b/src/cmd/compile/internal/s390x/cgen.go @@ -0,0 +1,178 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package s390x + +import ( + "cmd/compile/internal/gc" + "cmd/internal/obj" + "cmd/internal/obj/s390x" +) + +type direction int + +const ( + _FORWARDS direction = iota + _BACKWARDS +) + +// blockcopy copies w bytes from &n to &res +func blockcopy(n, res *gc.Node, osrc, odst, w int64) { + var dst gc.Node + var src gc.Node + if n.Ullman >= res.Ullman { + gc.Agenr(n, &dst, res) // temporarily use dst + gc.Regalloc(&src, gc.Types[gc.Tptr], nil) + gins(s390x.AMOVD, &dst, &src) + if res.Op == gc.ONAME { + gc.Gvardef(res) + } + gc.Agen(res, &dst) + } else { + if res.Op == gc.ONAME { + gc.Gvardef(res) + } + gc.Agenr(res, &dst, res) + gc.Agenr(n, &src, nil) + } + defer gc.Regfree(&src) + defer gc.Regfree(&dst) + + var tmp gc.Node + gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil) + defer gc.Regfree(&tmp) + + offset := int64(0) + dir := _FORWARDS + if osrc < odst && odst < osrc+w { + // Reverse. Can't use MVC, fall back onto basic moves. + dir = _BACKWARDS + const copiesPerIter = 2 + if w >= 8*copiesPerIter { + cnt := w - (w % (8 * copiesPerIter)) + ginscon(s390x.AADD, w, &src) + ginscon(s390x.AADD, w, &dst) + + var end gc.Node + gc.Regalloc(&end, gc.Types[gc.Tptr], nil) + p := gins(s390x.ASUB, nil, &end) + p.From.Type = obj.TYPE_CONST + p.From.Offset = cnt + p.Reg = src.Reg + + var label *obj.Prog + for i := 0; i < copiesPerIter; i++ { + offset := int64(-8 * (i + 1)) + p := gins(s390x.AMOVD, &src, &tmp) + p.From.Type = obj.TYPE_MEM + p.From.Offset = offset + if i == 0 { + label = p + } + p = gins(s390x.AMOVD, &tmp, &dst) + p.To.Type = obj.TYPE_MEM + p.To.Offset = offset + } + + ginscon(s390x.ASUB, 8*copiesPerIter, &src) + ginscon(s390x.ASUB, 8*copiesPerIter, &dst) + gins(s390x.ACMP, &src, &end) + gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), label) + gc.Regfree(&end) + + w -= cnt + } else { + offset = w + } + } + + if dir == _FORWARDS && w > 1024 { + // Loop over MVCs + cnt := w - (w % 256) + + var end gc.Node + gc.Regalloc(&end, gc.Types[gc.Tptr], nil) + add := gins(s390x.AADD, nil, 
&end) + add.From.Type = obj.TYPE_CONST + add.From.Offset = cnt + add.Reg = src.Reg + + mvc := gins(s390x.AMVC, &src, &dst) + mvc.From.Type = obj.TYPE_MEM + mvc.From.Offset = 0 + mvc.To.Type = obj.TYPE_MEM + mvc.To.Offset = 0 + mvc.From3 = new(obj.Addr) + mvc.From3.Type = obj.TYPE_CONST + mvc.From3.Offset = 256 + + ginscon(s390x.AADD, 256, &src) + ginscon(s390x.AADD, 256, &dst) + gins(s390x.ACMP, &src, &end) + gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), mvc) + gc.Regfree(&end) + + w -= cnt + } + + for w > 0 { + cnt := w + // If in reverse we can only do 8, 4, 2 or 1 bytes at a time. + if dir == _BACKWARDS { + switch { + case cnt >= 8: + cnt = 8 + case cnt >= 4: + cnt = 4 + case cnt >= 2: + cnt = 2 + } + } else if cnt > 256 { + cnt = 256 + } + + switch cnt { + case 8, 4, 2, 1: + op := s390x.AMOVB + switch cnt { + case 8: + op = s390x.AMOVD + case 4: + op = s390x.AMOVW + case 2: + op = s390x.AMOVH + } + load := gins(op, &src, &tmp) + load.From.Type = obj.TYPE_MEM + load.From.Offset = offset + + store := gins(op, &tmp, &dst) + store.To.Type = obj.TYPE_MEM + store.To.Offset = offset + + if dir == _BACKWARDS { + load.From.Offset -= cnt + store.To.Offset -= cnt + } + + default: + p := gins(s390x.AMVC, &src, &dst) + p.From.Type = obj.TYPE_MEM + p.From.Offset = offset + p.To.Type = obj.TYPE_MEM + p.To.Offset = offset + p.From3 = new(obj.Addr) + p.From3.Type = obj.TYPE_CONST + p.From3.Offset = cnt + } + + switch dir { + case _FORWARDS: + offset += cnt + case _BACKWARDS: + offset -= cnt + } + w -= cnt + } +} diff --git a/src/cmd/compile/internal/s390x/galign.go b/src/cmd/compile/internal/s390x/galign.go new file mode 100644 index 0000000000..d0d621e557 --- /dev/null +++ b/src/cmd/compile/internal/s390x/galign.go @@ -0,0 +1,66 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package s390x + +import ( + "cmd/compile/internal/gc" + "cmd/internal/obj/s390x" +) + +func betypeinit() { + gc.Widthptr = 8 + gc.Widthint = 8 + gc.Widthreg = 8 +} + +func Main() { + gc.Thearch.LinkArch = &s390x.Links390x + gc.Thearch.REGSP = s390x.REGSP + gc.Thearch.REGCTXT = s390x.REGCTXT + gc.Thearch.REGCALLX = s390x.REG_R3 + gc.Thearch.REGCALLX2 = s390x.REG_R4 + gc.Thearch.REGRETURN = s390x.REG_R3 + gc.Thearch.REGMIN = s390x.REG_R0 + gc.Thearch.REGMAX = s390x.REG_R15 + gc.Thearch.FREGMIN = s390x.REG_F0 + gc.Thearch.FREGMAX = s390x.REG_F15 + gc.Thearch.MAXWIDTH = 1 << 50 + gc.Thearch.ReservedRegs = resvd + + gc.Thearch.Betypeinit = betypeinit + gc.Thearch.Cgen_hmul = cgen_hmul + gc.Thearch.Cgen_shift = cgen_shift + gc.Thearch.Clearfat = clearfat + gc.Thearch.Defframe = defframe + gc.Thearch.Dodiv = dodiv + gc.Thearch.Excise = excise + gc.Thearch.Expandchecks = expandchecks + gc.Thearch.Getg = getg + gc.Thearch.Gins = gins + gc.Thearch.Ginscmp = ginscmp + gc.Thearch.Ginscon = ginscon + gc.Thearch.Ginsnop = ginsnop + gc.Thearch.Gmove = gmove + gc.Thearch.Peep = peep + gc.Thearch.Proginfo = proginfo + gc.Thearch.Regtyp = isReg + gc.Thearch.Sameaddr = sameaddr + gc.Thearch.Smallindir = smallindir + gc.Thearch.Stackaddr = stackaddr + gc.Thearch.Blockcopy = blockcopy + gc.Thearch.Sudoaddable = sudoaddable + gc.Thearch.Sudoclean = sudoclean + gc.Thearch.Excludedregs = excludedregs + gc.Thearch.RtoB = RtoB + gc.Thearch.FtoB = RtoB + gc.Thearch.BtoR = BtoR + gc.Thearch.BtoF = BtoF + gc.Thearch.Optoas = optoas + gc.Thearch.Doregbits = doregbits + gc.Thearch.Regnames = regnames + + gc.Main() + gc.Exit(0) +} diff --git a/src/cmd/compile/internal/s390x/ggen.go b/src/cmd/compile/internal/s390x/ggen.go new file mode 100644 index 0000000000..39885baace --- /dev/null +++ b/src/cmd/compile/internal/s390x/ggen.go @@ -0,0 +1,577 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+import (
+	"cmd/compile/internal/gc"
+	"cmd/internal/obj"
+	"cmd/internal/obj/s390x"
+	"fmt"
+)
+
+// clearLoopCutoff is the (somewhat arbitrary) value above which it is better
+// to have a loop of clear instructions (e.g. XCs) rather than just generating
+// multiple instructions (i.e. loop unrolling).
+// Must be between 256 and 4096.
+const clearLoopCutoff = 1024
+
+func defframe(ptxt *obj.Prog) {
+	// fill in argument size, stack size
+	ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.ArgWidth(), int64(gc.Widthptr)))
+	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+	ptxt.To.Offset = int64(frame)
+
+	// insert code to zero ambiguously live variables
+	// so that the garbage collector only sees initialized values
+	// when it looks for pointers.
+	p := ptxt
+
+	hi := int64(0)
+	lo := hi
+
+	// iterate through declarations - they are sorted in decreasing xoffset order.
+	for _, n := range gc.Curfn.Func.Dcl {
+		if !n.Name.Needzero {
+			continue
+		}
+		if n.Class != gc.PAUTO {
+			gc.Fatalf("needzero class %d", n.Class)
+		}
+		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+			gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, gc.FmtLong), int(n.Type.Width), int(n.Xoffset))
+		}
+
+		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
+			// merge with range we already have
+			lo = n.Xoffset
+
+			continue
+		}
+
+		// zero old range
+		p = zerorange(p, int64(frame), lo, hi)
+
+		// set new range
+		hi = n.Xoffset + n.Type.Width
+
+		lo = n.Xoffset
+	}
+
+	// zero final range
+	zerorange(p, int64(frame), lo, hi)
+}
+
+// zerorange clears the stack in the given range.
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
+	cnt := hi - lo
+	if cnt == 0 {
+		return p
+	}
+
+	// Adjust the frame to account for LR.
+ frame += gc.Ctxt.FixedFrameSize() + offset := frame + lo + reg := int16(s390x.REGSP) + + // If the offset cannot fit in a 12-bit unsigned displacement then we + // need to create a copy of the stack pointer that we can adjust. + // We also need to do this if we are going to loop. + if offset < 0 || offset > 4096-clearLoopCutoff || cnt > clearLoopCutoff { + p = appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, offset, obj.TYPE_REG, s390x.REGRT1, 0) + p.Reg = int16(s390x.REGSP) + reg = s390x.REGRT1 + offset = 0 + } + + // Generate a loop of large clears. + if cnt > clearLoopCutoff { + n := cnt - (cnt % 256) + end := int16(s390x.REGRT2) + p = appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, offset+n, obj.TYPE_REG, end, 0) + p.Reg = reg + p = appendpp(p, s390x.AXC, obj.TYPE_MEM, reg, offset, obj.TYPE_MEM, reg, offset) + p.From3 = new(obj.Addr) + p.From3.Type = obj.TYPE_CONST + p.From3.Offset = 256 + pl := p + p = appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0) + p = appendpp(p, s390x.ACMP, obj.TYPE_REG, reg, 0, obj.TYPE_REG, end, 0) + p = appendpp(p, s390x.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0) + gc.Patch(p, pl) + + cnt -= n + } + + // Generate remaining clear instructions without a loop. + for cnt > 0 { + n := cnt + + // Can clear at most 256 bytes per instruction. + if n > 256 { + n = 256 + } + + switch n { + // Handle very small clears with move instructions. + case 8, 4, 2, 1: + ins := s390x.AMOVB + switch n { + case 8: + ins = s390x.AMOVD + case 4: + ins = s390x.AMOVW + case 2: + ins = s390x.AMOVH + } + p = appendpp(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, offset) + + // Handle clears that would require multiple move instructions with XC. 
+	default:
+		p = appendpp(p, s390x.AXC, obj.TYPE_MEM, reg, offset, obj.TYPE_MEM, reg, offset)
+		p.From3 = new(obj.Addr)
+		p.From3.Type = obj.TYPE_CONST
+		p.From3.Offset = n
+	}
+
+	cnt -= n
+	offset += n
+	}
+
+	return p
+}
+
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
+	q := gc.Ctxt.NewProg()
+	gc.Clearp(q)
+	q.As = as
+	q.Lineno = p.Lineno
+	q.From.Type = ftype
+	q.From.Reg = freg
+	q.From.Offset = foffset
+	q.To.Type = ttype
+	q.To.Reg = treg
+	q.To.Offset = toffset
+	q.Link = p.Link
+	p.Link = q
+	return q
+}
+
+func ginsnop() {
+	var reg gc.Node
+	gc.Nodreg(&reg, gc.Types[gc.TINT], s390x.REG_R0)
+	gins(s390x.AOR, &reg, &reg)
+}
+
+var panicdiv *gc.Node
+
+/*
+ * generate division.
+ * generates one of:
+ *	res = nl / nr
+ *	res = nl % nr
+ * according to op.
+ */
+func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+	// Have to be careful about handling
+	// most negative int divided by -1 correctly.
+	// The hardware will generate undefined result.
+	// Also need to explicitly trap on division by zero,
+	// the hardware will silently generate undefined result.
+	// DIVW will leave unpredictable result in higher 32-bit,
+	// so always use DIVD/DIVDU.
+	t := nl.Type
+
+	t0 := t
+	check := 0
+	if t.IsSigned() {
+		check = 1
+		if gc.Isconst(nl, gc.CTINT) && nl.Int64() != -(1<<uint64(t.Width*8-1)) {
+			check = 0
+		} else if gc.Isconst(nr, gc.CTINT) && nr.Int64() != -1 {
+			check = 0
+		}
+	}
+
+	if t.Width < 8 {
+		if t.IsSigned() {
+			t = gc.Types[gc.TINT64]
+		} else {
+			t = gc.Types[gc.TUINT64]
+		}
+		check = 0
+	}
+
+	a := optoas(gc.ODIV, t)
+
+	var tl gc.Node
+	gc.Regalloc(&tl, t0, nil)
+	var tr gc.Node
+	gc.Regalloc(&tr, t0, nil)
+	if nl.Ullman >= nr.Ullman {
+		gc.Cgen(nl, &tl)
+		gc.Cgen(nr, &tr)
+	} else {
+		gc.Cgen(nr, &tr)
+		gc.Cgen(nl, &tl)
+	}
+
+	if t != t0 {
+		// Convert
+		tl2 := tl
+
+		tr2 := tr
+		tl.Type = t
+		tr.Type = t
+		gmove(&tl2, &tl)
+		gmove(&tr2, &tr)
+	}
+
+	// Handle divide-by-zero panic.
+ p1 := gins(optoas(gc.OCMP, t), &tr, nil) + + p1.To.Type = obj.TYPE_REG + p1.To.Reg = s390x.REGZERO + p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1) + if panicdiv == nil { + panicdiv = gc.Sysfunc("panicdivide") + } + gc.Ginscall(panicdiv, -1) + gc.Patch(p1, gc.Pc) + + var p2 *obj.Prog + if check != 0 { + var nm1 gc.Node + gc.Nodconst(&nm1, t, -1) + gins(optoas(gc.OCMP, t), &tr, &nm1) + p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1) + if op == gc.ODIV { + // a / (-1) is -a. + gins(optoas(gc.OMINUS, t), nil, &tl) + + gmove(&tl, res) + } else { + // a % (-1) is 0. + var nz gc.Node + gc.Nodconst(&nz, t, 0) + + gmove(&nz, res) + } + + p2 = gc.Gbranch(obj.AJMP, nil, 0) + gc.Patch(p1, gc.Pc) + } + + p1 = gins(a, &tr, &tl) + if op == gc.ODIV { + gc.Regfree(&tr) + gmove(&tl, res) + } else { + // A%B = A-(A/B*B) + var tm gc.Node + gc.Regalloc(&tm, t, nil) + + // patch div to use the 3 register form + // TODO(minux): add gins3? + p1.Reg = p1.To.Reg + + p1.To.Reg = tm.Reg + gins(optoas(gc.OMUL, t), &tr, &tm) + gc.Regfree(&tr) + gins(optoas(gc.OSUB, t), &tm, &tl) + gc.Regfree(&tm) + gmove(&tl, res) + } + + gc.Regfree(&tl) + if check != 0 { + gc.Patch(p2, gc.Pc) + } +} + +/* + * generate high multiply: + * res = (nl*nr) >> width + */ +func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) { + // largest ullman on left. 
+ if nl.Ullman < nr.Ullman { + nl, nr = nr, nl + } + + t := nl.Type + w := int(t.Width) * 8 + var n1 gc.Node + gc.Cgenr(nl, &n1, res) + var n2 gc.Node + gc.Cgenr(nr, &n2, nil) + switch gc.Simtype[t.Etype] { + case gc.TINT8, + gc.TINT16, + gc.TINT32: + gins(optoas(gc.OMUL, t), &n2, &n1) + p := gins(s390x.ASRAD, nil, &n1) + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(w) + + case gc.TUINT8, + gc.TUINT16, + gc.TUINT32: + gins(optoas(gc.OMUL, t), &n2, &n1) + p := gins(s390x.ASRD, nil, &n1) + p.From.Type = obj.TYPE_CONST + p.From.Offset = int64(w) + + case gc.TINT64: + gins(s390x.AMULHD, &n2, &n1) + + case gc.TUINT64: + gins(s390x.AMULHDU, &n2, &n1) + + default: + gc.Fatalf("cgen_hmul %v", t) + } + + gc.Cgen(&n1, res) + gc.Regfree(&n1) + gc.Regfree(&n2) +} + +/* + * generate shift according to op, one of: + * res = nl << nr + * res = nl >> nr + */ +func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) { + a := optoas(op, nl.Type) + + if nr.Op == gc.OLITERAL { + var n1 gc.Node + gc.Regalloc(&n1, nl.Type, res) + gc.Cgen(nl, &n1) + sc := uint64(nr.Int64()) + if sc >= uint64(nl.Type.Width*8) { + // large shift gets 2 shifts by width-1 + var n3 gc.Node + gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1) + + gins(a, &n3, &n1) + gins(a, &n3, &n1) + } else { + gins(a, nr, &n1) + } + gmove(&n1, res) + gc.Regfree(&n1) + return + } + + if nl.Ullman >= gc.UINF { + var n4 gc.Node + gc.Tempname(&n4, nl.Type) + gc.Cgen(nl, &n4) + nl = &n4 + } + + if nr.Ullman >= gc.UINF { + var n5 gc.Node + gc.Tempname(&n5, nr.Type) + gc.Cgen(nr, &n5) + nr = &n5 + } + + // Allow either uint32 or uint64 as shift type, + // to avoid unnecessary conversion from uint32 to uint64 + // just to do the comparison. 
+	tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
+
+	if tcount.Etype < gc.TUINT32 {
+		tcount = gc.Types[gc.TUINT32]
+	}
+
+	var n1 gc.Node
+	gc.Regalloc(&n1, nr.Type, nil) // to hold the shift amount
+	var n3 gc.Node
+	gc.Regalloc(&n3, tcount, &n1) // to clear the high bits of the shift amount
+
+	var n2 gc.Node
+	gc.Regalloc(&n2, nl.Type, res)
+
+	if nl.Ullman >= nr.Ullman {
+		gc.Cgen(nl, &n2)
+		gc.Cgen(nr, &n1)
+		gmove(&n1, &n3)
+	} else {
+		gc.Cgen(nr, &n1)
+		gmove(&n1, &n3)
+		gc.Cgen(nl, &n2)
+	}
+
+	gc.Regfree(&n3)
+
+	// test and fix up large shifts
+	if !bounded {
+		gc.Nodconst(&n3, tcount, nl.Type.Width*8)
+		gins(optoas(gc.OCMP, tcount), &n1, &n3)
+		p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, 1)
+		if op == gc.ORSH && nl.Type.IsSigned() {
+			gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+			gins(a, &n3, &n2)
+		} else {
+			gc.Nodconst(&n3, nl.Type, 0)
+			gmove(&n3, &n2)
+		}
+
+		gc.Patch(p1, gc.Pc)
+	}
+
+	gins(a, &n1, &n2)
+
+	gmove(&n2, res)
+
+	gc.Regfree(&n1)
+	gc.Regfree(&n2)
+}
+
+// clearfat clears (i.e. replaces with zeros) the value pointed to by nl.
+func clearfat(nl *gc.Node) {
+	if gc.Debug['g'] != 0 {
+		fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
+	}
+
+	// Avoid taking the address for simple enough types.
+	if gc.Componentgen(nil, nl) {
+		return
+	}
+
+	var dst gc.Node
+	gc.Regalloc(&dst, gc.Types[gc.Tptr], nil)
+	gc.Agen(nl, &dst)
+
+	var boff int64
+	w := nl.Type.Width
+	if w > clearLoopCutoff {
+		// Generate a loop clearing 256 bytes per iteration using XCs.
+ var end gc.Node + gc.Regalloc(&end, gc.Types[gc.Tptr], nil) + p := gins(s390x.AMOVD, &dst, &end) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = w - (w % 256) + + p = gins(s390x.AXC, &dst, &dst) + p.From.Type = obj.TYPE_MEM + p.From.Offset = 0 + p.To.Type = obj.TYPE_MEM + p.To.Offset = 0 + p.From3 = new(obj.Addr) + p.From3.Offset = 256 + p.From3.Type = obj.TYPE_CONST + pl := p + + ginscon(s390x.AADD, 256, &dst) + gins(s390x.ACMP, &dst, &end) + gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), pl) + gc.Regfree(&end) + w = w % 256 + } + + // Generate instructions to clear the remaining memory. + for w > 0 { + n := w + + // Can clear at most 256 bytes per instruction. + if n > 256 { + n = 256 + } + + switch n { + // Handle very small clears using moves. + case 8, 4, 2, 1: + ins := s390x.AMOVB + switch n { + case 8: + ins = s390x.AMOVD + case 4: + ins = s390x.AMOVW + case 2: + ins = s390x.AMOVH + } + p := gins(ins, nil, &dst) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 0 + p.To.Type = obj.TYPE_MEM + p.To.Offset = boff + + // Handle clears that would require multiple moves with a XC. + default: + p := gins(s390x.AXC, &dst, &dst) + p.From.Type = obj.TYPE_MEM + p.From.Offset = boff + p.To.Type = obj.TYPE_MEM + p.To.Offset = boff + p.From3 = new(obj.Addr) + p.From3.Offset = n + p.From3.Type = obj.TYPE_CONST + } + + boff += n + w -= n + } + + gc.Regfree(&dst) +} + +// Called after regopt and peep have run. +// Expand CHECKNIL pseudo-op into actual nil pointer check. 
+func expandchecks(firstp *obj.Prog) { + for p := firstp; p != nil; p = p.Link { + if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 { + fmt.Printf("expandchecks: %v\n", p) + } + if p.As != obj.ACHECKNIL { + continue + } + if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers + gc.Warnl(p.Lineno, "generated nil check") + } + if p.From.Type != obj.TYPE_REG { + gc.Fatalf("invalid nil check %v\n", p) + } + + // check is + // CMPBNE arg, $0, 2(PC) [likely] + // MOVD R0, 0(R0) + p1 := gc.Ctxt.NewProg() + + gc.Clearp(p1) + p1.Link = p.Link + p.Link = p1 + p1.Lineno = p.Lineno + p1.Pc = 9999 + p.As = s390x.ACMPBNE + p.From3 = new(obj.Addr) + p.From3.Type = obj.TYPE_CONST + p.From3.Offset = 0 + + p.To.Type = obj.TYPE_BRANCH + p.To.Val = p1.Link + + // crash by write to memory address 0. + p1.As = s390x.AMOVD + + p1.From.Type = obj.TYPE_REG + p1.From.Reg = s390x.REGZERO + p1.To.Type = obj.TYPE_MEM + p1.To.Reg = s390x.REGZERO + p1.To.Offset = 0 + } +} + +// res = runtime.getg() +func getg(res *gc.Node) { + var n1 gc.Node + gc.Nodreg(&n1, res.Type, s390x.REGG) + gmove(&n1, res) +} diff --git a/src/cmd/compile/internal/s390x/gsubr.go b/src/cmd/compile/internal/s390x/gsubr.go new file mode 100644 index 0000000000..e9cfd23e42 --- /dev/null +++ b/src/cmd/compile/internal/s390x/gsubr.go @@ -0,0 +1,1115 @@ +// Derived from Inferno utils/6c/txt.c +// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package s390x + +import ( + "cmd/compile/internal/gc" + "cmd/internal/obj" + "cmd/internal/obj/s390x" + "fmt" +) + +var resvd = []int{ + s390x.REGZERO, // R0 + s390x.REGTMP, // R10 + s390x.REGTMP2, // R11 + s390x.REGCTXT, // R12 + s390x.REGG, // R13 + s390x.REG_LR, // R14 + s390x.REGSP, // R15 +} + +// generate +// as $c, n +func ginscon(as obj.As, c int64, n2 *gc.Node) { + var n1 gc.Node + + gc.Nodconst(&n1, gc.Types[gc.TINT64], c) + + if as != s390x.AMOVD && (c < -s390x.BIG || c > s390x.BIG) || n2.Op != gc.OREGISTER || as == s390x.AMULLD { + // cannot have more than 16-bit of immediate in ADD, etc. + // instead, MOV into register first. 
+ var ntmp gc.Node + gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil) + + rawgins(s390x.AMOVD, &n1, &ntmp) + rawgins(as, &ntmp, n2) + gc.Regfree(&ntmp) + return + } + + rawgins(as, &n1, n2) +} + +// generate +// as n, $c (CMP/CMPU) +func ginscon2(as obj.As, n2 *gc.Node, c int64) { + var n1 gc.Node + + gc.Nodconst(&n1, gc.Types[gc.TINT64], c) + + switch as { + default: + gc.Fatalf("ginscon2") + + case s390x.ACMP: + if -s390x.BIG <= c && c <= s390x.BIG { + rawgins(as, n2, &n1) + return + } + + case s390x.ACMPU: + if 0 <= c && c <= 2*s390x.BIG { + rawgins(as, n2, &n1) + return + } + } + + // MOV n1 into register first + var ntmp gc.Node + gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil) + + rawgins(s390x.AMOVD, &n1, &ntmp) + rawgins(as, n2, &ntmp) + gc.Regfree(&ntmp) +} + +func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog { + if t.IsInteger() && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL { + // Reverse comparison to place constant last. + op = gc.Brrev(op) + n1, n2 = n2, n1 + } + + var r1, r2, g1, g2 gc.Node + gc.Regalloc(&r1, t, n1) + gc.Regalloc(&g1, n1.Type, &r1) + gc.Cgen(n1, &g1) + gmove(&g1, &r1) + if t.IsInteger() && gc.Isconst(n2, gc.CTINT) { + ginscon2(optoas(gc.OCMP, t), &r1, n2.Int64()) + } else { + gc.Regalloc(&r2, t, n2) + gc.Regalloc(&g2, n1.Type, &r2) + gc.Cgen(n2, &g2) + gmove(&g2, &r2) + rawgins(optoas(gc.OCMP, t), &r1, &r2) + gc.Regfree(&g2) + gc.Regfree(&r2) + } + gc.Regfree(&g1) + gc.Regfree(&r1) + return gc.Gbranch(optoas(op, t), nil, likely) +} + +// gmvc tries to move f to t using a mvc instruction. +// If successful it returns true, otherwise it returns false. 
+func gmvc(f, t *gc.Node) bool { + ft := int(gc.Simsimtype(f.Type)) + tt := int(gc.Simsimtype(t.Type)) + + if ft != tt { + return false + } + + if f.Op != gc.OINDREG || t.Op != gc.OINDREG { + return false + } + + if f.Xoffset < 0 || f.Xoffset >= 4096-8 { + return false + } + + if t.Xoffset < 0 || t.Xoffset >= 4096-8 { + return false + } + + var len int64 + switch ft { + case gc.TUINT8, gc.TINT8, gc.TBOOL: + len = 1 + case gc.TUINT16, gc.TINT16: + len = 2 + case gc.TUINT32, gc.TINT32, gc.TFLOAT32: + len = 4 + case gc.TUINT64, gc.TINT64, gc.TFLOAT64, gc.TPTR64: + len = 8 + case gc.TUNSAFEPTR: + len = int64(gc.Widthptr) + default: + return false + } + + p := gc.Prog(s390x.AMVC) + gc.Naddr(&p.From, f) + gc.Naddr(&p.To, t) + p.From3 = new(obj.Addr) + p.From3.Offset = len + p.From3.Type = obj.TYPE_CONST + return true +} + +// generate move: +// t = f +// hard part is conversions. +func gmove(f *gc.Node, t *gc.Node) { + if gc.Debug['M'] != 0 { + fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, gc.FmtLong), gc.Nconv(t, gc.FmtLong)) + } + + ft := int(gc.Simsimtype(f.Type)) + tt := int(gc.Simsimtype(t.Type)) + cvt := t.Type + + if gc.Iscomplex[ft] || gc.Iscomplex[tt] { + gc.Complexmove(f, t) + return + } + + var a obj.As + + // cannot have two memory operands + if gc.Ismem(f) && gc.Ismem(t) { + if gmvc(f, t) { + return + } + goto hard + } + + // convert constant to desired type + if f.Op == gc.OLITERAL { + var con gc.Node + f.Convconst(&con, t.Type) + f = &con + ft = tt // so big switch will choose a simple mov + + // some constants can't move directly to memory. + if gc.Ismem(t) { + // float constants come from memory. + if t.Type.IsFloat() { + goto hard + } + + // all immediates are 16-bit sign-extended + // unless moving into a register. 
+ if t.Type.IsInteger() { + if i := con.Int64(); int64(int16(i)) != i { + goto hard + } + } + + // immediate moves to memory have a 12-bit unsigned displacement + if t.Xoffset < 0 || t.Xoffset >= 4096-8 { + goto hard + } + } + } + + // a float-to-int or int-to-float conversion requires the source operand in a register + if gc.Ismem(f) && ((f.Type.IsFloat() && t.Type.IsInteger()) || (f.Type.IsInteger() && t.Type.IsFloat())) { + cvt = f.Type + goto hard + } + + // a float32-to-float64 or float64-to-float32 conversion requires the source operand in a register + if gc.Ismem(f) && f.Type.IsFloat() && t.Type.IsFloat() && (ft != tt) { + cvt = f.Type + goto hard + } + + // value -> value copy, only one memory operand. + // figure out the instruction to use. + // break out of switch for one-instruction gins. + // goto rdst for "destination must be register". + // goto hard for "convert to cvt type first". + // otherwise handle and return. + switch uint32(ft)<<16 | uint32(tt) { + default: + gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, gc.FmtLong), gc.Tconv(t.Type, gc.FmtLong)) + + // integer copy and truncate + case gc.TINT8<<16 | gc.TINT8, + gc.TUINT8<<16 | gc.TINT8, + gc.TINT16<<16 | gc.TINT8, + gc.TUINT16<<16 | gc.TINT8, + gc.TINT32<<16 | gc.TINT8, + gc.TUINT32<<16 | gc.TINT8, + gc.TINT64<<16 | gc.TINT8, + gc.TUINT64<<16 | gc.TINT8: + a = s390x.AMOVB + + case gc.TINT8<<16 | gc.TUINT8, + gc.TUINT8<<16 | gc.TUINT8, + gc.TINT16<<16 | gc.TUINT8, + gc.TUINT16<<16 | gc.TUINT8, + gc.TINT32<<16 | gc.TUINT8, + gc.TUINT32<<16 | gc.TUINT8, + gc.TINT64<<16 | gc.TUINT8, + gc.TUINT64<<16 | gc.TUINT8: + a = s390x.AMOVBZ + + case gc.TINT16<<16 | gc.TINT16, + gc.TUINT16<<16 | gc.TINT16, + gc.TINT32<<16 | gc.TINT16, + gc.TUINT32<<16 | gc.TINT16, + gc.TINT64<<16 | gc.TINT16, + gc.TUINT64<<16 | gc.TINT16: + a = s390x.AMOVH + + case gc.TINT16<<16 | gc.TUINT16, + gc.TUINT16<<16 | gc.TUINT16, + gc.TINT32<<16 | gc.TUINT16, + gc.TUINT32<<16 | gc.TUINT16, + gc.TINT64<<16 | gc.TUINT16, + 
gc.TUINT64<<16 | gc.TUINT16: + a = s390x.AMOVHZ + + case gc.TINT32<<16 | gc.TINT32, + gc.TUINT32<<16 | gc.TINT32, + gc.TINT64<<16 | gc.TINT32, + gc.TUINT64<<16 | gc.TINT32: + a = s390x.AMOVW + + case gc.TINT32<<16 | gc.TUINT32, + gc.TUINT32<<16 | gc.TUINT32, + gc.TINT64<<16 | gc.TUINT32, + gc.TUINT64<<16 | gc.TUINT32: + a = s390x.AMOVWZ + + case gc.TINT64<<16 | gc.TINT64, + gc.TINT64<<16 | gc.TUINT64, + gc.TUINT64<<16 | gc.TINT64, + gc.TUINT64<<16 | gc.TUINT64: + a = s390x.AMOVD + + // sign extend int8 + case gc.TINT8<<16 | gc.TINT16, + gc.TINT8<<16 | gc.TUINT16, + gc.TINT8<<16 | gc.TINT32, + gc.TINT8<<16 | gc.TUINT32, + gc.TINT8<<16 | gc.TINT64, + gc.TINT8<<16 | gc.TUINT64: + a = s390x.AMOVB + goto rdst + + // sign extend uint8 + case gc.TUINT8<<16 | gc.TINT16, + gc.TUINT8<<16 | gc.TUINT16, + gc.TUINT8<<16 | gc.TINT32, + gc.TUINT8<<16 | gc.TUINT32, + gc.TUINT8<<16 | gc.TINT64, + gc.TUINT8<<16 | gc.TUINT64: + a = s390x.AMOVBZ + goto rdst + + // sign extend int16 + case gc.TINT16<<16 | gc.TINT32, + gc.TINT16<<16 | gc.TUINT32, + gc.TINT16<<16 | gc.TINT64, + gc.TINT16<<16 | gc.TUINT64: + a = s390x.AMOVH + goto rdst + + // zero extend uint16 + case gc.TUINT16<<16 | gc.TINT32, + gc.TUINT16<<16 | gc.TUINT32, + gc.TUINT16<<16 | gc.TINT64, + gc.TUINT16<<16 | gc.TUINT64: + a = s390x.AMOVHZ + goto rdst + + // sign extend int32 + case gc.TINT32<<16 | gc.TINT64, + gc.TINT32<<16 | gc.TUINT64: + a = s390x.AMOVW + goto rdst + + // zero extend uint32 + case gc.TUINT32<<16 | gc.TINT64, + gc.TUINT32<<16 | gc.TUINT64: + a = s390x.AMOVWZ + goto rdst + + // float to integer + case gc.TFLOAT32<<16 | gc.TUINT8, + gc.TFLOAT32<<16 | gc.TUINT16: + cvt = gc.Types[gc.TUINT32] + goto hard + + case gc.TFLOAT32<<16 | gc.TUINT32: + a = s390x.ACLFEBR + goto rdst + + case gc.TFLOAT32<<16 | gc.TUINT64: + a = s390x.ACLGEBR + goto rdst + + case gc.TFLOAT64<<16 | gc.TUINT8, + gc.TFLOAT64<<16 | gc.TUINT16: + cvt = gc.Types[gc.TUINT32] + goto hard + + case gc.TFLOAT64<<16 | gc.TUINT32: + a = 
s390x.ACLFDBR + goto rdst + + case gc.TFLOAT64<<16 | gc.TUINT64: + a = s390x.ACLGDBR + goto rdst + + case gc.TFLOAT32<<16 | gc.TINT8, + gc.TFLOAT32<<16 | gc.TINT16: + cvt = gc.Types[gc.TINT32] + goto hard + + case gc.TFLOAT32<<16 | gc.TINT32: + a = s390x.ACFEBRA + goto rdst + + case gc.TFLOAT32<<16 | gc.TINT64: + a = s390x.ACGEBRA + goto rdst + + case gc.TFLOAT64<<16 | gc.TINT8, + gc.TFLOAT64<<16 | gc.TINT16: + cvt = gc.Types[gc.TINT32] + goto hard + + case gc.TFLOAT64<<16 | gc.TINT32: + a = s390x.ACFDBRA + goto rdst + + case gc.TFLOAT64<<16 | gc.TINT64: + a = s390x.ACGDBRA + goto rdst + + // integer to float + case gc.TUINT8<<16 | gc.TFLOAT32, + gc.TUINT16<<16 | gc.TFLOAT32: + cvt = gc.Types[gc.TUINT32] + goto hard + + case gc.TUINT32<<16 | gc.TFLOAT32: + a = s390x.ACELFBR + goto rdst + + case gc.TUINT64<<16 | gc.TFLOAT32: + a = s390x.ACELGBR + goto rdst + + case gc.TUINT8<<16 | gc.TFLOAT64, + gc.TUINT16<<16 | gc.TFLOAT64: + cvt = gc.Types[gc.TUINT32] + goto hard + + case gc.TUINT32<<16 | gc.TFLOAT64: + a = s390x.ACDLFBR + goto rdst + + case gc.TUINT64<<16 | gc.TFLOAT64: + a = s390x.ACDLGBR + goto rdst + + case gc.TINT8<<16 | gc.TFLOAT32, + gc.TINT16<<16 | gc.TFLOAT32: + cvt = gc.Types[gc.TINT32] + goto hard + + case gc.TINT32<<16 | gc.TFLOAT32: + a = s390x.ACEFBRA + goto rdst + + case gc.TINT64<<16 | gc.TFLOAT32: + a = s390x.ACEGBRA + goto rdst + + case gc.TINT8<<16 | gc.TFLOAT64, + gc.TINT16<<16 | gc.TFLOAT64: + cvt = gc.Types[gc.TINT32] + goto hard + + case gc.TINT32<<16 | gc.TFLOAT64: + a = s390x.ACDFBRA + goto rdst + + case gc.TINT64<<16 | gc.TFLOAT64: + a = s390x.ACDGBRA + goto rdst + + // float to float + case gc.TFLOAT32<<16 | gc.TFLOAT32: + a = s390x.AFMOVS + + case gc.TFLOAT64<<16 | gc.TFLOAT64: + a = s390x.AFMOVD + + case gc.TFLOAT32<<16 | gc.TFLOAT64: + a = s390x.ALDEBR + goto rdst + + case gc.TFLOAT64<<16 | gc.TFLOAT32: + a = s390x.ALEDBR + goto rdst + } + + gins(a, f, t) + return + + // requires register destination +rdst: + if t != nil && t.Op == 
gc.OREGISTER { + gins(a, f, t) + return + } else { + var r1 gc.Node + gc.Regalloc(&r1, t.Type, t) + + gins(a, f, &r1) + gmove(&r1, t) + gc.Regfree(&r1) + return + } + + // requires register intermediate +hard: + var r1 gc.Node + gc.Regalloc(&r1, cvt, t) + + gmove(f, &r1) + gmove(&r1, t) + gc.Regfree(&r1) + return +} + +func intLiteral(n *gc.Node) (x int64, ok bool) { + switch { + case n == nil: + return + case gc.Isconst(n, gc.CTINT): + return n.Int64(), true + case gc.Isconst(n, gc.CTBOOL): + return int64(obj.Bool2int(n.Bool())), true + } + return +} + +// gins is called by the front end. +// It synthesizes some multiple-instruction sequences +// so the front end can stay simpler. +func gins(as obj.As, f, t *gc.Node) *obj.Prog { + if t != nil { + if as >= obj.A_ARCHSPECIFIC { + if x, ok := intLiteral(f); ok { + ginscon(as, x, t) + return nil // caller must not use + } + } + if as == s390x.ACMP || as == s390x.ACMPU { + if x, ok := intLiteral(t); ok { + ginscon2(as, f, x) + return nil // caller must not use + } + } + } + return rawgins(as, f, t) +} + +// generate one instruction: +// as f, t +func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog { + // self move check + // TODO(mundaym): use sized math and extend to MOVB, MOVWZ etc. + switch as { + case s390x.AMOVD, s390x.AFMOVS, s390x.AFMOVD: + if f != nil && t != nil && + f.Op == gc.OREGISTER && t.Op == gc.OREGISTER && + f.Reg == t.Reg { + return nil + } + } + + p := gc.Prog(as) + gc.Naddr(&p.From, f) + gc.Naddr(&p.To, t) + + switch as { + // Bad things the front end has done to us. Crash to find call stack. 
+ case s390x.AMULLD: + if p.From.Type == obj.TYPE_CONST { + gc.Debug['h'] = 1 + gc.Fatalf("bad inst: %v", p) + } + case s390x.ACMP, s390x.ACMPU: + if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM { + gc.Debug['h'] = 1 + gc.Fatalf("bad inst: %v", p) + } + } + + if gc.Debug['g'] != 0 { + fmt.Printf("%v\n", p) + } + + w := int32(0) + switch as { + case s390x.AMOVB, s390x.AMOVBZ: + w = 1 + + case s390x.AMOVH, s390x.AMOVHZ: + w = 2 + + case s390x.AMOVW, s390x.AMOVWZ: + w = 4 + + case s390x.AMOVD: + if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR { + break + } + w = 8 + } + + if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) { + gc.Dump("f", f) + gc.Dump("t", t) + gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width) + } + + return p +} + +// optoas returns the Axxx equivalent of Oxxx for type t +func optoas(op gc.Op, t *gc.Type) obj.As { + if t == nil { + gc.Fatalf("optoas: t is nil") + } + + // avoid constant conversions in switches below + const ( + OMINUS_ = uint32(gc.OMINUS) << 16 + OLSH_ = uint32(gc.OLSH) << 16 + ORSH_ = uint32(gc.ORSH) << 16 + OADD_ = uint32(gc.OADD) << 16 + OSUB_ = uint32(gc.OSUB) << 16 + OMUL_ = uint32(gc.OMUL) << 16 + ODIV_ = uint32(gc.ODIV) << 16 + OOR_ = uint32(gc.OOR) << 16 + OAND_ = uint32(gc.OAND) << 16 + OXOR_ = uint32(gc.OXOR) << 16 + OEQ_ = uint32(gc.OEQ) << 16 + ONE_ = uint32(gc.ONE) << 16 + OLT_ = uint32(gc.OLT) << 16 + OLE_ = uint32(gc.OLE) << 16 + OGE_ = uint32(gc.OGE) << 16 + OGT_ = uint32(gc.OGT) << 16 + OCMP_ = uint32(gc.OCMP) << 16 + OAS_ = uint32(gc.OAS) << 16 + OHMUL_ = uint32(gc.OHMUL) << 16 + OSQRT_ = uint32(gc.OSQRT) << 16 + OLROT_ = uint32(gc.OLROT) << 16 + ) + + a := obj.AXXX + switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { + default: + gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(op, 0), t) + + case OEQ_ | gc.TBOOL, + OEQ_ | gc.TINT8, + OEQ_ | gc.TUINT8, + OEQ_ | gc.TINT16, + OEQ_ | 
gc.TUINT16, + OEQ_ | gc.TINT32, + OEQ_ | gc.TUINT32, + OEQ_ | gc.TINT64, + OEQ_ | gc.TUINT64, + OEQ_ | gc.TPTR32, + OEQ_ | gc.TPTR64, + OEQ_ | gc.TFLOAT32, + OEQ_ | gc.TFLOAT64: + a = s390x.ABEQ + + case ONE_ | gc.TBOOL, + ONE_ | gc.TINT8, + ONE_ | gc.TUINT8, + ONE_ | gc.TINT16, + ONE_ | gc.TUINT16, + ONE_ | gc.TINT32, + ONE_ | gc.TUINT32, + ONE_ | gc.TINT64, + ONE_ | gc.TUINT64, + ONE_ | gc.TPTR32, + ONE_ | gc.TPTR64, + ONE_ | gc.TFLOAT32, + ONE_ | gc.TFLOAT64: + a = s390x.ABNE + + case OLT_ | gc.TINT8, // ACMP + OLT_ | gc.TINT16, + OLT_ | gc.TINT32, + OLT_ | gc.TINT64, + OLT_ | gc.TUINT8, + // ACMPU + OLT_ | gc.TUINT16, + OLT_ | gc.TUINT32, + OLT_ | gc.TUINT64, + OLT_ | gc.TFLOAT32, + // AFCMPU + OLT_ | gc.TFLOAT64: + a = s390x.ABLT + + case OLE_ | gc.TINT8, // ACMP + OLE_ | gc.TINT16, + OLE_ | gc.TINT32, + OLE_ | gc.TINT64, + OLE_ | gc.TUINT8, + // ACMPU + OLE_ | gc.TUINT16, + OLE_ | gc.TUINT32, + OLE_ | gc.TUINT64, + OLE_ | gc.TFLOAT32, + OLE_ | gc.TFLOAT64: + a = s390x.ABLE + + case OGT_ | gc.TINT8, + OGT_ | gc.TINT16, + OGT_ | gc.TINT32, + OGT_ | gc.TINT64, + OGT_ | gc.TUINT8, + OGT_ | gc.TUINT16, + OGT_ | gc.TUINT32, + OGT_ | gc.TUINT64, + OGT_ | gc.TFLOAT32, + OGT_ | gc.TFLOAT64: + a = s390x.ABGT + + case OGE_ | gc.TINT8, + OGE_ | gc.TINT16, + OGE_ | gc.TINT32, + OGE_ | gc.TINT64, + OGE_ | gc.TUINT8, + OGE_ | gc.TUINT16, + OGE_ | gc.TUINT32, + OGE_ | gc.TUINT64, + OGE_ | gc.TFLOAT32, + OGE_ | gc.TFLOAT64: + a = s390x.ABGE + + case OCMP_ | gc.TBOOL, + OCMP_ | gc.TINT8, + OCMP_ | gc.TINT16, + OCMP_ | gc.TINT32, + OCMP_ | gc.TPTR32, + OCMP_ | gc.TINT64: + a = s390x.ACMP + + case OCMP_ | gc.TUINT8, + OCMP_ | gc.TUINT16, + OCMP_ | gc.TUINT32, + OCMP_ | gc.TUINT64, + OCMP_ | gc.TPTR64: + a = s390x.ACMPU + + case OCMP_ | gc.TFLOAT32: + a = s390x.ACEBR + + case OCMP_ | gc.TFLOAT64: + a = s390x.AFCMPU + + case OAS_ | gc.TBOOL, + OAS_ | gc.TINT8: + a = s390x.AMOVB + + case OAS_ | gc.TUINT8: + a = s390x.AMOVBZ + + case OAS_ | gc.TINT16: + a = s390x.AMOVH + + case OAS_ 
| gc.TUINT16: + a = s390x.AMOVHZ + + case OAS_ | gc.TINT32: + a = s390x.AMOVW + + case OAS_ | gc.TUINT32, + OAS_ | gc.TPTR32: + a = s390x.AMOVWZ + + case OAS_ | gc.TINT64, + OAS_ | gc.TUINT64, + OAS_ | gc.TPTR64: + a = s390x.AMOVD + + case OAS_ | gc.TFLOAT32: + a = s390x.AFMOVS + + case OAS_ | gc.TFLOAT64: + a = s390x.AFMOVD + + case OADD_ | gc.TINT8, + OADD_ | gc.TUINT8, + OADD_ | gc.TINT16, + OADD_ | gc.TUINT16, + OADD_ | gc.TINT32, + OADD_ | gc.TUINT32, + OADD_ | gc.TPTR32, + OADD_ | gc.TINT64, + OADD_ | gc.TUINT64, + OADD_ | gc.TPTR64: + a = s390x.AADD + + case OADD_ | gc.TFLOAT32: + a = s390x.AFADDS + + case OADD_ | gc.TFLOAT64: + a = s390x.AFADD + + case OSUB_ | gc.TINT8, + OSUB_ | gc.TUINT8, + OSUB_ | gc.TINT16, + OSUB_ | gc.TUINT16, + OSUB_ | gc.TINT32, + OSUB_ | gc.TUINT32, + OSUB_ | gc.TPTR32, + OSUB_ | gc.TINT64, + OSUB_ | gc.TUINT64, + OSUB_ | gc.TPTR64: + a = s390x.ASUB + + case OSUB_ | gc.TFLOAT32: + a = s390x.AFSUBS + + case OSUB_ | gc.TFLOAT64: + a = s390x.AFSUB + + case OMINUS_ | gc.TINT8, + OMINUS_ | gc.TUINT8, + OMINUS_ | gc.TINT16, + OMINUS_ | gc.TUINT16, + OMINUS_ | gc.TINT32, + OMINUS_ | gc.TUINT32, + OMINUS_ | gc.TPTR32, + OMINUS_ | gc.TINT64, + OMINUS_ | gc.TUINT64, + OMINUS_ | gc.TPTR64: + a = s390x.ANEG + + case OAND_ | gc.TINT8, + OAND_ | gc.TUINT8, + OAND_ | gc.TINT16, + OAND_ | gc.TUINT16, + OAND_ | gc.TINT32, + OAND_ | gc.TUINT32, + OAND_ | gc.TPTR32, + OAND_ | gc.TINT64, + OAND_ | gc.TUINT64, + OAND_ | gc.TPTR64: + a = s390x.AAND + + case OOR_ | gc.TINT8, + OOR_ | gc.TUINT8, + OOR_ | gc.TINT16, + OOR_ | gc.TUINT16, + OOR_ | gc.TINT32, + OOR_ | gc.TUINT32, + OOR_ | gc.TPTR32, + OOR_ | gc.TINT64, + OOR_ | gc.TUINT64, + OOR_ | gc.TPTR64: + a = s390x.AOR + + case OXOR_ | gc.TINT8, + OXOR_ | gc.TUINT8, + OXOR_ | gc.TINT16, + OXOR_ | gc.TUINT16, + OXOR_ | gc.TINT32, + OXOR_ | gc.TUINT32, + OXOR_ | gc.TPTR32, + OXOR_ | gc.TINT64, + OXOR_ | gc.TUINT64, + OXOR_ | gc.TPTR64: + a = s390x.AXOR + + case OLSH_ | gc.TINT8, + OLSH_ | gc.TUINT8, + 
OLSH_ | gc.TINT16, + OLSH_ | gc.TUINT16, + OLSH_ | gc.TINT32, + OLSH_ | gc.TUINT32, + OLSH_ | gc.TPTR32, + OLSH_ | gc.TINT64, + OLSH_ | gc.TUINT64, + OLSH_ | gc.TPTR64: + a = s390x.ASLD + + case ORSH_ | gc.TUINT8, + ORSH_ | gc.TUINT16, + ORSH_ | gc.TUINT32, + ORSH_ | gc.TPTR32, + ORSH_ | gc.TUINT64, + ORSH_ | gc.TPTR64: + a = s390x.ASRD + + case ORSH_ | gc.TINT8, + ORSH_ | gc.TINT16, + ORSH_ | gc.TINT32, + ORSH_ | gc.TINT64: + a = s390x.ASRAD + + case OHMUL_ | gc.TINT64: + a = s390x.AMULHD + + case OHMUL_ | gc.TUINT64, + OHMUL_ | gc.TPTR64: + a = s390x.AMULHDU + + case OMUL_ | gc.TINT8, + OMUL_ | gc.TINT16, + OMUL_ | gc.TINT32, + OMUL_ | gc.TINT64: + a = s390x.AMULLD + + case OMUL_ | gc.TUINT8, + OMUL_ | gc.TUINT16, + OMUL_ | gc.TUINT32, + OMUL_ | gc.TPTR32, + // don't use word multiply, the high 32-bit are undefined. + OMUL_ | gc.TUINT64, + OMUL_ | gc.TPTR64: + // for 64-bit multiplies, signedness doesn't matter. + a = s390x.AMULLD + + case OMUL_ | gc.TFLOAT32: + a = s390x.AFMULS + + case OMUL_ | gc.TFLOAT64: + a = s390x.AFMUL + + case ODIV_ | gc.TINT8, + ODIV_ | gc.TINT16, + ODIV_ | gc.TINT32, + ODIV_ | gc.TINT64: + a = s390x.ADIVD + + case ODIV_ | gc.TUINT8, + ODIV_ | gc.TUINT16, + ODIV_ | gc.TUINT32, + ODIV_ | gc.TPTR32, + ODIV_ | gc.TUINT64, + ODIV_ | gc.TPTR64: + a = s390x.ADIVDU + + case ODIV_ | gc.TFLOAT32: + a = s390x.AFDIVS + + case ODIV_ | gc.TFLOAT64: + a = s390x.AFDIV + + case OSQRT_ | gc.TFLOAT64: + a = s390x.AFSQRT + + case OLROT_ | gc.TUINT32, + OLROT_ | gc.TPTR32, + OLROT_ | gc.TINT32: + a = s390x.ARLL + + case OLROT_ | gc.TUINT64, + OLROT_ | gc.TPTR64, + OLROT_ | gc.TINT64: + a = s390x.ARLLG + } + + return a +} + +const ( + ODynam = 1 << 0 + OAddable = 1 << 1 +) + +var clean [20]gc.Node + +var cleani int = 0 + +func sudoclean() { + if clean[cleani-1].Op != gc.OEMPTY { + gc.Regfree(&clean[cleani-1]) + } + if clean[cleani-2].Op != gc.OEMPTY { + gc.Regfree(&clean[cleani-2]) + } + cleani -= 2 +} + +/* + * generate code to compute address of n, + * a 
reference to a (perhaps nested) field inside + * an array or struct. + * return 0 on failure, 1 on success. + * on success, leaves usable address in a. + * + * caller is responsible for calling sudoclean + * after successful sudoaddable, + * to release the register used for a. + */ +func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool { + if n.Type == nil { + return false + } + + *a = obj.Addr{} + + switch n.Op { + case gc.OLITERAL: + if !gc.Isconst(n, gc.CTINT) { + return false + } + v := n.Int64() + switch as { + default: + return false + + // operations that can cope with a 32-bit immediate + // TODO(mundaym): logical operations can work on high bits + case s390x.AADD, + s390x.AADDC, + s390x.ASUB, + s390x.AMULLW, + s390x.AAND, + s390x.AOR, + s390x.AXOR, + s390x.ASLD, + s390x.ASLW, + s390x.ASRAW, + s390x.ASRAD, + s390x.ASRW, + s390x.ASRD, + s390x.AMOVB, + s390x.AMOVBZ, + s390x.AMOVH, + s390x.AMOVHZ, + s390x.AMOVW, + s390x.AMOVWZ, + s390x.AMOVD: + if int64(int32(v)) != v { + return false + } + + // for comparisons avoid immediates unless they can + // fit into a int8/uint8 + // this favours combined compare and branch instructions + case s390x.ACMP: + if int64(int8(v)) != v { + return false + } + case s390x.ACMPU: + if int64(uint8(v)) != v { + return false + } + } + + cleani += 2 + reg := &clean[cleani-1] + reg1 := &clean[cleani-2] + reg.Op = gc.OEMPTY + reg1.Op = gc.OEMPTY + gc.Naddr(a, n) + return true + + case gc.ODOT, + gc.ODOTPTR: + cleani += 2 + reg := &clean[cleani-1] + reg1 := &clean[cleani-2] + reg.Op = gc.OEMPTY + reg1.Op = gc.OEMPTY + var nn *gc.Node + var oary [10]int64 + o := gc.Dotoffset(n, oary[:], &nn) + if nn == nil { + sudoclean() + return false + } + + if nn.Addable && o == 1 && oary[0] >= 0 { + // directly addressable set of DOTs + n1 := *nn + + n1.Type = n.Type + n1.Xoffset += oary[0] + // check that the offset fits into a 12-bit displacement + if n1.Xoffset < 0 || n1.Xoffset >= (1<<12)-8 { + sudoclean() + return false + } + gc.Naddr(a, 
&n1) + return true + } + + gc.Regalloc(reg, gc.Types[gc.Tptr], nil) + n1 := *reg + n1.Op = gc.OINDREG + if oary[0] >= 0 { + gc.Agen(nn, reg) + n1.Xoffset = oary[0] + } else { + gc.Cgen(nn, reg) + gc.Cgen_checknil(reg) + n1.Xoffset = -(oary[0] + 1) + } + + for i := 1; i < o; i++ { + if oary[i] >= 0 { + gc.Fatalf("can't happen") + } + gins(s390x.AMOVD, &n1, reg) + gc.Cgen_checknil(reg) + n1.Xoffset = -(oary[i] + 1) + } + + a.Type = obj.TYPE_NONE + a.Index = 0 + // check that the offset fits into a 12-bit displacement + if n1.Xoffset < 0 || n1.Xoffset >= (1<<12)-8 { + tmp := n1 + tmp.Op = gc.OREGISTER + tmp.Type = gc.Types[gc.Tptr] + tmp.Xoffset = 0 + gc.Cgen_checknil(&tmp) + ginscon(s390x.AADD, n1.Xoffset, &tmp) + n1.Xoffset = 0 + } + gc.Naddr(a, &n1) + return true + } + + return false +} diff --git a/src/cmd/compile/internal/s390x/peep.go b/src/cmd/compile/internal/s390x/peep.go new file mode 100644 index 0000000000..86258d67da --- /dev/null +++ b/src/cmd/compile/internal/s390x/peep.go @@ -0,0 +1,1664 @@ +// Derived from Inferno utils/6c/peep.c +// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package s390x + +import ( + "cmd/compile/internal/gc" + "cmd/internal/obj" + "cmd/internal/obj/s390x" + "fmt" +) + +type usage int + +const ( + _None usage = iota // no usage found + _Read // only read from + _ReadWriteSame // both read from and written to in a single operand + _Write // only written to + _ReadWriteDiff // both read from and written to in different operands +) + +var gactive uint32 + +func peep(firstp *obj.Prog) { + g := gc.Flowstart(firstp, nil) + if g == nil { + return + } + gactive = 0 + + run := func(name string, pass func(r *gc.Flow) int) int { + n := pass(g.Start) + if gc.Debug['P'] != 0 { + fmt.Println(name, ":", n) + } + if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 { + gc.Dumpit(name, g.Start, 0) + } + return n + } + + for { + n := 0 + n += run("constant propagation", constantPropagation) + n += run("copy propagation", copyPropagation) + n += run("cast propagation", castPropagation) + n += run("remove load-hit-stores", removeLoadHitStores) + n += run("dead code elimination", deadCodeElimination) + if n == 0 { + break + } + } + run("fuse op moves", fuseOpMoves) + run("fuse clears", fuseClear) + run("load pipelining", loadPipelining) + run("fuse compare branch", fuseCompareBranch) + run("simplify ops", simplifyOps) + run("dead code elimination", deadCodeElimination) + + // TODO(mundaym): load/store multiple aren't currently handled by copyu + // so this pass must be last. 
+ run("fuse multiple", fuseMultiple) + + gc.Flowend(g) +} + +func pushback(r0 *gc.Flow) { + var r *gc.Flow + + var b *gc.Flow + p0 := r0.Prog + for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) { + p := r.Prog + if p.As != obj.ANOP { + if !(isReg(&p.From) || isConst(&p.From)) || !isReg(&p.To) { + break + } + if copyu(p, &p0.To, nil) != _None || copyu(p0, &p.To, nil) != _None { + break + } + } + + if p.As == obj.ACALL { + break + } + b = r + } + + if b == nil { + if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 { + fmt.Printf("no pushback: %v\n", r0.Prog) + if r != nil { + fmt.Printf("\t%v [%v]\n", r.Prog, gc.Uniqs(r) != nil) + } + } + + return + } + + if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 { + fmt.Printf("pushback\n") + for r := b; ; r = r.Link { + fmt.Printf("\t%v\n", r.Prog) + if r == r0 { + break + } + } + } + + t := obj.Prog(*r0.Prog) + for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) { + p0 = r.Link.Prog + p := r.Prog + p0.As = p.As + p0.Lineno = p.Lineno + p0.From = p.From + p0.To = p.To + p0.From3 = p.From3 + p0.Reg = p.Reg + p0.RegTo2 = p.RegTo2 + if r == b { + break + } + } + + p0 = r.Prog + p0.As = t.As + p0.Lineno = t.Lineno + p0.From = t.From + p0.To = t.To + p0.From3 = t.From3 + p0.Reg = t.Reg + p0.RegTo2 = t.RegTo2 + + if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 { + fmt.Printf("\tafter\n") + for r := (*gc.Flow)(b); ; r = r.Link { + fmt.Printf("\t%v\n", r.Prog) + if r == r0 { + break + } + } + } +} + +// excise replaces the given instruction with a NOP and clears +// its operands. +func excise(r *gc.Flow) { + p := r.Prog + if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 { + fmt.Printf("%v ===delete===\n", p) + } + obj.Nopout(p) + gc.Ostats.Ndelmov++ +} + +// isZero returns true if a is either the constant 0 or the register +// REGZERO. 
+func isZero(a *obj.Addr) bool { + if a.Type == obj.TYPE_CONST && a.Offset == 0 { + return true + } + if a.Type == obj.TYPE_REG && a.Reg == s390x.REGZERO { + return true + } + return false +} + +// isReg returns true if a is a general purpose or floating point +// register (GPR or FPR). +// +// TODO(mundaym): currently this excludes REGZER0, but not other +// special registers. +func isReg(a *obj.Addr) bool { + return a.Type == obj.TYPE_REG && + s390x.REG_R0 <= a.Reg && + a.Reg <= s390x.REG_F15 && + a.Reg != s390x.REGZERO +} + +// isGPR returns true if a is a general purpose register (GPR). +// REGZERO is treated as a GPR. +func isGPR(a *obj.Addr) bool { + return a.Type == obj.TYPE_REG && + s390x.REG_R0 <= a.Reg && + a.Reg <= s390x.REG_R15 +} + +// isFPR returns true if a is a floating point register (FPR). +func isFPR(a *obj.Addr) bool { + return a.Type == obj.TYPE_REG && + s390x.REG_F0 <= a.Reg && + a.Reg <= s390x.REG_F15 +} + +// isConst returns true if a refers to a constant (integer or +// floating point, not string currently). +func isConst(a *obj.Addr) bool { + return a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_FCONST +} + +// isBDMem returns true if a refers to a memory location addressable by a +// base register (B) and a displacement (D), such as: +// x+8(R1) +// and +// 0(R10) +// It returns false if the address contains an index register (X) such as: +// 16(R1)(R2*1) +// or if a relocation is required. +func isBDMem(a *obj.Addr) bool { + return a.Type == obj.TYPE_MEM && + a.Index == 0 && + (a.Name == obj.NAME_NONE || a.Name == obj.NAME_AUTO || a.Name == obj.NAME_PARAM) +} + +// the idea is to substitute +// one register for another +// from one MOV to another +// MOV a, R1 +// ADD b, R1 / no use of R2 +// MOV R1, R2 +// would be converted to +// MOV a, R2 +// ADD b, R2 +// MOV R2, R1 +// hopefully, then the former or latter MOV +// will be eliminated by copy propagation. 
+// +// r0 (the argument, not the register) is the MOV at the end of the +// above sequences. subprop returns true if it modified any instructions. +func subprop(r0 *gc.Flow) bool { + p := r0.Prog + v1 := &p.From + if !isReg(v1) { + return false + } + v2 := &p.To + if !isReg(v2) { + return false + } + cast := false + switch p.As { + case s390x.AMOVW, s390x.AMOVWZ, + s390x.AMOVH, s390x.AMOVHZ, + s390x.AMOVB, s390x.AMOVBZ: + cast = true + } + for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) { + if gc.Uniqs(r) == nil { + break + } + p = r.Prog + switch copyu(p, v1, nil) { + case _Write, _ReadWriteDiff: + if p.As == obj.ACALL { + return false + } + if (!cast || p.As == r0.Prog.As) && p.To.Type == v1.Type && p.To.Reg == v1.Reg { + copysub(&p.To, v1, v2) + for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) { + p = r.Prog + copysub(&p.From, v1, v2) + copysub1(p, v1, v2) + copysub(&p.To, v1, v2) + } + v1.Reg, v2.Reg = v2.Reg, v1.Reg + return true + } + if cast { + return false + } + case _ReadWriteSame: + if cast { + return false + } + } + if copyu(p, v2, nil) != _None { + return false + } + } + return false +} + +// The idea is to remove redundant copies. 
+// v1->v2 F=0 +// (use v2 s/v2/v1/)* +// set v1 F=1 +// use v2 return fail (v1->v2 move must remain) +// ----------------- +// v1->v2 F=0 +// (use v2 s/v2/v1/)* +// set v1 F=1 +// set v2 return success (caller can remove v1->v2 move) +func copyprop(r *gc.Flow) bool { + p := r.Prog + + canSub := false + switch p.As { + case s390x.AFMOVS, s390x.AFMOVD, s390x.AMOVD: + canSub = true + default: + for rr := gc.Uniqp(r); rr != nil; rr = gc.Uniqp(rr) { + if gc.Uniqs(rr) == nil { + break + } + switch copyu(rr.Prog, &p.From, nil) { + case _Read, _None: + continue + } + // write + if rr.Prog.As == p.As { + canSub = true + } + break + } + } + if !canSub { + return false + } + if copyas(&p.From, &p.To) { + return true + } + + gactive++ + return copy1(&p.From, &p.To, r.S1, 0) +} + +// copy1 replaces uses of v2 with v1 starting at r and returns true if +// all uses were rewritten. +func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool { + if uint32(r.Active) == gactive { + return true + } + r.Active = int32(gactive) + for ; r != nil; r = r.S1 { + p := r.Prog + if f == 0 && gc.Uniqp(r) == nil { + // Multiple predecessors; conservatively + // assume v1 was set on other path + f = 1 + } + t := copyu(p, v2, nil) + switch t { + case _ReadWriteSame: + return false + case _Write: + return true + case _Read, _ReadWriteDiff: + if f != 0 { + return false + } + if copyu(p, v2, v1) != 0 { + return false + } + if t == _ReadWriteDiff { + return true + } + } + if f == 0 { + switch copyu(p, v1, nil) { + case _ReadWriteSame, _ReadWriteDiff, _Write: + f = 1 + } + } + if r.S2 != nil { + if !copy1(v1, v2, r.S2, f) { + return false + } + } + } + return true +} + +// If s==nil, copyu returns the set/use of v in p; otherwise, it +// modifies p to replace reads of v with reads of s and returns 0 for +// success or non-zero for failure. 
+// +// If s==nil, copy returns one of the following values: +// _Read if v only used +// _ReadWriteSame if v is set and used in one address (read-alter-rewrite; +// can't substitute) +// _Write if v is only set +// _ReadWriteDiff if v is set in one address and used in another (so addresses +// can be rewritten independently) +// _None otherwise (not touched) +func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) usage { + if p.From3Type() != obj.TYPE_NONE && p.From3Type() != obj.TYPE_CONST { + // Currently we never generate a From3 with anything other than a constant in it. + fmt.Printf("copyu: From3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3)) + } + + switch p.As { + default: + fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As)) + return _ReadWriteSame + + case // read p.From, write p.To + s390x.AMOVH, + s390x.AMOVHZ, + s390x.AMOVB, + s390x.AMOVBZ, + s390x.AMOVW, + s390x.AMOVWZ, + s390x.AMOVD, + s390x.ANEG, + s390x.AADDME, + s390x.AADDZE, + s390x.ASUBME, + s390x.ASUBZE, + s390x.AFMOVS, + s390x.AFMOVD, + s390x.ALEDBR, + s390x.AFNEG, + s390x.ALDEBR, + s390x.ACLFEBR, + s390x.ACLGEBR, + s390x.ACLFDBR, + s390x.ACLGDBR, + s390x.ACFEBRA, + s390x.ACGEBRA, + s390x.ACFDBRA, + s390x.ACGDBRA, + s390x.ACELFBR, + s390x.ACELGBR, + s390x.ACDLFBR, + s390x.ACDLGBR, + s390x.ACEFBRA, + s390x.ACEGBRA, + s390x.ACDFBRA, + s390x.ACDGBRA, + s390x.AFSQRT: + + if s != nil { + copysub(&p.From, v, s) + + // Update only indirect uses of v in p.To + if !copyas(&p.To, v) { + copysub(&p.To, v, s) + } + return _None + } + + if copyas(&p.To, v) { + // Fix up implicit from + if p.From.Type == obj.TYPE_NONE { + p.From = p.To + } + if copyau(&p.From, v) { + return _ReadWriteDiff + } + return _Write + } + + if copyau(&p.From, v) { + return _Read + } + if copyau(&p.To, v) { + // p.To only indirectly uses v + return _Read + } + + return _None + + // read p.From, read p.Reg, write p.To + case s390x.AADD, + s390x.AADDC, + s390x.AADDE, + s390x.ASUB, + s390x.ASLW, + s390x.ASRW, + s390x.ASRAW, + 
s390x.ASLD, + s390x.ASRD, + s390x.ASRAD, + s390x.ARLL, + s390x.ARLLG, + s390x.AOR, + s390x.AORN, + s390x.AAND, + s390x.AANDN, + s390x.ANAND, + s390x.ANOR, + s390x.AXOR, + s390x.AMULLW, + s390x.AMULLD, + s390x.AMULHD, + s390x.AMULHDU, + s390x.ADIVW, + s390x.ADIVD, + s390x.ADIVWU, + s390x.ADIVDU, + s390x.AFADDS, + s390x.AFADD, + s390x.AFSUBS, + s390x.AFSUB, + s390x.AFMULS, + s390x.AFMUL, + s390x.AFDIVS, + s390x.AFDIV: + if s != nil { + copysub(&p.From, v, s) + copysub1(p, v, s) + + // Update only indirect uses of v in p.To + if !copyas(&p.To, v) { + copysub(&p.To, v, s) + } + } + + if copyas(&p.To, v) { + if p.Reg == 0 { + p.Reg = p.To.Reg + } + if copyau(&p.From, v) || copyau1(p, v) { + return _ReadWriteDiff + } + return _Write + } + + if copyau(&p.From, v) { + return _Read + } + if copyau1(p, v) { + return _Read + } + if copyau(&p.To, v) { + return _Read + } + return _None + + case s390x.ABEQ, + s390x.ABGT, + s390x.ABGE, + s390x.ABLT, + s390x.ABLE, + s390x.ABNE, + s390x.ABVC, + s390x.ABVS: + return _None + + case obj.ACHECKNIL, // read p.From + s390x.ACMP, // read p.From, read p.To + s390x.ACMPU, + s390x.ACMPW, + s390x.ACMPWU, + s390x.AFCMPO, + s390x.AFCMPU, + s390x.ACEBR, + s390x.AMVC, + s390x.ACLC, + s390x.AXC, + s390x.AOC, + s390x.ANC: + if s != nil { + copysub(&p.From, v, s) + copysub(&p.To, v, s) + return _None + } + + if copyau(&p.From, v) { + return _Read + } + if copyau(&p.To, v) { + return _Read + } + return _None + + case s390x.ACMPBNE, s390x.ACMPBEQ, + s390x.ACMPBLT, s390x.ACMPBLE, + s390x.ACMPBGT, s390x.ACMPBGE, + s390x.ACMPUBNE, s390x.ACMPUBEQ, + s390x.ACMPUBLT, s390x.ACMPUBLE, + s390x.ACMPUBGT, s390x.ACMPUBGE: + if s != nil { + copysub(&p.From, v, s) + copysub1(p, v, s) + return _None + } + if copyau(&p.From, v) { + return _Read + } + if copyau1(p, v) { + return _Read + } + return _None + + case s390x.ACLEAR: + if s != nil { + copysub(&p.To, v, s) + return _None + } + if copyau(&p.To, v) { + return _Read + } + return _None + + // go never generates a 
branch to a GPR + // read p.To + case s390x.ABR: + if s != nil { + copysub(&p.To, v, s) + return _None + } + + if copyau(&p.To, v) { + return _Read + } + return _None + + case obj.ARET, obj.AUNDEF: + if s != nil { + return _None + } + + // All registers die at this point, so claim + // everything is set (and not used). + return _Write + + case s390x.ABL: + if v.Type == obj.TYPE_REG { + if s390x.REGARG != -1 && v.Reg == s390x.REGARG { + return _ReadWriteSame + } + if p.From.Type == obj.TYPE_REG && p.From.Reg == v.Reg { + return _ReadWriteSame + } + if v.Reg == s390x.REGZERO { + // Deliberately inserted nops set R0. + return _ReadWriteSame + } + if v.Reg == s390x.REGCTXT { + // Context register for closures. + // TODO(mundaym): not sure if we need to exclude this. + return _ReadWriteSame + } + } + if s != nil { + copysub(&p.To, v, s) + return _None + } + if copyau(&p.To, v) { + return _ReadWriteDiff + } + return _Write + + case obj.ATEXT: + if v.Type == obj.TYPE_REG { + if v.Reg == s390x.REGARG { + return _Write + } + } + return _None + + case obj.APCDATA, + obj.AFUNCDATA, + obj.AVARDEF, + obj.AVARKILL, + obj.AVARLIVE, + obj.AUSEFIELD, + obj.ANOP: + return _None + } +} + +// copyas returns 1 if a and v address the same register. +// +// If a is the from operand, this means this operation reads the +// register in v. If a is the to operand, this means this operation +// writes the register in v. +func copyas(a *obj.Addr, v *obj.Addr) bool { + if isReg(v) { + if a.Type == v.Type { + if a.Reg == v.Reg { + return true + } + } + } + return false +} + +// copyau returns 1 if a either directly or indirectly addresses the +// same register as v. +// +// If a is the from operand, this means this operation reads the +// register in v. If a is the to operand, this means the operation +// either reads or writes the register in v (if !copyas(a, v), then +// the operation reads the register in v). 
+func copyau(a *obj.Addr, v *obj.Addr) bool { + if copyas(a, v) { + return true + } + if v.Type == obj.TYPE_REG { + if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) { + if v.Reg == a.Reg { + return true + } + } + } + return false +} + +// copyau1 returns 1 if p.Reg references the same register as v and v +// is a direct reference. +func copyau1(p *obj.Prog, v *obj.Addr) bool { + if isReg(v) && v.Reg != 0 { + if p.Reg == v.Reg { + return true + } + } + return false +} + +// copysub replaces v.Reg with s.Reg if a.Reg and v.Reg are direct +// references to the same register. +func copysub(a, v, s *obj.Addr) { + if copyau(a, v) { + a.Reg = s.Reg + } +} + +// copysub1 replaces p.Reg with s.Reg if p.Reg and v.Reg are direct +// references to the same register. +func copysub1(p *obj.Prog, v, s *obj.Addr) { + if copyau1(p, v) { + p.Reg = s.Reg + } +} + +func sameaddr(a *obj.Addr, v *obj.Addr) bool { + if a.Type != v.Type { + return false + } + if isReg(v) && a.Reg == v.Reg { + return true + } + if v.Type == obj.NAME_AUTO || v.Type == obj.NAME_PARAM { + // TODO(mundaym): is the offset enough here? Node? + if v.Offset == a.Offset { + return true + } + } + return false +} + +func smallindir(a *obj.Addr, reg *obj.Addr) bool { + return reg.Type == obj.TYPE_REG && + a.Type == obj.TYPE_MEM && + a.Reg == reg.Reg && + 0 <= a.Offset && a.Offset < 4096 +} + +func stackaddr(a *obj.Addr) bool { + // TODO(mundaym): the name implies this should check + // for TYPE_ADDR with a base register REGSP. + return a.Type == obj.TYPE_REG && a.Reg == s390x.REGSP +} + +// isMove returns true if p is a move. Moves may imply +// sign/zero extension. +func isMove(p *obj.Prog) bool { + switch p.As { + case s390x.AMOVD, + s390x.AMOVW, s390x.AMOVWZ, + s390x.AMOVH, s390x.AMOVHZ, + s390x.AMOVB, s390x.AMOVBZ, + s390x.AFMOVD, s390x.AFMOVS: + return true + } + return false +} + +// isLoad returns true if p is a move from memory to a register. 
+func isLoad(p *obj.Prog) bool { + if !isMove(p) { + return false + } + if !(isGPR(&p.To) || isFPR(&p.To)) { + return false + } + if p.From.Type != obj.TYPE_MEM { + return false + } + return true +} + +// isStore returns true if p is a move from a register to memory. +func isStore(p *obj.Prog) bool { + if !isMove(p) { + return false + } + if !(isGPR(&p.From) || isFPR(&p.From) || isConst(&p.From)) { + return false + } + if p.To.Type != obj.TYPE_MEM { + return false + } + return true +} + +// sameStackMem returns true if a and b are both memory operands +// and address the same location which must reside on the stack. +func sameStackMem(a, b *obj.Addr) bool { + if a.Type != obj.TYPE_MEM || + b.Type != obj.TYPE_MEM || + a.Name != b.Name || + a.Sym != b.Sym || + a.Node != b.Node || + a.Reg != b.Reg || + a.Index != b.Index || + a.Offset != b.Offset { + return false + } + switch a.Name { + case obj.NAME_NONE: + return a.Reg == s390x.REGSP + case obj.NAME_PARAM, obj.NAME_AUTO: + // params and autos are always on the stack + return true + } + return false +} + +// removeLoadHitStores trys to remove loads that take place +// immediately after a store to the same location. Returns +// true if load-hit-stores were removed. +// +// For example: +// MOVD R1, 0(R15) +// MOVD 0(R15), R2 +// Would become: +// MOVD R1, 0(R15) +// MOVD R1, R2 +func removeLoadHitStores(r *gc.Flow) int { + n := 0 + for ; r != nil; r = r.Link { + p := r.Prog + if !isStore(p) { + continue + } + for rr := gc.Uniqs(r); rr != nil; rr = gc.Uniqs(rr) { + pp := rr.Prog + if gc.Uniqp(rr) == nil { + break + } + if pp.As == obj.ANOP { + continue + } + if isLoad(pp) && sameStackMem(&p.To, &pp.From) { + if size(p.As) >= size(pp.As) && isGPR(&p.From) == isGPR(&pp.To) { + pp.From = p.From + } + } + if !isMove(pp) || isStore(pp) { + break + } + if copyau(&p.From, &pp.To) { + break + } + } + } + return n +} + +// size returns the width of the given move. 
+func size(as obj.As) int { + switch as { + case s390x.AMOVD, s390x.AFMOVD: + return 8 + case s390x.AMOVW, s390x.AMOVWZ, s390x.AFMOVS: + return 4 + case s390x.AMOVH, s390x.AMOVHZ: + return 2 + case s390x.AMOVB, s390x.AMOVBZ: + return 1 + } + return -1 +} + +// castPropagation tries to eliminate unecessary casts. +// +// For example: +// MOVHZ R1, R2 // uint16 +// MOVB R2, 0(R15) // int8 +// Can be simplified to: +// MOVB R1, 0(R15) +func castPropagation(r *gc.Flow) int { + n := 0 + for ; r != nil; r = r.Link { + p := r.Prog + if !isMove(p) || !isGPR(&p.To) { + continue + } + + // r is a move with a destination register + var move *gc.Flow + for rr := gc.Uniqs(r); rr != nil; rr = gc.Uniqs(rr) { + if gc.Uniqp(rr) == nil { + // branch target: leave alone + break + } + pp := rr.Prog + if isMove(pp) && copyas(&pp.From, &p.To) { + if pp.To.Type == obj.TYPE_MEM { + if p.From.Type == obj.TYPE_MEM || + p.From.Type == obj.TYPE_ADDR { + break + } + if p.From.Type == obj.TYPE_CONST && + int64(int16(p.From.Offset)) != p.From.Offset { + break + } + } + move = rr + break + } + if pp.As == obj.ANOP { + continue + } + break + } + if move == nil { + continue + } + + // we have a move that reads from our destination reg, check if any future + // instructions also read from the reg + mp := move.Prog + if !copyas(&mp.From, &mp.To) { + safe := false + for rr := gc.Uniqs(move); rr != nil; rr = gc.Uniqs(rr) { + if gc.Uniqp(rr) == nil { + break + } + switch copyu(rr.Prog, &p.To, nil) { + case _None: + continue + case _Write: + safe = true + } + break + } + if !safe { + continue + } + } + + // at this point we have something like: + // MOV* const/mem/reg, reg + // MOV* reg, reg/mem + // now check if this is a cast that cannot be forward propagated + execute := false + if p.As == mp.As || isZero(&p.From) || size(p.As) == size(mp.As) { + execute = true + } else if isGPR(&p.From) && size(p.As) >= size(mp.As) { + execute = true + } + + if execute { + mp.From = p.From + excise(r) + n++ + } + } + 
return n +} + +// fuseClear merges memory clear operations. +// +// Looks for this pattern (sequence of clears): +// MOVD R0, n(R15) +// MOVD R0, n+8(R15) +// MOVD R0, n+16(R15) +// Replaces with: +// CLEAR $24, n(R15) +func fuseClear(r *gc.Flow) int { + n := 0 + var align int64 + var clear *obj.Prog + for ; r != nil; r = r.Link { + // If there is a branch into the instruction stream then + // we can't fuse into previous instructions. + if gc.Uniqp(r) == nil { + clear = nil + } + + p := r.Prog + if p.As == obj.ANOP { + continue + } + if p.As == s390x.AXC { + if p.From.Reg == p.To.Reg && p.From.Offset == p.To.Offset { + // TODO(mundaym): merge clears? + p.As = s390x.ACLEAR + p.From.Offset = p.From3.Offset + p.From3 = nil + p.From.Type = obj.TYPE_CONST + p.From.Reg = 0 + clear = p + } else { + clear = nil + } + continue + } + + // Is our source a constant zero? + if !isZero(&p.From) { + clear = nil + continue + } + + // Are we moving to memory? + if p.To.Type != obj.TYPE_MEM || + p.To.Index != 0 || + p.To.Offset >= 4096 || + !(p.To.Name == obj.NAME_NONE || p.To.Name == obj.NAME_AUTO || p.To.Name == obj.NAME_PARAM) { + clear = nil + continue + } + + size := int64(0) + switch p.As { + default: + clear = nil + continue + case s390x.AMOVB, s390x.AMOVBZ: + size = 1 + case s390x.AMOVH, s390x.AMOVHZ: + size = 2 + case s390x.AMOVW, s390x.AMOVWZ: + size = 4 + case s390x.AMOVD: + size = 8 + } + + // doubleword aligned clears should be kept doubleword + // aligned + if (size == 8 && align != 8) || (size != 8 && align == 8) { + clear = nil + } + + if clear != nil && + clear.To.Reg == p.To.Reg && + clear.To.Name == p.To.Name && + clear.To.Node == p.To.Node && + clear.To.Sym == p.To.Sym { + + min := clear.To.Offset + max := clear.To.Offset + clear.From.Offset + + // previous clear is already clearing this region + if min <= p.To.Offset && max >= p.To.Offset+size { + excise(r) + n++ + continue + } + + // merge forwards + if max == p.To.Offset { + clear.From.Offset += size + 
excise(r) + n++ + continue + } + + // merge backwards + if min-size == p.To.Offset { + clear.From.Offset += size + clear.To.Offset -= size + excise(r) + n++ + continue + } + } + + // transform into clear + p.From.Type = obj.TYPE_CONST + p.From.Offset = size + p.From.Reg = 0 + p.As = s390x.ACLEAR + clear = p + align = size + } + return n +} + +// fuseMultiple merges memory loads and stores into load multiple and +// store multiple operations. +// +// Looks for this pattern (sequence of loads or stores): +// MOVD R1, 0(R15) +// MOVD R2, 8(R15) +// MOVD R3, 16(R15) +// Replaces with: +// STMG R1, R3, 0(R15) +func fuseMultiple(r *gc.Flow) int { + n := 0 + var fused *obj.Prog + for ; r != nil; r = r.Link { + // If there is a branch into the instruction stream then + // we can't fuse into previous instructions. + if gc.Uniqp(r) == nil { + fused = nil + } + + p := r.Prog + + isStore := isGPR(&p.From) && isBDMem(&p.To) + isLoad := isGPR(&p.To) && isBDMem(&p.From) + + // are we a candidate? + size := int64(0) + switch p.As { + default: + fused = nil + continue + case obj.ANOP: + // skip over nops + continue + case s390x.AMOVW, s390x.AMOVWZ: + size = 4 + // TODO(mundaym): 32-bit load multiple is currently not supported + // as it requires sign/zero extension. + if !isStore { + fused = nil + continue + } + case s390x.AMOVD: + size = 8 + if !isLoad && !isStore { + fused = nil + continue + } + } + + // If we merge two loads/stores with different source/destination Nodes + // then we will lose a reference the second Node which means that the + // compiler might mark the Node as unused and free its slot on the stack. + // TODO(mundaym): allow this by adding a dummy reference to the Node. 
+ if fused == nil || + fused.From.Node != p.From.Node || + fused.From.Type != p.From.Type || + fused.To.Node != p.To.Node || + fused.To.Type != p.To.Type { + fused = p + continue + } + + // check two addresses + ca := func(a, b *obj.Addr, offset int64) bool { + return a.Reg == b.Reg && a.Offset+offset == b.Offset && + a.Sym == b.Sym && a.Name == b.Name + } + + switch fused.As { + default: + fused = p + case s390x.AMOVW, s390x.AMOVWZ: + if size == 4 && fused.From.Reg+1 == p.From.Reg && ca(&fused.To, &p.To, 4) { + fused.As = s390x.ASTMY + fused.Reg = p.From.Reg + excise(r) + n++ + } else { + fused = p + } + case s390x.AMOVD: + if size == 8 && fused.From.Reg+1 == p.From.Reg && ca(&fused.To, &p.To, 8) { + fused.As = s390x.ASTMG + fused.Reg = p.From.Reg + excise(r) + n++ + } else if size == 8 && fused.To.Reg+1 == p.To.Reg && ca(&fused.From, &p.From, 8) { + fused.As = s390x.ALMG + fused.Reg = fused.To.Reg + fused.To.Reg = p.To.Reg + excise(r) + n++ + } else { + fused = p + } + case s390x.ASTMG, s390x.ASTMY: + if (fused.As == s390x.ASTMY && size != 4) || + (fused.As == s390x.ASTMG && size != 8) { + fused = p + continue + } + offset := size * int64(fused.Reg-fused.From.Reg+1) + if fused.Reg+1 == p.From.Reg && ca(&fused.To, &p.To, offset) { + fused.Reg = p.From.Reg + excise(r) + n++ + } else { + fused = p + } + case s390x.ALMG: + offset := 8 * int64(fused.To.Reg-fused.Reg+1) + if size == 8 && fused.To.Reg+1 == p.To.Reg && ca(&fused.From, &p.From, offset) { + fused.To.Reg = p.To.Reg + excise(r) + n++ + } else { + fused = p + } + } + } + return n +} + +// simplifyOps looks for side-effect free ops that can be removed or +// replaced with moves. 
+// +// For example: +// XOR $0, R1 => NOP +// ADD $0, R1, R2 => MOVD R1, R2 +func simplifyOps(r *gc.Flow) int { + n := 0 + for ; r != nil; r = r.Link { + p := r.Prog + + // if the target is R0 then this is a required NOP + if isGPR(&p.To) && p.To.Reg == s390x.REGZERO { + continue + } + + switch p.As { + case s390x.AADD, s390x.ASUB, + s390x.AOR, s390x.AXOR, + s390x.ASLW, s390x.ASRW, s390x.ASRAW, + s390x.ASLD, s390x.ASRD, s390x.ASRAD, + s390x.ARLL, s390x.ARLLG: + if isZero(&p.From) && isGPR(&p.To) { + if p.Reg == 0 || p.Reg == p.To.Reg { + excise(r) + n++ + } else { + p.As = s390x.AMOVD + p.From.Type = obj.TYPE_REG + p.From.Reg = p.Reg + p.Reg = 0 + } + } + case s390x.AMULLW, s390x.AAND: + if isZero(&p.From) && isGPR(&p.To) { + p.As = s390x.AMOVD + p.From.Type = obj.TYPE_REG + p.From.Reg = s390x.REGZERO + p.Reg = 0 + } + } + } + return n +} + +// fuseOpMoves looks for moves following 2-operand operations and trys to merge them into +// a 3-operand operation. +// +// For example: +// ADD R1, R2 +// MOVD R2, R3 +// might become +// ADD R1, R2, R3 +func fuseOpMoves(r *gc.Flow) int { + n := 0 + for ; r != nil; r = r.Link { + p := r.Prog + switch p.As { + case s390x.AADD: + case s390x.ASUB: + if isConst(&p.From) && int64(int16(p.From.Offset)) != p.From.Offset { + continue + } + case s390x.ASLW, + s390x.ASRW, + s390x.ASRAW, + s390x.ASLD, + s390x.ASRD, + s390x.ASRAD, + s390x.ARLL, + s390x.ARLLG: + // ok - p.From will be a reg or a constant + case s390x.AOR, + s390x.AORN, + s390x.AAND, + s390x.AANDN, + s390x.ANAND, + s390x.ANOR, + s390x.AXOR, + s390x.AMULLW, + s390x.AMULLD: + if isConst(&p.From) { + // these instructions can either use 3 register form + // or have an immediate but not both + continue + } + default: + continue + } + + if p.Reg != 0 && p.Reg != p.To.Reg { + continue + } + + var move *gc.Flow + rr := gc.Uniqs(r) + for { + if rr == nil || gc.Uniqp(rr) == nil || rr == r { + break + } + pp := rr.Prog + switch copyu(pp, &p.To, nil) { + case _None: + rr = 
gc.Uniqs(rr) + continue + case _Read: + if move == nil && pp.As == s390x.AMOVD && isGPR(&pp.From) && isGPR(&pp.To) { + move = rr + rr = gc.Uniqs(rr) + continue + } + case _Write: + if move == nil { + // dead code + excise(r) + n++ + } else { + for prev := gc.Uniqp(move); prev != r; prev = gc.Uniqp(prev) { + if copyu(prev.Prog, &move.Prog.To, nil) != 0 { + move = nil + break + } + } + if move == nil { + break + } + p.Reg, p.To.Reg = p.To.Reg, move.Prog.To.Reg + excise(move) + n++ + + // clean up + if p.From.Reg == p.To.Reg && isCommutative(p.As) { + p.From.Reg, p.Reg = p.Reg, 0 + } + if p.To.Reg == p.Reg { + p.Reg = 0 + } + // we could try again if p has become a 2-operand op + // but in testing nothing extra was extracted + } + } + break + } + } + return n +} + +// isCommutative returns true if the order of input operands +// does not affect the result. For example: +// x + y == y + x so ADD is commutative +// x ^ y == y ^ x so XOR is commutative +func isCommutative(as obj.As) bool { + switch as { + case s390x.AADD, + s390x.AOR, + s390x.AAND, + s390x.AXOR, + s390x.AMULLW, + s390x.AMULLD: + return true + } + return false +} + +// applyCast applies the cast implied by the given move +// instruction to v and returns the result. +func applyCast(cast obj.As, v int64) int64 { + switch cast { + case s390x.AMOVWZ: + return int64(uint32(v)) + case s390x.AMOVHZ: + return int64(uint16(v)) + case s390x.AMOVBZ: + return int64(uint8(v)) + case s390x.AMOVW: + return int64(int32(v)) + case s390x.AMOVH: + return int64(int16(v)) + case s390x.AMOVB: + return int64(int8(v)) + } + return v +} + +// constantPropagation removes redundant constant copies. 
+func constantPropagation(r *gc.Flow) int { + n := 0 + // find MOV $con,R followed by + // another MOV $con,R without + // setting R in the interim + for ; r != nil; r = r.Link { + p := r.Prog + if isMove(p) { + if !isReg(&p.To) { + continue + } + if !isConst(&p.From) { + continue + } + } else { + continue + } + + rr := r + for { + rr = gc.Uniqs(rr) + if rr == nil || rr == r { + break + } + if gc.Uniqp(rr) == nil { + break + } + + pp := rr.Prog + t := copyu(pp, &p.To, nil) + switch t { + case _None: + continue + case _Read: + if !isGPR(&pp.From) || !isMove(pp) { + continue + } + if p.From.Type == obj.TYPE_CONST { + v := applyCast(p.As, p.From.Offset) + if isGPR(&pp.To) { + if int64(int32(v)) == v || ((v>>32)<<32) == v { + pp.From.Reg = 0 + pp.From.Offset = v + pp.From.Type = obj.TYPE_CONST + n++ + } + } else if int64(int16(v)) == v { + pp.From.Reg = 0 + pp.From.Offset = v + pp.From.Type = obj.TYPE_CONST + n++ + } + } + continue + case _Write: + if p.As != pp.As || p.From.Type != pp.From.Type { + break + } + if p.From.Type == obj.TYPE_CONST && p.From.Offset == pp.From.Offset { + excise(rr) + n++ + continue + } else if p.From.Type == obj.TYPE_FCONST { + if p.From.Val.(float64) == pp.From.Val.(float64) { + excise(rr) + n++ + continue + } + } + } + break + } + } + return n +} + +// copyPropagation tries to eliminate register-to-register moves. +func copyPropagation(r *gc.Flow) int { + n := 0 + for ; r != nil; r = r.Link { + p := r.Prog + if isMove(p) && isReg(&p.To) { + // Convert uses to $0 to uses of R0 and + // propagate R0 + if isGPR(&p.To) && isZero(&p.From) { + p.From.Type = obj.TYPE_REG + p.From.Reg = s390x.REGZERO + } + + // Try to eliminate reg->reg moves + if isGPR(&p.From) || isFPR(&p.From) { + if copyprop(r) || (subprop(r) && copyprop(r)) { + excise(r) + n++ + } + } + } + } + return n +} + +// loadPipelining pushes any load from memory as early as possible. 
+func loadPipelining(r *gc.Flow) int { + for ; r != nil; r = r.Link { + p := r.Prog + if isLoad(p) { + pushback(r) + } + } + return 0 +} + +// fuseCompareBranch finds comparisons followed by a branch and converts +// them into a compare-and-branch instruction (which avoid setting the +// condition code). +func fuseCompareBranch(r *gc.Flow) int { + n := 0 + for ; r != nil; r = r.Link { + p := r.Prog + r1 := gc.Uniqs(r) + if r1 == nil { + continue + } + p1 := r1.Prog + + var ins obj.As + switch p.As { + case s390x.ACMP: + switch p1.As { + case s390x.ABCL, s390x.ABC: + continue + case s390x.ABEQ: + ins = s390x.ACMPBEQ + case s390x.ABGE: + ins = s390x.ACMPBGE + case s390x.ABGT: + ins = s390x.ACMPBGT + case s390x.ABLE: + ins = s390x.ACMPBLE + case s390x.ABLT: + ins = s390x.ACMPBLT + case s390x.ABNE: + ins = s390x.ACMPBNE + default: + continue + } + + case s390x.ACMPU: + switch p1.As { + case s390x.ABCL, s390x.ABC: + continue + case s390x.ABEQ: + ins = s390x.ACMPUBEQ + case s390x.ABGE: + ins = s390x.ACMPUBGE + case s390x.ABGT: + ins = s390x.ACMPUBGT + case s390x.ABLE: + ins = s390x.ACMPUBLE + case s390x.ABLT: + ins = s390x.ACMPUBLT + case s390x.ABNE: + ins = s390x.ACMPUBNE + default: + continue + } + + case s390x.ACMPW, s390x.ACMPWU: + continue + + default: + continue + } + + if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 { + fmt.Printf("cnb %v; %v ", p, p1) + } + + if p1.To.Sym != nil { + continue + } + + if p.To.Type == obj.TYPE_REG { + p1.As = ins + p1.From = p.From + p1.Reg = p.To.Reg + p1.From3 = nil + } else if p.To.Type == obj.TYPE_CONST { + switch p.As { + case s390x.ACMP, s390x.ACMPW: + if (p.To.Offset < -(1 << 7)) || (p.To.Offset >= ((1 << 7) - 1)) { + continue + } + case s390x.ACMPU, s390x.ACMPWU: + if p.To.Offset >= (1 << 8) { + continue + } + default: + } + p1.As = ins + p1.From = p.From + p1.Reg = 0 + p1.From3 = new(obj.Addr) + *(p1.From3) = p.To + } else { + continue + } + + if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 { + fmt.Printf("%v\n", p1) + } + 
excise(r) + n++ + } + return n +} + +// deadCodeElimination removes writes to registers which are written +// to again before they are next read. +func deadCodeElimination(r *gc.Flow) int { + n := 0 + for ; r != nil; r = r.Link { + p := r.Prog + // Currently there are no instructions which write to multiple + // registers in copyu. This check will need to change if there + // ever are. + if !(isGPR(&p.To) || isFPR(&p.To)) || copyu(p, &p.To, nil) != _Write { + continue + } + for rr := gc.Uniqs(r); rr != nil; rr = gc.Uniqs(rr) { + t := copyu(rr.Prog, &p.To, nil) + if t == _None { + continue + } + if t == _Write { + excise(r) + n++ + } + break + } + } + return n +} diff --git a/src/cmd/compile/internal/s390x/prog.go b/src/cmd/compile/internal/s390x/prog.go new file mode 100644 index 0000000000..306adf85c3 --- /dev/null +++ b/src/cmd/compile/internal/s390x/prog.go @@ -0,0 +1,179 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s390x + +import ( + "cmd/compile/internal/gc" + "cmd/internal/obj" + "cmd/internal/obj/s390x" +) + +// This table gives the basic information about instruction +// generated by the compiler and processed in the optimizer. +// See opt.h for bit definitions. +// +// Instructions not generated need not be listed. +// As an exception to that rule, we typically write down all the +// size variants of an operation even if we just use a subset. 
+var progtable = [s390x.ALAST & obj.AMask]obj.ProgInfo{ + obj.ATYPE & obj.AMask: {Flags: gc.Pseudo | gc.Skip}, + obj.ATEXT & obj.AMask: {Flags: gc.Pseudo}, + obj.AFUNCDATA & obj.AMask: {Flags: gc.Pseudo}, + obj.APCDATA & obj.AMask: {Flags: gc.Pseudo}, + obj.AUNDEF & obj.AMask: {Flags: gc.Break}, + obj.AUSEFIELD & obj.AMask: {Flags: gc.OK}, + obj.ACHECKNIL & obj.AMask: {Flags: gc.LeftRead}, + obj.AVARDEF & obj.AMask: {Flags: gc.Pseudo | gc.RightWrite}, + obj.AVARKILL & obj.AMask: {Flags: gc.Pseudo | gc.RightWrite}, + obj.AVARLIVE & obj.AMask: {Flags: gc.Pseudo | gc.LeftRead}, + + // NOP is an internal no-op that also stands + // for USED and SET annotations. + obj.ANOP & obj.AMask: {Flags: gc.LeftRead | gc.RightWrite}, + + // Integer + s390x.AADD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ASUB & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ANEG & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AAND & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AOR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AXOR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AMULLD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AMULLW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AMULHD & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AMULHDU & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ADIVD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ADIVDU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ASLD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ASRD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ASRAD 
& obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ARLL & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ARLLG & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.ACMP & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead}, + s390x.ACMPU & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead}, + + // Floating point. + s390x.AFADD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AFADDS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AFSUB & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AFSUBS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AFMUL & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AFMULS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AFDIV & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AFDIVS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite}, + s390x.AFCMPU & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightRead}, + s390x.ACEBR & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightRead}, + s390x.ALEDBR & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ALDEBR & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.AFSQRT & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite}, + + // Conversions + s390x.ACEFBRA & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ACDFBRA & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ACEGBRA & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ACDGBRA & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ACFEBRA & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | 
gc.Conv}, + s390x.ACFDBRA & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ACGEBRA & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ACGDBRA & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ACELFBR & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ACDLFBR & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ACELGBR & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ACDLGBR & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ACLFEBR & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ACLFDBR & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ACLGEBR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv}, + s390x.ACLGDBR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv}, + + // Moves + s390x.AMOVB & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AMOVBZ & obj.AMask: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AMOVH & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AMOVHZ & obj.AMask: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AMOVW & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AMOVWZ & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AMOVD & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move}, + s390x.AFMOVS & obj.AMask: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv}, + s390x.AFMOVD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move}, + + // Storage operations + s390x.AMVC & obj.AMask: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr}, + s390x.ACLC & obj.AMask: {Flags: 
gc.LeftRead | gc.LeftAddr | gc.RightRead | gc.RightAddr}, + s390x.AXC & obj.AMask: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr}, + s390x.AOC & obj.AMask: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr}, + s390x.ANC & obj.AMask: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr}, + + // Jumps + s390x.ABR & obj.AMask: {Flags: gc.Jump | gc.Break}, + s390x.ABL & obj.AMask: {Flags: gc.Call}, + s390x.ABEQ & obj.AMask: {Flags: gc.Cjmp}, + s390x.ABNE & obj.AMask: {Flags: gc.Cjmp}, + s390x.ABGE & obj.AMask: {Flags: gc.Cjmp}, + s390x.ABLT & obj.AMask: {Flags: gc.Cjmp}, + s390x.ABGT & obj.AMask: {Flags: gc.Cjmp}, + s390x.ABLE & obj.AMask: {Flags: gc.Cjmp}, + s390x.ACMPBEQ & obj.AMask: {Flags: gc.Cjmp}, + s390x.ACMPBNE & obj.AMask: {Flags: gc.Cjmp}, + s390x.ACMPBGE & obj.AMask: {Flags: gc.Cjmp}, + s390x.ACMPBLT & obj.AMask: {Flags: gc.Cjmp}, + s390x.ACMPBGT & obj.AMask: {Flags: gc.Cjmp}, + s390x.ACMPBLE & obj.AMask: {Flags: gc.Cjmp}, + s390x.ACMPUBEQ & obj.AMask: {Flags: gc.Cjmp}, + s390x.ACMPUBNE & obj.AMask: {Flags: gc.Cjmp}, + s390x.ACMPUBGE & obj.AMask: {Flags: gc.Cjmp}, + s390x.ACMPUBLT & obj.AMask: {Flags: gc.Cjmp}, + s390x.ACMPUBGT & obj.AMask: {Flags: gc.Cjmp}, + s390x.ACMPUBLE & obj.AMask: {Flags: gc.Cjmp}, + + // Macros + s390x.ACLEAR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightAddr | gc.RightWrite}, + + // Load/store multiple + s390x.ASTMG & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RightAddr | gc.RightWrite}, + s390x.ASTMY & obj.AMask: {Flags: gc.SizeL | gc.LeftRead | gc.RightAddr | gc.RightWrite}, + s390x.ALMG & obj.AMask: {Flags: gc.SizeQ | gc.LeftAddr | gc.LeftRead | gc.RightWrite}, + s390x.ALMY & obj.AMask: {Flags: gc.SizeL | gc.LeftAddr | gc.LeftRead | gc.RightWrite}, + + obj.ARET & obj.AMask: {Flags: gc.Break}, +} + +func proginfo(p *obj.Prog) { + info := &p.Info + *info = progtable[p.As&obj.AMask] + if info.Flags == 0 { + gc.Fatalf("proginfo: unknown instruction %v", p) + } + + if 
(info.Flags&gc.RegRead != 0) && p.Reg == 0 { + info.Flags &^= gc.RegRead + info.Flags |= gc.RightRead /*CanRegRead |*/ + } + + if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 { + info.Regindex |= RtoB(int(p.From.Reg)) + } + + if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 { + info.Regindex |= RtoB(int(p.To.Reg)) + } + + if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) { + info.Flags &^= gc.LeftRead + info.Flags |= gc.LeftAddr + } + + switch p.As { + // load multiple sets a range of registers + case s390x.ALMG, s390x.ALMY: + for r := p.Reg; r <= p.To.Reg; r++ { + info.Regset |= RtoB(int(r)) + } + // store multiple reads a range of registers + case s390x.ASTMG, s390x.ASTMY: + for r := p.From.Reg; r <= p.Reg; r++ { + info.Reguse |= RtoB(int(r)) + } + } +} diff --git a/src/cmd/compile/internal/s390x/reg.go b/src/cmd/compile/internal/s390x/reg.go new file mode 100644 index 0000000000..4cb8a9da05 --- /dev/null +++ b/src/cmd/compile/internal/s390x/reg.go @@ -0,0 +1,130 @@ +// Derived from Inferno utils/6c/reg.c +// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package s390x + +import "cmd/internal/obj/s390x" +import "cmd/compile/internal/gc" + +const ( + NREGVAR = 32 /* 16 general + 16 floating */ +) + +var regname = []string{ + ".R0", + ".R1", + ".R2", + ".R3", + ".R4", + ".R5", + ".R6", + ".R7", + ".R8", + ".R9", + ".R10", + ".R11", + ".R12", + ".R13", + ".R14", + ".R15", + ".F0", + ".F1", + ".F2", + ".F3", + ".F4", + ".F5", + ".F6", + ".F7", + ".F8", + ".F9", + ".F10", + ".F11", + ".F12", + ".F13", + ".F14", + ".F15", +} + +func regnames(n *int) []string { + *n = NREGVAR + return regname +} + +func excludedregs() uint64 { + // Exclude registers with fixed functions + return RtoB(s390x.REG_R0) | + RtoB(s390x.REGSP) | + RtoB(s390x.REGG) | + RtoB(s390x.REGTMP) | + RtoB(s390x.REGTMP2) | + RtoB(s390x.REG_LR) +} + +func doregbits(r int) uint64 { + return 0 +} + +/* + * track register variables including external registers: + * bit reg + * 0 R0 + * ... ... + * 15 R15 + * 16+0 F0 + * 16+1 F1 + * ... ... + * 16+15 F15 + */ +func RtoB(r int) uint64 { + if r >= s390x.REG_R0 && r <= s390x.REG_R15 { + return 1 << uint(r-s390x.REG_R0) + } + if r >= s390x.REG_F0 && r <= s390x.REG_F15 { + return 1 << uint(16+r-s390x.REG_F0) + } + return 0 +} + +func BtoR(b uint64) int { + b &= 0xffff + if b == 0 { + return 0 + } + return gc.Bitno(b) + s390x.REG_R0 +} + +func BtoF(b uint64) int { + b >>= 16 + b &= 0xffff + if b == 0 { + return 0 + } + return gc.Bitno(b) + s390x.REG_F0 +} diff --git a/src/cmd/compile/main.go b/src/cmd/compile/main.go index 49c2fb6263..8b8161efd1 100644 --- a/src/cmd/compile/main.go +++ b/src/cmd/compile/main.go @@ -10,6 +10,7 @@ import ( "cmd/compile/internal/arm64" "cmd/compile/internal/mips64" "cmd/compile/internal/ppc64" + "cmd/compile/internal/s390x" "cmd/compile/internal/x86" "cmd/internal/obj" "fmt" @@ -38,5 +39,7 @@ func main() { mips64.Main() case "ppc64", "ppc64le": ppc64.Main() + case "s390x": + s390x.Main() } } diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go index 777c92c726..a535316ca0 
100644 --- a/src/cmd/dist/buildtool.go +++ b/src/cmd/dist/buildtool.go @@ -38,6 +38,7 @@ var bootstrapDirs = []string{ "compile/internal/ppc64", "compile/internal/ssa", "compile/internal/x86", + "compile/internal/s390x", "internal/bio", "internal/gcprog", "internal/obj", -- cgit v1.3 From 9743e4b0311c37ebacc2c9063a1cd778510eae09 Mon Sep 17 00:00:00 2001 From: Alexandru Moșoi Date: Mon, 11 Apr 2016 21:51:29 +0200 Subject: cmd/compile: share dominator tree among many passes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These passes do not modify the dominator tree too much. % benchstat old.txt new.txt name old time/op new time/op delta Template 335ms ± 3% 325ms ± 8% ~ (p=0.074 n=8+9) GoTypes 1.05s ± 1% 1.05s ± 3% ~ (p=0.095 n=9+10) Compiler 5.37s ± 4% 5.29s ± 1% -1.42% (p=0.022 n=9+10) MakeBash 34.9s ± 3% 34.4s ± 2% ~ (p=0.095 n=9+10) name old alloc/op new alloc/op delta Template 55.4MB ± 0% 54.9MB ± 0% -0.81% (p=0.000 n=10+10) GoTypes 179MB ± 0% 178MB ± 0% -0.89% (p=0.000 n=10+10) Compiler 807MB ± 0% 798MB ± 0% -1.10% (p=0.000 n=10+10) name old allocs/op new allocs/op delta Template 498k ± 0% 496k ± 0% -0.29% (p=0.000 n=9+9) GoTypes 1.42M ± 0% 1.41M ± 0% -0.24% (p=0.000 n=10+10) Compiler 5.61M ± 0% 5.60M ± 0% -0.12% (p=0.000 n=10+10) Change-Id: I4cd20cfba3f132ebf371e16046ab14d7e42799ec Reviewed-on: https://go-review.googlesource.com/21806 Run-TryBot: Alexandru Moșoi TryBot-Result: Gobot Gobot Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/compile.go | 5 +++++ src/cmd/compile/internal/ssa/dom.go | 6 ++++++ src/cmd/compile/internal/ssa/func.go | 3 +++ src/cmd/compile/internal/ssa/loopbce.go | 18 ++++++++---------- src/cmd/compile/internal/ssa/nilcheck.go | 2 +- src/cmd/compile/internal/ssa/nilcheck_test.go | 10 ++++++++++ src/cmd/compile/internal/ssa/prove.go | 9 +++------ 7 files changed, 36 insertions(+), 17 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/compile.go 
b/src/cmd/compile/internal/ssa/compile.go index b4215f119e..f4f0d8cab2 100644 --- a/src/cmd/compile/internal/ssa/compile.go +++ b/src/cmd/compile/internal/ssa/compile.go @@ -234,6 +234,7 @@ var passes = [...]pass{ {name: "zero arg cse", fn: zcse, required: true}, // required to merge OpSB values {name: "opt deadcode", fn: deadcode, required: true}, // remove any blocks orphaned during opt {name: "generic cse", fn: cse}, + {name: "generic domtree", fn: domTree}, {name: "phiopt", fn: phiopt}, {name: "nilcheckelim", fn: nilcheckelim}, {name: "prove", fn: prove}, @@ -288,6 +289,10 @@ var passOrder = [...]constraint{ {"opt", "nilcheckelim"}, // tighten should happen before lowering to avoid splitting naturally paired instructions such as CMP/SET {"tighten", "lower"}, + // nilcheckelim, prove and loopbce share idom. + {"generic domtree", "nilcheckelim"}, + {"generic domtree", "prove"}, + {"generic domtree", "loopbce"}, // tighten will be most effective when as many values have been removed as possible {"generic deadcode", "tighten"}, {"generic cse", "tighten"}, diff --git a/src/cmd/compile/internal/ssa/dom.go b/src/cmd/compile/internal/ssa/dom.go index 0fffcdc2af..fedaf602e4 100644 --- a/src/cmd/compile/internal/ssa/dom.go +++ b/src/cmd/compile/internal/ssa/dom.go @@ -364,3 +364,9 @@ func intersect(b, c *Block, postnum []int, idom []*Block) *Block { } return b } + +// build immediate dominators. +func domTree(f *Func) { + f.idom = dominators(f) + f.sdom = newSparseTree(f, f.idom) +} diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 8dd75f6093..da44f26106 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -36,6 +36,9 @@ type Func struct { freeValues *Value // free Values linked by argstorage[0]. All other fields except ID are 0/nil. freeBlocks *Block // free Blocks linked by succstorage[0]. All other fields except ID are 0/nil. 
+ idom []*Block // precomputed immediate dominators + sdom sparseTree // precomputed dominator tree + constants map[int64][]*Value // constants cache, keyed by constant value; users must check value's Op and Type } diff --git a/src/cmd/compile/internal/ssa/loopbce.go b/src/cmd/compile/internal/ssa/loopbce.go index c937ead1b2..9bd2d3f0de 100644 --- a/src/cmd/compile/internal/ssa/loopbce.go +++ b/src/cmd/compile/internal/ssa/loopbce.go @@ -31,7 +31,7 @@ type indVar struct { // // // TODO: handle 32 bit operations -func findIndVar(f *Func, sdom sparseTree) []indVar { +func findIndVar(f *Func) []indVar { var iv []indVar nextb: @@ -110,7 +110,7 @@ nextb: // Second condition: b.Succs[entry] dominates nxt so that // nxt is computed when inc < max, meaning nxt <= max. - if !sdom.isAncestorEq(b.Succs[entry], nxt.Block) { + if !f.sdom.isAncestorEq(b.Succs[entry], nxt.Block) { // inc+ind can only be reached through the branch that enters the loop. continue } @@ -160,20 +160,18 @@ nextb: // loopbce performs loop based bounds check elimination. func loopbce(f *Func) { - idom := dominators(f) - sdom := newSparseTree(f, idom) - ivList := findIndVar(f, sdom) + ivList := findIndVar(f) m := make(map[*Value]indVar) for _, iv := range ivList { m[iv.ind] = iv } - removeBoundsChecks(f, sdom, m) + removeBoundsChecks(f, m) } // removesBoundsChecks remove IsInBounds and IsSliceInBounds based on the induction variables. 
-func removeBoundsChecks(f *Func, sdom sparseTree, m map[*Value]indVar) { +func removeBoundsChecks(f *Func, m map[*Value]indVar) { for _, b := range f.Blocks { if b.Kind != BlockIf { continue @@ -202,7 +200,7 @@ func removeBoundsChecks(f *Func, sdom sparseTree, m map[*Value]indVar) { goto skip1 } - if iv, has := m[ind]; has && sdom.isAncestorEq(iv.entry, b) && isNonNegative(iv.min) { + if iv, has := m[ind]; has && f.sdom.isAncestorEq(iv.entry, b) && isNonNegative(iv.min) { if v.Args[1] == iv.max { if f.pass.debug > 0 { f.Config.Warnl(b.Line, "Found redundant %s", v.Op) @@ -229,7 +227,7 @@ func removeBoundsChecks(f *Func, sdom sparseTree, m map[*Value]indVar) { goto skip2 } - if iv, has := m[ind]; has && sdom.isAncestorEq(iv.entry, b) && isNonNegative(iv.min) { + if iv, has := m[ind]; has && f.sdom.isAncestorEq(iv.entry, b) && isNonNegative(iv.min) { if v.Args[1].Op == OpSliceCap && iv.max.Op == OpSliceLen && v.Args[1].Args[0] == iv.max.Args[0] { if f.pass.debug > 0 { f.Config.Warnl(b.Line, "Found redundant %s (len promoted to cap)", v.Op) @@ -250,7 +248,7 @@ func removeBoundsChecks(f *Func, sdom sparseTree, m map[*Value]indVar) { } // ind + add >= 0 <-> min + add >= 0 <-> min >= -add - if iv, has := m[ind]; has && sdom.isAncestorEq(iv.entry, b) && isGreaterOrEqualThan(iv.min, -add) { + if iv, has := m[ind]; has && f.sdom.isAncestorEq(iv.entry, b) && isGreaterOrEqualThan(iv.min, -add) { if !v.Args[1].isGenericIntConst() || !iv.max.isGenericIntConst() { goto skip3 } diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go index 881e3b2eff..753e48aad5 100644 --- a/src/cmd/compile/internal/ssa/nilcheck.go +++ b/src/cmd/compile/internal/ssa/nilcheck.go @@ -11,7 +11,7 @@ func nilcheckelim(f *Func) { // A nil check is redundant if the same nil check was successful in a // dominating block. The efficacy of this pass depends heavily on the // efficacy of the cse pass. 
- idom := dominators(f) + idom := f.idom domTree := make([][]*Block, f.NumBlocks()) // Create a block ID -> [dominees] mapping diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go index d1f38b6951..c1c8f94767 100644 --- a/src/cmd/compile/internal/ssa/nilcheck_test.go +++ b/src/cmd/compile/internal/ssa/nilcheck_test.go @@ -49,6 +49,7 @@ func benchmarkNilCheckDeep(b *testing.B, depth int) { b.ReportAllocs() for i := 0; i < b.N; i++ { + domTree(fun.f) nilcheckelim(fun.f) } } @@ -83,6 +84,7 @@ func TestNilcheckSimple(t *testing.T) { Exit("mem"))) CheckFunc(fun.f) + domTree(fun.f) nilcheckelim(fun.f) // clean up the removed nil check @@ -120,6 +122,7 @@ func TestNilcheckDomOrder(t *testing.T) { Goto("exit"))) CheckFunc(fun.f) + domTree(fun.f) nilcheckelim(fun.f) // clean up the removed nil check @@ -153,6 +156,7 @@ func TestNilcheckAddr(t *testing.T) { Exit("mem"))) CheckFunc(fun.f) + domTree(fun.f) nilcheckelim(fun.f) // clean up the removed nil check @@ -187,6 +191,7 @@ func TestNilcheckAddPtr(t *testing.T) { Exit("mem"))) CheckFunc(fun.f) + domTree(fun.f) nilcheckelim(fun.f) // clean up the removed nil check @@ -231,6 +236,7 @@ func TestNilcheckPhi(t *testing.T) { Exit("mem"))) CheckFunc(fun.f) + domTree(fun.f) nilcheckelim(fun.f) // clean up the removed nil check @@ -272,6 +278,7 @@ func TestNilcheckKeepRemove(t *testing.T) { Exit("mem"))) CheckFunc(fun.f) + domTree(fun.f) nilcheckelim(fun.f) // clean up the removed nil check @@ -319,6 +326,7 @@ func TestNilcheckInFalseBranch(t *testing.T) { Exit("mem"))) CheckFunc(fun.f) + domTree(fun.f) nilcheckelim(fun.f) // clean up the removed nil check @@ -370,6 +378,7 @@ func TestNilcheckUser(t *testing.T) { CheckFunc(fun.f) // we need the opt here to rewrite the user nilcheck opt(fun.f) + domTree(fun.f) nilcheckelim(fun.f) // clean up the removed nil check @@ -414,6 +423,7 @@ func TestNilcheckBug(t *testing.T) { CheckFunc(fun.f) // we need the opt here to rewrite the user 
nilcheck opt(fun.f) + domTree(fun.f) nilcheckelim(fun.f) // clean up the removed nil check diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index a12a996263..f4a10b508a 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -445,9 +445,6 @@ var ( // else branch of the first comparison is executed, we already know that i < len(a). // The code for the second panic can be removed. func prove(f *Func) { - idom := dominators(f) - sdom := newSparseTree(f, idom) - // current node state type walkState int const ( @@ -471,8 +468,8 @@ func prove(f *Func) { for len(work) > 0 { node := work[len(work)-1] work = work[:len(work)-1] - parent := idom[node.block.ID] - branch := getBranch(sdom, parent, node.block) + parent := f.idom[node.block.ID] + branch := getBranch(f.sdom, parent, node.block) switch node.state { case descend: @@ -491,7 +488,7 @@ func prove(f *Func) { block: node.block, state: simplify, }) - for s := sdom.Child(node.block); s != nil; s = sdom.Sibling(s) { + for s := f.sdom.Child(node.block); s != nil; s = f.sdom.Sibling(s) { work = append(work, bp{ block: s, state: descend, -- cgit v1.3 From cd85f711c0b6847cbfe4e05f4402df075ea936de Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Mon, 11 Apr 2016 21:23:11 -0700 Subject: cmd/compile: add x.Uses==1 test to load combiners We need to make sure that when we combine loads, we only do so if there are no other uses of the load. We can't split one load into two because that can then lead to inconsistent loaded values in the presence of races. Add some aggressive copy removal code so that phantom "dead copy" uses of values are cleaned up promptly. This lets us use x.Uses==1 conditions reliably. 
Change-Id: I9037311db85665f3868dbeb3adb3de5c20728b38 Reviewed-on: https://go-review.googlesource.com/21853 Reviewed-by: Todd Neal --- src/cmd/compile/internal/gc/ssa_test.go | 2 + src/cmd/compile/internal/gc/testdata/dupLoad.go | 46 +++++++++++++++++ .../compile/internal/gc/testdata/namedReturn.go | 4 ++ src/cmd/compile/internal/ssa/gen/AMD64.rules | 12 ++--- src/cmd/compile/internal/ssa/nilcheck_test.go | 2 +- src/cmd/compile/internal/ssa/rewrite.go | 60 +++++++++++++++++++++- src/cmd/compile/internal/ssa/rewriteAMD64.go | 24 ++++----- 7 files changed, 129 insertions(+), 21 deletions(-) create mode 100644 src/cmd/compile/internal/gc/testdata/dupLoad.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/ssa_test.go b/src/cmd/compile/internal/gc/ssa_test.go index 0fb0f17778..46e1b0a7d3 100644 --- a/src/cmd/compile/internal/gc/ssa_test.go +++ b/src/cmd/compile/internal/gc/ssa_test.go @@ -101,3 +101,5 @@ func TestPhi(t *testing.T) { runTest(t, "phi_ssa.go") } func TestSlice(t *testing.T) { runTest(t, "slice.go") } func TestNamedReturn(t *testing.T) { runTest(t, "namedReturn.go") } + +func TestDuplicateLoad(t *testing.T) { runTest(t, "dupLoad.go") } diff --git a/src/cmd/compile/internal/gc/testdata/dupLoad.go b/src/cmd/compile/internal/gc/testdata/dupLoad.go new file mode 100644 index 0000000000..d12c26355a --- /dev/null +++ b/src/cmd/compile/internal/gc/testdata/dupLoad.go @@ -0,0 +1,46 @@ +// run + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test makes sure that we don't split a single +// load up into two separate loads. + +package main + +import "fmt" + +//go:noinline +func read(b []byte) (uint16, uint16) { + // There is only a single read of b[0]. The two + // returned values must have the same low byte. 
+ v := b[0] + return uint16(v), uint16(v) | uint16(b[1])<<8 +} + +const N = 100000 + +func main() { + done := make(chan struct{}) + b := make([]byte, 2) + go func() { + for i := 0; i < N; i++ { + b[0] = byte(i) + b[1] = byte(i) + } + done <- struct{}{} + }() + go func() { + for i := 0; i < N; i++ { + x, y := read(b) + if byte(x) != byte(y) { + fmt.Printf("x=%x y=%x\n", x, y) + panic("bad") + } + } + done <- struct{}{} + }() + <-done + <-done +} diff --git a/src/cmd/compile/internal/gc/testdata/namedReturn.go b/src/cmd/compile/internal/gc/testdata/namedReturn.go index dafb5d719f..19ef8a7e43 100644 --- a/src/cmd/compile/internal/gc/testdata/namedReturn.go +++ b/src/cmd/compile/internal/gc/testdata/namedReturn.go @@ -1,5 +1,9 @@ // run +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + // This test makes sure that naming named // return variables in a return statement works. // See issue #14904. diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index dcd5e6a5e1..21c74a9c1c 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -1369,13 +1369,13 @@ // There are many ways these combinations could occur. This is // designed to match the way encoding/binary.LittleEndian does it. 
(ORW x0:(MOVBload [i] {s} p mem) - (SHLWconst [8] x1:(MOVBload [i+1] {s} p mem))) && mergePoint(b,x0,x1) != nil -> @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) + (SHLWconst [8] x1:(MOVBload [i+1] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && mergePoint(b,x0,x1) != nil -> @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) (ORL (ORL (ORL x0:(MOVBload [i] {s} p mem) (SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) (SHLLconst [16] x2:(MOVBload [i+2] {s} p mem))) - (SHLLconst [24] x3:(MOVBload [i+3] {s} p mem))) && mergePoint(b,x0,x1,x2,x3) != nil -> @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem) + (SHLLconst [24] x3:(MOVBload [i+3] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil -> @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem) (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ x0:(MOVBload [i] {s} p mem) @@ -1385,16 +1385,16 @@ (SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) (SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) (SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) - (SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) + (SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) (ORW x0:(MOVBloadidx1 [i] {s} p idx mem) - (SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) && mergePoint(b,x0,x1) != nil -> @mergePoint(b,x0,x1) (MOVWloadidx1 [i] {s} p idx mem) + (SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 && mergePoint(b,x0,x1) != nil -> @mergePoint(b,x0,x1) (MOVWloadidx1 [i] {s} p idx mem) (ORL (ORL (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) (SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) (SHLLconst [16] x2:(MOVBloadidx1 [i+2] 
{s} p idx mem))) - (SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) && mergePoint(b,x0,x1,x2,x3) != nil -> @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 [i] {s} p idx mem) + (SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil -> @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 [i] {s} p idx mem) (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) @@ -1404,4 +1404,4 @@ (SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) (SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) (SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) - (SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 [i] {s} p idx mem) + (SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 [i] {s} p idx mem) diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go index c1c8f94767..af6cbe864a 100644 --- a/src/cmd/compile/internal/ssa/nilcheck_test.go +++ b/src/cmd/compile/internal/ssa/nilcheck_test.go @@ -418,7 +418,7 @@ func TestNilcheckBug(t *testing.T) { Goto("exit")), Bloc("exit", Valu("phi", OpPhi, TypeMem, 0, nil, "mem", "store"), - Exit("mem"))) + Exit("phi"))) CheckFunc(fun.f) // we need the opt here to rewrite the user nilcheck diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index e0cb7f517b..c2f8ceadaf 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -40,9 +40,44 @@ func applyRewrite(f *Func, rb func(*Block) bool, rv func(*Value, *Config) bool) } curb = nil for _, v := range b.Values { - 
change = copyelimValue(v) || change change = phielimValue(v) || change + // Eliminate copy inputs. + // If any copy input becomes unused, mark it + // as invalid and discard its argument. Repeat + // recursively on the discarded argument. + // This phase helps remove phantom "dead copy" uses + // of a value so that a x.Uses==1 rule condition + // fires reliably. + for i, a := range v.Args { + if a.Op != OpCopy { + continue + } + x := a.Args[0] + // Rewriting can generate OpCopy loops. + // They are harmless (see removePredecessor), + // but take care to stop if we find a cycle. + slow := x // advances every other iteration + var advance bool + for x.Op == OpCopy { + x = x.Args[0] + if slow == x { + break + } + if advance { + slow = slow.Args[0] + } + advance = !advance + } + v.SetArg(i, x) + change = true + for a.Uses == 0 { + b := a.Args[0] + a.reset(OpInvalid) + a = b + } + } + // apply rewrite function curv = v if rv(v, config) { @@ -52,7 +87,28 @@ func applyRewrite(f *Func, rb func(*Block) bool, rv func(*Value, *Config) bool) } } if !change { - return + break + } + } + // remove clobbered copies + for _, b := range f.Blocks { + j := 0 + for i, v := range b.Values { + if v.Op == OpInvalid { + f.freeValue(v) + continue + } + if i != j { + b.Values[j] = v + } + j++ + } + if j != len(b.Values) { + tail := b.Values[j:] + for j := range tail { + tail[j] = nil + } + b.Values = b.Values[:j] } } } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index a6600513fa..d1793ad8c0 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -12666,7 +12666,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { return true } // match: (ORL (ORL (ORL x0:(MOVBload [i] {s} p mem) (SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) (SHLLconst [16] x2:(MOVBload [i+2] {s} p mem))) (SHLLconst [24] x3:(MOVBload [i+3] {s} p mem))) - // cond: mergePoint(b,x0,x1,x2,x3) != 
nil + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil // result: @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem) for { v_0 := v.Args[0] @@ -12754,7 +12754,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { if mem != x3.Args[1] { break } - if !(mergePoint(b, x0, x1, x2, x3) != nil) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil) { break } b = mergePoint(b, x0, x1, x2, x3) @@ -12768,7 +12768,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { return true } // match: (ORL (ORL (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) (SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) (SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) (SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) - // cond: mergePoint(b,x0,x1,x2,x3) != nil + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil // result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 [i] {s} p idx mem) for { v_0 := v.Args[0] @@ -12866,7 +12866,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { if mem != x3.Args[2] { break } - if !(mergePoint(b, x0, x1, x2, x3) != nil) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil) { break } b = mergePoint(b, x0, x1, x2, x3) @@ -12980,7 +12980,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { return true } // match: (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ x0:(MOVBload [i] {s} p mem) (SHLQconst [8] x1:(MOVBload [i+1] {s} p mem))) (SHLQconst [16] x2:(MOVBload [i+2] {s} p mem))) (SHLQconst [24] x3:(MOVBload [i+3] {s} p mem))) (SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) (SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) (SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) (SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) - // cond: mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil + // 
cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) for { v_0 := v.Args[0] @@ -13176,7 +13176,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x7.Args[1] { break } - if !(mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil) { break } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) @@ -13190,7 +13190,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { return true } // match: (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) (SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) (SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) (SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) (SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) (SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) (SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) (SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) - // cond: mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 [i] {s} p idx mem) for { v_0 := v.Args[0] @@ -13408,7 +13408,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x7.Args[2] { break } - if !(mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) 
!= nil) { break } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) @@ -13514,7 +13514,7 @@ func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool { return true } // match: (ORW x0:(MOVBload [i] {s} p mem) (SHLWconst [8] x1:(MOVBload [i+1] {s} p mem))) - // cond: mergePoint(b,x0,x1) != nil + // cond: x0.Uses == 1 && x1.Uses == 1 && mergePoint(b,x0,x1) != nil // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) for { x0 := v.Args[0] @@ -13548,7 +13548,7 @@ func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool { if mem != x1.Args[1] { break } - if !(mergePoint(b, x0, x1) != nil) { + if !(x0.Uses == 1 && x1.Uses == 1 && mergePoint(b, x0, x1) != nil) { break } b = mergePoint(b, x0, x1) @@ -13562,7 +13562,7 @@ func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool { return true } // match: (ORW x0:(MOVBloadidx1 [i] {s} p idx mem) (SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) - // cond: mergePoint(b,x0,x1) != nil + // cond: x0.Uses == 1 && x1.Uses == 1 && mergePoint(b,x0,x1) != nil // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i] {s} p idx mem) for { x0 := v.Args[0] @@ -13600,7 +13600,7 @@ func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool { if mem != x1.Args[2] { break } - if !(mergePoint(b, x0, x1) != nil) { + if !(x0.Uses == 1 && x1.Uses == 1 && mergePoint(b, x0, x1) != nil) { break } b = mergePoint(b, x0, x1) -- cgit v1.3 From 7f5a063d157c777d8e78a567fc9538929bfd38f5 Mon Sep 17 00:00:00 2001 From: Michael Munday Date: Tue, 12 Apr 2016 10:27:16 -0400 Subject: cmd/compile/internal/gc: minor Cgen_checknil cleanup Most architectures can only generate nil checks when the address to check is in a register. Currently only amd64 and 386 can generate checks for addresses that reside in memory. This is unlikely to change so the architecture check has been inverted. 
Change-Id: I73697488a183406c79a9039c62823712b510bb6a Reviewed-on: https://go-review.googlesource.com/21861 Reviewed-by: Brad Fitzpatrick Run-TryBot: Brad Fitzpatrick TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/pgen.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index bfb65ade38..f6e9ab3b06 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -324,7 +324,12 @@ func Cgen_checknil(n *Node) { Fatalf("bad checknil") } - if (Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64) && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL { + // Most architectures require that the address to be checked is + // in a register (it could be in memory). + needsReg := !Thearch.LinkArch.InFamily(sys.AMD64, sys.I386) + + // Move the address to be checked into a register if necessary. + if (needsReg && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL { var reg Node Regalloc(®, Types[Tptr], n) Cgen(n, ®) -- cgit v1.3 From 811ebb6ac961162b815f4fd50976df81ba4c47b0 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Tue, 12 Apr 2016 09:22:26 -0700 Subject: cmd/compile: temporarily disable inplace append special case Fixes #15246 Re-opens #14969 Change-Id: Ic0b41c5aa42bbb229a0d62b7f3e5888c6b29293d Reviewed-on: https://go-review.googlesource.com/21891 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/ssa.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index beb68b0385..fdd14953e6 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -699,7 +699,8 @@ func (s *state) stmt(n *Node) { // If the slice can be SSA'd, it'll be on the stack, // so there will be 
no write barriers, // so there's no need to attempt to prevent them. - if samesafeexpr(n.Left, rhs.List.First()) && !s.canSSA(n.Left) { + const doInPlaceAppend = false // issue 15246 + if doInPlaceAppend && samesafeexpr(n.Left, rhs.List.First()) && !s.canSSA(n.Left) { s.append(rhs, true) return } -- cgit v1.3 From 613ba6cda845fef442995d705027a622984c6b3a Mon Sep 17 00:00:00 2001 From: Michael Munday Date: Tue, 12 Apr 2016 12:26:17 -0400 Subject: cmd/compile/internal/gc: add s390x support Allows instructions with a From3 field to be used in regopt so long as From3 represents a constant. This is needed because the storage-to-storage instructions on s390x place the length of the data into From3. Change-Id: I12cd32d4f997baf2fe97937bb7d45bbf716dfcb5 Reviewed-on: https://go-review.googlesource.com/20875 Reviewed-by: Matthew Dempsky Run-TryBot: Matthew Dempsky --- src/cmd/compile/internal/gc/cgen.go | 4 ++-- src/cmd/compile/internal/gc/gsubr.go | 8 +++++--- src/cmd/compile/internal/gc/pgen.go | 2 +- src/cmd/compile/internal/gc/reg.go | 2 +- src/cmd/compile/internal/gc/walk.go | 7 ++++++- 5 files changed, 15 insertions(+), 8 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go index eacbc30f87..9de2a19f68 100644 --- a/src/cmd/compile/internal/gc/cgen.go +++ b/src/cmd/compile/internal/gc/cgen.go @@ -247,7 +247,7 @@ func cgen_wb(n, res *Node, wb bool) { return } - if Ctxt.Arch.InFamily(sys.AMD64, sys.I386) && n.Addable { + if Ctxt.Arch.InFamily(sys.AMD64, sys.I386, sys.S390X) && n.Addable { Thearch.Gmove(n, res) return } @@ -1829,7 +1829,7 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) { // Some architectures might need a temporary or other help here, // but they don't support direct generation of a bool value yet. // We can fix that as we go. 
- mayNeedTemp := Ctxt.Arch.InFamily(sys.ARM, sys.ARM64, sys.MIPS64, sys.PPC64) + mayNeedTemp := Ctxt.Arch.InFamily(sys.ARM, sys.ARM64, sys.MIPS64, sys.PPC64, sys.S390X) if genval { if mayNeedTemp { diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index 63a8e969c3..f1316db8d8 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -58,7 +58,9 @@ func Ismem(n *Node) bool { return true case OADDR: - return Thearch.LinkArch.InFamily(sys.AMD64, sys.PPC64) // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too + // amd64 and s390x use PC relative addressing. + // TODO(rsc): not sure why ppc64 needs this too. + return Thearch.LinkArch.InFamily(sys.AMD64, sys.PPC64, sys.S390X) } return false @@ -84,7 +86,7 @@ func Gbranch(as obj.As, t *Type, likely int) *obj.Prog { p := Prog(as) p.To.Type = obj.TYPE_BRANCH p.To.Val = nil - if as != obj.AJMP && likely != 0 && Thearch.LinkArch.Family != sys.PPC64 && Thearch.LinkArch.Family != sys.ARM64 && Thearch.LinkArch.Family != sys.MIPS64 { + if as != obj.AJMP && likely != 0 && !Thearch.LinkArch.InFamily(sys.PPC64, sys.ARM64, sys.MIPS64, sys.S390X) { p.From.Type = obj.TYPE_CONST if likely > 0 { p.From.Offset = 1 @@ -458,7 +460,7 @@ func Naddr(a *obj.Addr, n *Node) { case OADDR: Naddr(a, n.Left) a.Etype = uint8(Tptr) - if !Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64) { // TODO(rsc): Do this even for arm, ppc64. + if !Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) { // TODO(rsc): Do this even for these architectures. 
a.Width = int64(Widthptr) } if a.Type != obj.TYPE_MEM { diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index f6e9ab3b06..baa960bf75 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -287,7 +287,7 @@ func allocauto(ptxt *obj.Prog) { if haspointers(n.Type) { stkptrsize = Stksize } - if Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64) { + if Thearch.LinkArch.InFamily(sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) { Stksize = Rnd(Stksize, int64(Widthptr)) } if Stksize >= 1<<31 { diff --git a/src/cmd/compile/internal/gc/reg.go b/src/cmd/compile/internal/gc/reg.go index 8705d6dfa4..138ad683c5 100644 --- a/src/cmd/compile/internal/gc/reg.go +++ b/src/cmd/compile/internal/gc/reg.go @@ -1115,7 +1115,7 @@ func regopt(firstp *obj.Prog) { // Currently we never generate three register forms. // If we do, this will need to change. - if p.From3Type() != obj.TYPE_NONE { + if p.From3Type() != obj.TYPE_NONE && p.From3Type() != obj.TYPE_CONST { Fatalf("regopt not implemented for from3") } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 586a8e9c4f..3e5f5161db 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -673,7 +673,7 @@ opswitch: walkexprlist(n.List.Slice(), init) if n.Left.Op == ONAME && n.Left.Sym.Name == "Sqrt" && n.Left.Sym.Pkg.Path == "math" { - if Thearch.LinkArch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.PPC64) { + if Thearch.LinkArch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) { n.Op = OSQRT n.Left = n.List.First() n.List.Set(nil) @@ -3294,6 +3294,11 @@ func walkrotate(n *Node) *Node { // Constants adding to width? 
w := int(l.Type.Width * 8) + if Thearch.LinkArch.Family == sys.S390X && w != 32 && w != 64 { + // only supports 32-bit and 64-bit rotates + return n + } + if Smallintconst(l.Right) && Smallintconst(r.Right) { sl := int(l.Right.Int64()) if sl >= 0 { -- cgit v1.3 From b1851a3c11a179d4eb55f9d0dd25ef81668a9f81 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Tue, 12 Apr 2016 11:31:16 -0700 Subject: cmd/compile: move compiler-specific flags into compiler-spec. export data section Also: Adjust go/importer accordingly. Change-Id: Ia6669563793e218946af45b9fba1cf986a21c031 Reviewed-on: https://go-review.googlesource.com/21896 Reviewed-by: Alan Donovan --- src/cmd/compile/internal/gc/bexport.go | 18 +++++++----------- src/cmd/compile/internal/gc/bimport.go | 12 ++++++------ src/go/internal/gcimporter/bimport.go | 5 +---- 3 files changed, 14 insertions(+), 21 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 15e5e3ada6..cb438d7573 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -182,22 +182,12 @@ func export(out *bufio.Writer, trace bool) int { Fatalf("exporter: local package path not empty: %q", localpkg.Path) } p.pkg(localpkg) - - // write compiler-specific flags - // TODO(gri) move this into the compiler-specific export data section - { - var flags string - if safemode != 0 { - flags = "safe" - } - p.string(flags) - } if p.trace { p.tracef("\n") } // export objects - + // // First, export all exported (package-level) objects; i.e., all objects // in the current exportlist. These objects represent all information // required to import this package and type-check against it; i.e., this @@ -270,6 +260,12 @@ func export(out *bufio.Writer, trace bool) int { } } + // write compiler-specific flags + p.bool(safemode != 0) + if p.trace { + p.tracef("\n") + } + // Phase 2: Export objects added to exportlist during phase 1. 
// Don't use range since exportlist may grow during this phase // and we want to export all remaining objects. diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index 7ad4d9dbb0..9cebafcaef 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -62,9 +62,6 @@ func Import(in *bufio.Reader) { Fatalf("importer: imported package not found in pkgList[0]") } - // read compiler-specific flags - importpkg.Safe = p.string() == "safe" - // defer some type-checking until all types are read in completely // (parser.go:import_package) tcok := typecheckok @@ -73,7 +70,7 @@ func Import(in *bufio.Reader) { // read objects - // Phase 1 + // phase 1 objcount := 0 for { tag := p.tagOrIndex() @@ -91,7 +88,10 @@ func Import(in *bufio.Reader) { // --- compiler-specific export data --- - // Phase 2 + // read compiler-specific flags + importpkg.Safe = p.bool() + + // phase 2 objcount = 0 for { tag := p.tagOrIndex() @@ -264,7 +264,7 @@ func (p *importer) obj(tag int) { } default: - Fatalf("importer: unexpected object tag") + Fatalf("importer: unexpected object (tag = %d)", tag) } } diff --git a/src/go/internal/gcimporter/bimport.go b/src/go/internal/gcimporter/bimport.go index aa9569de52..a9d678b021 100644 --- a/src/go/internal/gcimporter/bimport.go +++ b/src/go/internal/gcimporter/bimport.go @@ -78,9 +78,6 @@ func BImportData(imports map[string]*types.Package, data []byte, path string) (i panic("imported packaged not found in pkgList[0]") } - // read compiler-specific flags - p.string() // discard - // read objects of phase 1 only (see cmd/compiler/internal/gc/bexport.go) objcount := 0 for { @@ -193,7 +190,7 @@ func (p *importer) obj(tag int) { p.declare(types.NewFunc(token.NoPos, pkg, name, sig)) default: - panic("unexpected object tag") + panic(fmt.Sprintf("unexpected object tag %d", tag)) } } -- cgit v1.3 From f028b9f9e2433662502283850d06e9e07e72a6bb Mon Sep 17 00:00:00 2001 From: David Crawshaw 
Date: Sun, 27 Mar 2016 10:21:48 -0400 Subject: cmd/link, etc: store typelinks as offsets This is the first in a series of CLs to replace the use of pointers in binary read-only data with offsets. In standard Go binaries these CLs have a small effect, shrinking 8-byte pointers to 4-bytes. In position-independent code, it also saves the dynamic relocation for the pointer. This has a significant effect on the binary size when building as PIE, c-archive, or c-shared. darwin/amd64: cmd/go: -12KB (0.1%) jujud: -82KB (0.1%) linux/amd64 PIE: cmd/go: -86KB (0.7%) jujud: -569KB (0.7%) For #6853. Change-Id: Iad5625bbeba58dabfd4d334dbee3fcbfe04b2dcf Reviewed-on: https://go-review.googlesource.com/21284 Reviewed-by: Ian Lance Taylor Run-TryBot: David Crawshaw TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/go.go | 2 -- src/cmd/compile/internal/gc/main.go | 4 --- src/cmd/compile/internal/gc/obj.go | 6 +++++ src/cmd/compile/internal/gc/reflect.go | 17 +++++------- src/cmd/internal/obj/data.go | 21 ++++++++++++++- src/cmd/internal/obj/link.go | 3 +++ src/cmd/link/internal/ld/data.go | 18 +++++++++++++ src/cmd/link/internal/ld/symtab.go | 4 +++ src/reflect/export_test.go | 8 +++--- src/reflect/type.go | 47 ++++++++++++++++++++++------------ src/runtime/runtime1.go | 8 +++--- src/runtime/symtab.go | 3 ++- 12 files changed, 99 insertions(+), 42 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index d9b28ff8e6..5df49b56d6 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -171,8 +171,6 @@ var msanpkg *Pkg // package runtime/msan var typepkg *Pkg // fake package for runtime type info (headers) -var typelinkpkg *Pkg // fake package for runtime type info (data) - var unsafepkg *Pkg // package unsafe var trackpkg *Pkg // fake package for field tracking diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 
26acf8861f..45a510d577 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -126,10 +126,6 @@ func Main() { itabpkg.Name = "go.itab" itabpkg.Prefix = "go.itab" // not go%2eitab - typelinkpkg = mkpkg("go.typelink") - typelinkpkg.Name = "go.typelink" - typelinkpkg.Prefix = "go.typelink" // not go%2etypelink - itablinkpkg = mkpkg("go.itablink") itablinkpkg.Name = "go.itablink" itablinkpkg.Prefix = "go.itablink" // not go%2eitablink diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 23c8be645c..eed0ed6e24 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -321,6 +321,12 @@ func dsymptrLSym(s *obj.LSym, off int, x *obj.LSym, xoff int) int { return off } +func dsymptrOffLSym(s *obj.LSym, off int, x *obj.LSym, xoff int) int { + s.WriteOff(Ctxt, int64(off), x, int64(xoff)) + off += 4 + return off +} + func gdata(nam *Node, nr *Node, wid int) { if nam.Op != ONAME { Fatalf("gdata nam op %v", opnames[nam.Op]) diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index df9ef27b7a..ea67634260 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -879,7 +879,7 @@ func tracksym(t *Type, f *Field) *Sym { return Pkglookup(Tconv(t, FmtLeft)+"."+f.Sym.Name, trackpkg) } -func typelinksym(t *Type) *Sym { +func typelinkLSym(t *Type) *obj.LSym { // %-uT is what the generated Type's string field says. // It uses (ambiguous) package names instead of import paths. // %-T is the complete, unambiguous type name. @@ -889,13 +889,8 @@ func typelinksym(t *Type) *Sym { // ensure the types appear sorted by their string field. The // names are a little long but they are discarded by the linker // and do not end up in the symbol table of the final binary. 
- p := Tconv(t, FmtLeft|FmtUnsigned) + "\t" + Tconv(t, FmtLeft) - - s := Pkglookup(p, typelinkpkg) - - //print("typelinksym: %s -> %+S\n", p, s); - - return s + name := "go.typelink." + Tconv(t, FmtLeft|FmtUnsigned) + "\t" + Tconv(t, FmtLeft) + return obj.Linklookup(Ctxt, name, 0) } func typesymprefix(prefix string, t *Type) *Sym { @@ -1298,9 +1293,9 @@ ok: if t.Sym == nil { switch t.Etype { case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSTRUCT: - slink := typelinksym(t) - dsymptr(slink, 0, s, 0) - ggloblsym(slink, int32(Widthptr), int16(dupok|obj.RODATA)) + slink := typelinkLSym(t) + dsymptrOffLSym(slink, 0, Linksym(s), 0) + ggloblLSym(slink, 4, int16(dupok|obj.RODATA)) } } diff --git a/src/cmd/internal/obj/data.go b/src/cmd/internal/obj/data.go index 37ab70bb0e..546ff37269 100644 --- a/src/cmd/internal/obj/data.go +++ b/src/cmd/internal/obj/data.go @@ -111,17 +111,36 @@ func (s *LSym) WriteInt(ctxt *Link, off int64, siz int, i int64) { // rsym and roff specify the relocation for the address. func (s *LSym) WriteAddr(ctxt *Link, off int64, siz int, rsym *LSym, roff int64) { if siz != ctxt.Arch.PtrSize { - ctxt.Diag("WriteAddr: bad address size: %d", siz) + ctxt.Diag("WriteAddr: bad address size %d in %s", siz, s.Name) } s.prepwrite(ctxt, off, siz) r := Addrel(s) r.Off = int32(off) + if int64(r.Off) != off { + ctxt.Diag("WriteAddr: off overflow %d in %s", off, s.Name) + } r.Siz = uint8(siz) r.Sym = rsym r.Type = R_ADDR r.Add = roff } +// WriteOff writes a 4 byte offset to rsym+roff into s at offset off. +// After linking the 4 bytes stored at s+off will be +// rsym+roff-(start of section that s is in). +func (s *LSym) WriteOff(ctxt *Link, off int64, rsym *LSym, roff int64) { + s.prepwrite(ctxt, off, 4) + r := Addrel(s) + r.Off = int32(off) + if int64(r.Off) != off { + ctxt.Diag("WriteOff: off overflow %d in %s", off, s.Name) + } + r.Siz = 4 + r.Sym = rsym + r.Type = R_ADDROFF + r.Add = roff +} + // WriteString writes a string of size siz into s at offset off. 
func (s *LSym) WriteString(ctxt *Link, off int64, siz int, str string) { if siz < len(str) { diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 62175f9ed8..d44d4398b1 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -457,6 +457,9 @@ const ( // R_ADDRMIPS (only used on mips64) resolves to a 32-bit external address, // by loading the address into a register with two instructions (lui, ori). R_ADDRMIPS + // R_ADDROFF resolves to an offset from the beginning of the section holding + // the data being relocated to the referenced symbol. + R_ADDROFF R_SIZE R_CALL R_CALLARM diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index ae7c287f59..cf51b0a908 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -525,6 +525,9 @@ func relocsym(s *LSym) { } o = Symaddr(r.Sym) + r.Add - int64(r.Sym.Sect.Vaddr) + case obj.R_ADDROFF: + o = Symaddr(r.Sym) - int64(r.Sym.Sect.Vaddr) + r.Add + // r->sym can be null when CALL $(constant) is transformed from absolute PC to relative PC call. 
case obj.R_CALL, obj.R_GOTPCREL, obj.R_PCREL: if Linkmode == LinkExternal && r.Sym != nil && r.Sym.Type != obj.SCONST && (r.Sym.Sect != Ctxt.Cursym.Sect || r.Type == obj.R_GOTPCREL) { @@ -1599,6 +1602,10 @@ func dodata() { sect.Vaddr = 0 Linklookup(Ctxt, "runtime.rodata", 0).Sect = sect Linklookup(Ctxt, "runtime.erodata", 0).Sect = sect + if !UseRelro() { + Linklookup(Ctxt, "runtime.types", 0).Sect = sect + Linklookup(Ctxt, "runtime.etypes", 0).Sect = sect + } for ; s != nil && s.Type < obj.STYPERELRO; s = s.Next { datsize = aligndatsize(datsize, s) s.Sect = sect @@ -1631,6 +1638,8 @@ func dodata() { sect.Align = maxalign(s, obj.STYPELINK-1) datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = 0 + Linklookup(Ctxt, "runtime.types", 0).Sect = sect + Linklookup(Ctxt, "runtime.etypes", 0).Sect = sect for ; s != nil && s.Type < obj.STYPELINK; s = s.Next { datsize = aligndatsize(datsize, s) if s.Outer != nil && s.Outer.Sect != nil && s.Outer.Sect != sect { @@ -1970,10 +1979,12 @@ func address() { } else { rodata = text.Next } + var relrodata *Section typelink := rodata.Next if UseRelro() { // There is another section (.data.rel.ro) when building a shared // object on elf systems. 
+ relrodata = typelink typelink = typelink.Next } itablink := typelink.Next @@ -2007,6 +2018,11 @@ func address() { s.Value = int64(sectSym.Sect.Vaddr + 16) } + types := relrodata + if types == nil { + types = rodata + } + xdefine("runtime.text", obj.STEXT, int64(text.Vaddr)) xdefine("runtime.etext", obj.STEXT, int64(text.Vaddr+text.Length)) if HEADTYPE == obj.Hwindows { @@ -2014,6 +2030,8 @@ func address() { } xdefine("runtime.rodata", obj.SRODATA, int64(rodata.Vaddr)) xdefine("runtime.erodata", obj.SRODATA, int64(rodata.Vaddr+rodata.Length)) + xdefine("runtime.types", obj.SRODATA, int64(types.Vaddr)) + xdefine("runtime.etypes", obj.SRODATA, int64(types.Vaddr+types.Length)) xdefine("runtime.typelink", obj.SRODATA, int64(typelink.Vaddr)) xdefine("runtime.etypelink", obj.SRODATA, int64(typelink.Vaddr+typelink.Length)) xdefine("runtime.itablink", obj.SRODATA, int64(itablink.Vaddr)) diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index ae0b17c259..678ed38730 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -329,6 +329,8 @@ func symtab() { xdefine("runtime.eitablink", obj.SRODATA, 0) xdefine("runtime.rodata", obj.SRODATA, 0) xdefine("runtime.erodata", obj.SRODATA, 0) + xdefine("runtime.types", obj.SRODATA, 0) + xdefine("runtime.etypes", obj.SRODATA, 0) xdefine("runtime.noptrdata", obj.SNOPTRDATA, 0) xdefine("runtime.enoptrdata", obj.SNOPTRDATA, 0) xdefine("runtime.data", obj.SDATA, 0) @@ -537,6 +539,8 @@ func symtab() { Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.end", 0)) Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.gcdata", 0)) Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.gcbss", 0)) + Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.types", 0)) + Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.etypes", 0)) // The typelinks slice Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.typelink", 0)) adduint(Ctxt, moduledata, uint64(ntypelinks)) diff --git 
a/src/reflect/export_test.go b/src/reflect/export_test.go index ddc64b46be..037c953718 100644 --- a/src/reflect/export_test.go +++ b/src/reflect/export_test.go @@ -46,9 +46,11 @@ func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr, func TypeLinks() []string { var r []string - for _, m := range typelinks() { - for _, t := range m { - r = append(r, t.string) + sections, offset := typelinks() + for i, offs := range offset { + rodata := sections[i] + for _, off := range offs { + r = append(r, rtypeOff(rodata, off).string) } } return r diff --git a/src/reflect/type.go b/src/reflect/type.go index 8f13acf26e..7104fde60a 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -1558,30 +1558,48 @@ func haveIdenticalUnderlyingType(T, V *rtype) bool { } // typelinks is implemented in package runtime. -// It returns a slice of all the 'typelink' information in the binary, -// which is to say a slice of known types, sorted by string. +// It returns a slice of the sections in each module, +// and a slice of *rtype offsets in each module. +// +// The types in each module are sorted by string. That is, the first +// two linked types of the first module are: +// +// d0 := sections[0] +// t1 := (*rtype)(add(d0, offset[0][0])) +// t2 := (*rtype)(add(d0, offset[0][1])) +// +// and +// +// t1.string < t2.string +// // Note that strings are not unique identifiers for types: // there can be more than one with a given string. // Only types we might want to look up are included: // pointers, channels, maps, slices, and arrays. -func typelinks() [][]*rtype +func typelinks() (sections []unsafe.Pointer, offset [][]int32) + +func rtypeOff(section unsafe.Pointer, off int32) *rtype { + return (*rtype)(add(section, uintptr(off))) +} // typesByString returns the subslice of typelinks() whose elements have // the given string representation. // It may be empty (no known types with that string) or may have // multiple elements (multiple types with that string). 
func typesByString(s string) []*rtype { - typs := typelinks() + sections, offset := typelinks() var ret []*rtype - for _, typ := range typs { + for offsI, offs := range offset { + section := sections[offsI] + // We are looking for the first index i where the string becomes >= s. // This is a copy of sort.Search, with f(h) replaced by (*typ[h].string >= s). - i, j := 0, len(typ) + i, j := 0, len(offs) for i < j { h := i + (j-i)/2 // avoid overflow when computing h // i ≤ h < j - if !(typ[h].string >= s) { + if !(rtypeOff(section, offs[h]).string >= s) { i = h + 1 // preserves f(i-1) == false } else { j = h // preserves f(j) == true @@ -1592,17 +1610,12 @@ func typesByString(s string) []*rtype { // Having found the first, linear scan forward to find the last. // We could do a second binary search, but the caller is going // to do a linear scan anyway. - j = i - for j < len(typ) && typ[j].string == s { - j++ - } - - if j > i { - if ret == nil { - ret = typ[i:j:j] - } else { - ret = append(ret, typ[i:j]...) 
+ for j := i; j < len(offs); j++ { + typ := rtypeOff(section, offs[j]) + if typ.string != s { + break } + ret = append(ret, typ) } } return ret diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go index 95bebac593..e1956569fd 100644 --- a/src/runtime/runtime1.go +++ b/src/runtime/runtime1.go @@ -477,10 +477,12 @@ func gomcache() *mcache { } //go:linkname reflect_typelinks reflect.typelinks -func reflect_typelinks() [][]*_type { - ret := [][]*_type{firstmoduledata.typelinks} +func reflect_typelinks() ([]unsafe.Pointer, [][]int32) { + sections := []unsafe.Pointer{unsafe.Pointer(firstmoduledata.types)} + ret := [][]int32{firstmoduledata.typelinks} for datap := firstmoduledata.next; datap != nil; datap = datap.next { + sections = append(sections, unsafe.Pointer(datap.types)) ret = append(ret, datap.typelinks) } - return ret + return sections, ret } diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index 158bdcea0d..8c70f22c1f 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -127,8 +127,9 @@ type moduledata struct { bss, ebss uintptr noptrbss, enoptrbss uintptr end, gcdata, gcbss uintptr + types, etypes uintptr - typelinks []*_type + typelinks []int32 // offsets from types itablinks []*itab modulename string -- cgit v1.3 From db5338f87982086a19310ad6e25c046280644b98 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Tue, 12 Apr 2016 17:12:26 -0700 Subject: cmd/compile: teach CSE that new objects are bespoke MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit runtime.newobject never returns the same thing twice, so the resulting value will never be a common subexpression. This helps when compiling large static data structures that include pointers, such as maps and slices. No clear performance impact on other code. (See below.) 
For the code in issue #15112: Before: real 1m14.238s user 1m18.985s sys 0m0.787s After: real 0m47.172s user 0m52.248s sys 0m0.767s For the code in issue #15235, size 10k: Before: real 0m44.916s user 0m46.577s sys 0m0.304s After: real 0m7.703s user 0m9.041s sys 0m0.316s Still more work to be done, particularly for #15112. Updates #15112 Updates #15235 name old time/op new time/op delta Template 330ms ±11% 333ms ±13% ~ (p=0.749 n=20+19) Unicode 148ms ± 6% 152ms ± 8% ~ (p=0.072 n=18+20) GoTypes 1.01s ± 7% 1.01s ± 3% ~ (p=0.583 n=20+20) Compiler 5.04s ± 2% 5.06s ± 2% ~ (p=0.314 n=20+20) name old user-ns/op new user-ns/op delta Template 444user-ms ±11% 445user-ms ±10% ~ (p=0.738 n=20+20) Unicode 215user-ms ± 5% 218user-ms ± 5% ~ (p=0.239 n=18+18) GoTypes 1.45user-s ± 3% 1.45user-s ± 4% ~ (p=0.620 n=20+20) Compiler 7.23user-s ± 2% 7.22user-s ± 2% ~ (p=0.901 n=20+19) name old alloc/op new alloc/op delta Template 55.0MB ± 0% 55.0MB ± 0% ~ (p=0.547 n=20+20) Unicode 37.6MB ± 0% 37.6MB ± 0% ~ (p=0.301 n=20+20) GoTypes 177MB ± 0% 177MB ± 0% ~ (p=0.065 n=20+19) Compiler 798MB ± 0% 797MB ± 0% -0.05% (p=0.000 n=19+20) name old allocs/op new allocs/op delta Template 492k ± 0% 493k ± 0% +0.03% (p=0.030 n=20+20) Unicode 377k ± 0% 377k ± 0% ~ (p=0.423 n=20+19) GoTypes 1.40M ± 0% 1.40M ± 0% ~ (p=0.102 n=20+20) Compiler 5.53M ± 0% 5.53M ± 0% ~ (p=0.094 n=17+18) name old text-bytes new text-bytes delta HelloSize 561k ± 0% 561k ± 0% ~ (all samples are equal) CmdGoSize 6.13M ± 0% 6.13M ± 0% ~ (all samples are equal) name old data-bytes new data-bytes delta HelloSize 128k ± 0% 128k ± 0% ~ (all samples are equal) CmdGoSize 306k ± 0% 306k ± 0% ~ (all samples are equal) name old exe-bytes new exe-bytes delta HelloSize 905k ± 0% 905k ± 0% ~ (all samples are equal) CmdGoSize 9.64M ± 0% 9.64M ± 0% ~ (all samples are equal) Change-Id: Id774e2901d7701a3ec7a1c1d1cf1d9327a4107fc Reviewed-on: https://go-review.googlesource.com/21937 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot 
Reviewed-by: Todd Neal --- src/cmd/compile/internal/gc/subr.go | 4 ++++ src/cmd/compile/internal/ssa/config.go | 6 ++++++ src/cmd/compile/internal/ssa/cse.go | 8 ++++++++ 3 files changed, 18 insertions(+) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 035bd815c2..091762f496 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1081,6 +1081,10 @@ func syslook(name string) *Node { return s.Def } +func (s *Sym) IsRuntimeCall(name string) bool { + return s.Pkg == Runtimepkg && s.Name == name +} + // typehash computes a hash value for type t to use in type switch // statements. func typehash(t *Type) uint32 { diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index 2a676e39b3..a60291ea53 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -116,6 +116,12 @@ type GCNode interface { String() string } +// GCSym is an interface that *gc.Sym implements. +// Using *gc.Sym directly would lead to import cycles. +type GCSym interface { + IsRuntimeCall(name string) bool +} + // NewConfig returns a new configuration object for the given architecture. 
func NewConfig(arch string, fe Frontend, ctxt *obj.Link, optimize bool) *Config { c := &Config{arch: arch, fe: fe} diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go index 1ec5712be0..9853ff06d0 100644 --- a/src/cmd/compile/internal/ssa/cse.go +++ b/src/cmd/compile/internal/ssa/cse.go @@ -255,6 +255,14 @@ func cmpVal(v, w *Value, auxIDs auxmap, depth int) Cmp { return lt2Cmp(v.Block.ID < w.Block.ID) } + switch v.Op { + case OpStaticCall, OpAMD64CALLstatic, OpARMCALLstatic: + sym := v.Aux.(GCSym) + if sym.IsRuntimeCall("newobject") { + return lt2Cmp(v.ID < w.ID) + } + } + if tc := v.Type.Compare(w.Type); tc != CMPeq { return tc } -- cgit v1.3 From 6531fab06fc4667b7d167c7e3ee936f28bac68e2 Mon Sep 17 00:00:00 2001 From: Tal Shprecher Date: Tue, 12 Apr 2016 22:29:34 -0700 Subject: cmd/compile: remove unnecessary assignments while type checking. Change-Id: Ica0ec84714d7f01d800d62fa10cdb08321d43cf3 Reviewed-on: https://go-review.googlesource.com/21967 Reviewed-by: Brad Fitzpatrick Run-TryBot: Brad Fitzpatrick TryBot-Result: Gobot Gobot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/typecheck.go | 6 ------ 1 file changed, 6 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index a20f87d940..f676b9dd09 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -338,7 +338,6 @@ OpSwitch: ok |= Etype if n.Type == nil { - n.Type = nil return n } @@ -449,7 +448,6 @@ OpSwitch: n.Op = OTYPE n.Type = tointerface(n.List.Slice()) if n.Type == nil { - n.Type = nil return n } @@ -458,7 +456,6 @@ OpSwitch: n.Op = OTYPE n.Type = functype(n.Left, n.List.Slice(), n.Rlist.Slice()) if n.Type == nil { - n.Type = nil return n } n.Left = nil @@ -822,7 +819,6 @@ OpSwitch: ok |= Erv n = typecheckcomplit(n) if n.Type == nil { - n.Type = nil return n } break OpSwitch @@ -864,7 +860,6 @@ OpSwitch: if n.Type.Etype 
!= TFUNC || n.Type.Recv() == nil { Yyerror("type %v has no method %v", n.Left.Type, Sconv(n.Right.Sym, FmtShort)) n.Type = nil - n.Type = nil return n } @@ -1961,7 +1956,6 @@ OpSwitch: ok |= Erv typecheckclosure(n, top) if n.Type == nil { - n.Type = nil return n } break OpSwitch -- cgit v1.3 From 0e01db4b8d6ac64e6661508bc6876fa41c799208 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 12 Apr 2016 17:46:41 -0700 Subject: cmd/compile: fix crash on bare package name in constant declarations Fixes #11361. Change-Id: I70b8808f97f0e07de680e7e6ede1322ea0fdbbc0 Reviewed-on: https://go-review.googlesource.com/21936 Reviewed-by: Brad Fitzpatrick --- src/cmd/compile/internal/gc/subr.go | 7 +++++++ test/fixedbugs/issue11361.go | 11 +++++++++++ 2 files changed, 18 insertions(+) create mode 100644 test/fixedbugs/issue11361.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 091762f496..ea2db8721a 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -540,8 +540,15 @@ func treecopy(n *Node, lineno int32) *Node { } return n + case OPACK: + // OPACK nodes are never valid in const value declarations, + // but allow them like any other declared symbol to avoid + // crashing (golang.org/issue/11361). + fallthrough + case ONAME, OLITERAL, OTYPE: return n + } } diff --git a/test/fixedbugs/issue11361.go b/test/fixedbugs/issue11361.go new file mode 100644 index 0000000000..d01776b47c --- /dev/null +++ b/test/fixedbugs/issue11361.go @@ -0,0 +1,11 @@ +// errorcheck + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package a + +import "fmt" // ERROR "imported and not used" + +const n = fmt // ERROR "fmt without selector" "fmt is not a constant" -- cgit v1.3 From 24967ec122710e73b35893925fd9a8390d7524ab Mon Sep 17 00:00:00 2001 From: Tal Shprecher Date: Sun, 10 Apr 2016 18:12:41 -0700 Subject: cmd/compile: make enqueued map keys fail validation on forward types Map keys are currently validated in multiple locations but share a common validation routine. The problem is that early validations should be lenient enough to allow for forward types while the final validations should not. The final validations should fail on forward types since they've already settled. This change also separates the key type checking from the creation of the map via typMap. Instead of the mapqueue being populated in copytype() by checking the map line number, it's populated in the same block that validates the key type. This isolates key validation logic while type checking. Fixes #14988 Change-Id: Ia47cf6213585d6c63b3a35249104c0439feae658 Reviewed-on: https://go-review.googlesource.com/21830 Reviewed-by: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/subr.go | 24 ------------------ src/cmd/compile/internal/gc/type.go | 4 --- src/cmd/compile/internal/gc/typecheck.go | 42 +++++++++++++++++++------------- test/fixedbugs/issue14988.go | 13 ++++++++++ 4 files changed, 38 insertions(+), 45 deletions(-) create mode 100644 test/fixedbugs/issue14988.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index ea2db8721a..776eb9c64e 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -372,30 +372,6 @@ func saveorignode(n *Node) { n.Orig = norig } -// checkMapKeyType checks that Type key is valid for use as a map key. 
-func checkMapKeyType(key *Type) { - alg, bad := algtype1(key) - if alg != ANOEQ { - return - } - switch bad.Etype { - default: - Yyerror("invalid map key type %v", key) - case TANY: - // Will be resolved later. - case TFORW: - // map[key] used during definition of key. - // postpone check until key is fully defined. - // if there are multiple uses of map[key] - // before key is fully defined, the error - // will only be printed for the first one. - // good enough. - if maplineno[key] == 0 { - maplineno[key] = lineno - } - } -} - // methcmp sorts by symbol, then by package path for unexported symbols. type methcmp []*Field diff --git a/src/cmd/compile/internal/gc/type.go b/src/cmd/compile/internal/gc/type.go index 25c1bcc203..a44a85bed8 100644 --- a/src/cmd/compile/internal/gc/type.go +++ b/src/cmd/compile/internal/gc/type.go @@ -429,10 +429,6 @@ func typChan(elem *Type, dir ChanDir) *Type { // typMap returns a new map Type with key type k and element (aka value) type v. func typMap(k, v *Type) *Type { - if k != nil { - checkMapKeyType(k) - } - t := typ(TMAP) mt := t.MapType() mt.Key = k diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index f676b9dd09..7089d7de72 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -416,6 +416,18 @@ OpSwitch: } n.Op = OTYPE n.Type = typMap(l.Type, r.Type) + + // map key validation + alg, bad := algtype1(l.Type) + if alg == ANOEQ { + if bad.Etype == TFORW { + // queue check for map until all the types are done settling. 
+ mapqueue = append(mapqueue, mapqueueval{l, n.Lineno}) + } else if bad.Etype != TANY { + // no need to queue, key is already bad + Yyerror("invalid map key type %v", l.Type) + } + } n.Left = nil n.Right = nil @@ -3507,11 +3519,13 @@ func domethod(n *Node) { checkwidth(n.Type) } -var ( - mapqueue []*Node - // maplineno tracks the line numbers at which types are first used as map keys - maplineno = map[*Type]int32{} -) +type mapqueueval struct { + n *Node + lno int32 +} + +// tracks the line numbers at which forward types are first used as map keys +var mapqueue []mapqueueval func copytype(n *Node, t *Type) { if t.Etype == TFORW { @@ -3520,7 +3534,6 @@ func copytype(n *Node, t *Type) { return } - mapline := maplineno[n.Type] embedlineno := n.Type.ForwardType().Embedlineno l := n.Type.ForwardType().Copyto @@ -3555,12 +3568,6 @@ func copytype(n *Node, t *Type) { } lineno = lno - - // Queue check for map until all the types are done settling. - if mapline != 0 { - maplineno[t] = mapline - mapqueue = append(mapqueue, n) - } } func typecheckdeftype(n *Node) { @@ -3605,12 +3612,13 @@ ret: domethod(n) } } - - for _, n := range mapqueue { - lineno = maplineno[n.Type] - checkMapKeyType(n.Type) + for _, e := range mapqueue { + lineno = e.lno + if !e.n.Type.IsComparable() { + Yyerror("invalid map key type %v", e.n.Type) + } } - + mapqueue = nil lineno = lno } diff --git a/test/fixedbugs/issue14988.go b/test/fixedbugs/issue14988.go new file mode 100644 index 0000000000..4ddc7e728f --- /dev/null +++ b/test/fixedbugs/issue14988.go @@ -0,0 +1,13 @@ +// errorcheck + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 14988: defining a map with an invalid forward declaration array +// key doesn't cause a fatal. 
+ +package main + +type m map[k]int // ERROR "invalid map key type" +type k [1]m -- cgit v1.3 From e0611b16645dba6768cab405f1ec1b3fce83334a Mon Sep 17 00:00:00 2001 From: Alexandru Moșoi Date: Wed, 13 Apr 2016 10:58:38 +0200 Subject: cmd/compile: use shared dom tree for cse, too MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Missed this in the previous CL where the shared dom tree was introduced. Change-Id: If0bd85d4b4567d7e87814ed511603b1303ab3903 Reviewed-on: https://go-review.googlesource.com/21970 Run-TryBot: Alexandru Moșoi TryBot-Result: Gobot Gobot Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/compile.go | 5 +++-- src/cmd/compile/internal/ssa/cse.go | 8 +++----- src/cmd/compile/internal/ssa/cse_test.go | 1 + 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go index f4f0d8cab2..a0b5ff71cf 100644 --- a/src/cmd/compile/internal/ssa/compile.go +++ b/src/cmd/compile/internal/ssa/compile.go @@ -233,8 +233,8 @@ var passes = [...]pass{ {name: "opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules {name: "zero arg cse", fn: zcse, required: true}, // required to merge OpSB values {name: "opt deadcode", fn: deadcode, required: true}, // remove any blocks orphaned during opt - {name: "generic cse", fn: cse}, {name: "generic domtree", fn: domTree}, + {name: "generic cse", fn: cse}, {name: "phiopt", fn: phiopt}, {name: "nilcheckelim", fn: nilcheckelim}, {name: "prove", fn: prove}, @@ -289,7 +289,8 @@ var passOrder = [...]constraint{ {"opt", "nilcheckelim"}, // tighten should happen before lowering to avoid splitting naturally paired instructions such as CMP/SET {"tighten", "lower"}, - // nilcheckelim, prove and loopbce share idom. + // cse, nilcheckelim, prove and loopbce share idom. 
+ {"generic domtree", "generic cse"}, {"generic domtree", "nilcheckelim"}, {"generic domtree", "prove"}, {"generic domtree", "loopbce"}, diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go index 9853ff06d0..c12d51e50c 100644 --- a/src/cmd/compile/internal/ssa/cse.go +++ b/src/cmd/compile/internal/ssa/cse.go @@ -131,9 +131,7 @@ func cse(f *Func) { } } - // Compute dominator tree - idom := dominators(f) - sdom := newSparseTree(f, idom) + // Dominator tree (f.sdom) is computed by the generic domtree pass. // Compute substitutions we would like to do. We substitute v for w // if v and w are in the same equivalence class and v dominates w. @@ -143,7 +141,7 @@ func cse(f *Func) { // Find a maximal dominant element in e v := e[0] for _, w := range e[1:] { - if sdom.isAncestorEq(w.Block, v.Block) { + if f.sdom.isAncestorEq(w.Block, v.Block) { v = w } } @@ -153,7 +151,7 @@ func cse(f *Func) { w := e[i] if w == v { e, e[i] = e[:len(e)-1], e[len(e)-1] - } else if sdom.isAncestorEq(v.Block, w.Block) { + } else if f.sdom.isAncestorEq(v.Block, w.Block) { rewrite[w.ID] = v e, e[i] = e[:len(e)-1], e[len(e)-1] } else { diff --git a/src/cmd/compile/internal/ssa/cse_test.go b/src/cmd/compile/internal/ssa/cse_test.go index 905939fc32..d5be2b52ec 100644 --- a/src/cmd/compile/internal/ssa/cse_test.go +++ b/src/cmd/compile/internal/ssa/cse_test.go @@ -44,6 +44,7 @@ func TestCSEAuxPartitionBug(t *testing.T) { Exit("rstore"))) CheckFunc(fun.f) + domTree(fun.f) cse(fun.f) deadcode(fun.f) CheckFunc(fun.f) -- cgit v1.3 From 7d469179e6e3dafe16700b7fc1cf8683ad9453fa Mon Sep 17 00:00:00 2001 From: David Crawshaw Date: Mon, 28 Mar 2016 10:32:27 -0400 Subject: cmd/compile, etc: store method tables as offsets This CL introduces the typeOff type and a lookup method of the same name that can turn a typeOff offset into an *rtype. 
In a typical Go binary (built with buildmode=exe, pie, c-archive, or c-shared), there is one moduledata and all typeOff values are offsets relative to firstmoduledata.types. This makes computing the pointer cheap in typical programs. With buildmode=shared (and one day, buildmode=plugin) there are multiple modules whose relative offset is determined at runtime. We identify a type in the general case by the pair of the original *rtype that references it and its typeOff value. We determine the module from the original pointer, and then use the typeOff from there to compute the final *rtype. To ensure there is only one *rtype representing each type, the runtime initializes a typemap for each module, using any identical type from an earlier module when resolving that offset. This means that types computed from an offset match the type mapped by the pointer dynamic relocations. A series of followup CLs will replace other *rtype values with typeOff (and name/*string with nameOff). For types created at runtime by reflect, type offsets are treated as global IDs and reference into a reflect offset map kept by the runtime. darwin/amd64: cmd/go: -57KB (0.6%) jujud: -557KB (0.8%) linux/amd64 PIE: cmd/go: -361KB (3.0%) jujud: -3.5MB (4.2%) For #6853. 
Change-Id: Icf096fd884a0a0cb9f280f46f7a26c70a9006c96 Reviewed-on: https://go-review.googlesource.com/21285 Reviewed-by: Ian Lance Taylor Run-TryBot: David Crawshaw TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/reflect.go | 75 +++++--- src/cmd/internal/obj/link.go | 15 +- src/cmd/link/internal/ld/deadcode.go | 14 +- src/cmd/link/internal/ld/decodesym.go | 22 +-- src/reflect/export_test.go | 2 +- src/reflect/type.go | 267 +++++++++++++++++++++------- src/reflect/value.go | 15 +- src/runtime/iface.go | 10 +- src/runtime/proc.go | 3 +- src/runtime/runtime1.go | 33 ++++ src/runtime/symtab.go | 2 + src/runtime/type.go | 307 ++++++++++++++++++++++++++++++++- 12 files changed, 637 insertions(+), 128 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index ea67634260..2bd50b4665 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -75,7 +75,7 @@ func uncommonSize(t *Type) int { // Sizeof(runtime.uncommontype{}) if t.Sym == nil && len(methods(t)) == 0 { return 0 } - return 2*Widthptr + 2*Widthint + return 2 * Widthptr } func makefield(name string, t *Type) *Field { @@ -580,13 +580,23 @@ func dextratype(s *Sym, ot int, t *Type, dataAdd int) int { ot = dgopkgpath(s, ot, typePkg(t)) - // slice header - ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+dataAdd) - - n := len(m) - ot = duintxx(s, ot, uint64(n), Widthint) - ot = duintxx(s, ot, uint64(n), Widthint) + dataAdd += Widthptr + 2 + 2 + if Widthptr == 8 { + dataAdd += 4 + } + mcount := len(m) + if mcount != int(uint16(mcount)) { + Fatalf("too many methods on %s: %d", t, mcount) + } + if dataAdd != int(uint16(dataAdd)) { + Fatalf("methods are too far away on %s: %d", t, dataAdd) + } + ot = duint16(s, ot, uint16(mcount)) + ot = duint16(s, ot, uint16(dataAdd)) + if Widthptr == 8 { + ot = duint32(s, ot, 0) // align for following pointers + } return ot } @@ -609,6 +619,7 @@ func typePkg(t 
*Type) *Pkg { // dextratypeData dumps the backing array for the []method field of // runtime.uncommontype. func dextratypeData(s *Sym, ot int, t *Type) int { + lsym := Linksym(s) for _, a := range methods(t) { // ../../../../runtime/type.go:/method exported := exportname(a.name) @@ -617,21 +628,24 @@ func dextratypeData(s *Sym, ot int, t *Type) int { pkg = a.pkg } ot = dname(s, ot, a.name, "", pkg, exported) - ot = dmethodptr(s, ot, dtypesym(a.mtype)) - ot = dmethodptr(s, ot, a.isym) - ot = dmethodptr(s, ot, a.tsym) + ot = dmethodptrOffLSym(lsym, ot, Linksym(dtypesym(a.mtype))) + ot = dmethodptrOffLSym(lsym, ot, Linksym(a.isym)) + ot = dmethodptrOffLSym(lsym, ot, Linksym(a.tsym)) + if Widthptr == 8 { + ot = duintxxLSym(lsym, ot, 0, 4) // pad to reflect.method size + } } return ot } -func dmethodptr(s *Sym, off int, x *Sym) int { - duintptr(s, off, 0) - r := obj.Addrel(Linksym(s)) - r.Off = int32(off) - r.Siz = uint8(Widthptr) - r.Sym = Linksym(x) - r.Type = obj.R_METHOD - return off + Widthptr +func dmethodptrOffLSym(s *obj.LSym, ot int, x *obj.LSym) int { + duintxxLSym(s, ot, 0, 4) + r := obj.Addrel(s) + r.Off = int32(ot) + r.Siz = 4 + r.Sym = x + r.Type = obj.R_METHODOFF + return ot + 4 } var kinds = []int{ @@ -1286,18 +1300,29 @@ ok: ggloblsym(s, int32(ot), int16(dupok|obj.RODATA)) // generate typelink.foo pointing at s = type.foo. + // // The linker will leave a table of all the typelinks for - // types in the binary, so reflect can find them. - // We only need the link for unnamed composites that - // we want be able to find. - if t.Sym == nil { + // types in the binary, so the runtime can find them. + // + // When buildmode=shared, all types are in typelinks so the + // runtime can deduplicate type pointers. + keep := Ctxt.Flag_dynlink + if !keep && t.Sym == nil { + // For an unnamed type, we only need the link if the type can + // be created at run time by reflect.PtrTo and similar + // functions. 
If the type exists in the program, those + // functions must return the existing type structure rather + // than creating a new one. switch t.Etype { case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSTRUCT: - slink := typelinkLSym(t) - dsymptrOffLSym(slink, 0, Linksym(s), 0) - ggloblLSym(slink, 4, int16(dupok|obj.RODATA)) + keep = true } } + if keep { + slink := typelinkLSym(t) + dsymptrOffLSym(slink, 0, Linksym(s), 0) + ggloblLSym(slink, 4, int16(dupok|obj.RODATA)) + } return s } diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 42aaa5f4f0..55c9f4f9e2 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -457,8 +457,8 @@ const ( // R_ADDRMIPS (only used on mips64) resolves to a 32-bit external address, // by loading the address into a register with two instructions (lui, ori). R_ADDRMIPS - // R_ADDROFF resolves to an offset from the beginning of the section holding - // the data being relocated to the referenced symbol. + // R_ADDROFF resolves to a 32-bit offset from the beginning of the section + // holding the data being relocated to the referenced symbol. R_ADDROFF R_SIZE R_CALL @@ -492,11 +492,12 @@ const ( // should be linked into the final binary, even if there are no other // direct references. (This is used for types reachable by reflection.) R_USETYPE - // R_METHOD resolves to an *rtype for a method. - // It is used when linking from the uncommonType of another *rtype, and - // may be set to zero by the linker if it determines the method text is - // unreachable by the linked program. - R_METHOD + // R_METHODOFF resolves to a 32-bit offset from the beginning of the section + // holding the data being relocated to the referenced symbol. + // It is a variant of R_ADDROFF used when linking from the uncommonType of a + // *rtype, and may be set to zero by the linker if it determines the method + // text is unreachable by the linked program. 
+ R_METHODOFF R_POWER_TOC R_GOTPCREL // R_JMPMIPS (only used on mips64) resolves to non-PC-relative target address diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go index 51fae02ef0..c83a104a54 100644 --- a/src/cmd/link/internal/ld/deadcode.go +++ b/src/cmd/link/internal/ld/deadcode.go @@ -19,7 +19,7 @@ import ( // // This flood fill is wrapped in logic for pruning unused methods. // All methods are mentioned by relocations on their receiver's *rtype. -// These relocations are specially defined as R_METHOD by the compiler +// These relocations are specially defined as R_METHODOFF by the compiler // so we can detect and manipulated them here. // // There are three ways a method of a reachable type can be invoked: @@ -100,7 +100,7 @@ func deadcode(ctxt *Link) { d.flood() } - // Remove all remaining unreached R_METHOD relocations. + // Remove all remaining unreached R_METHODOFF relocations. for _, m := range d.markableMethods { for _, r := range m.r { d.cleanupReloc(r) @@ -167,7 +167,7 @@ var markextra = []string{ type methodref struct { m methodsig src *LSym // receiver type symbol - r [3]*Reloc // R_METHOD relocations to fields of runtime.method + r [3]*Reloc // R_METHODOFF relocations to fields of runtime.method } func (m methodref) ifn() *LSym { return m.r[1].Sym } @@ -190,7 +190,7 @@ type deadcodepass struct { func (d *deadcodepass) cleanupReloc(r *Reloc) { if r.Sym.Attr.Reachable() { - r.Type = obj.R_ADDR + r.Type = obj.R_ADDROFF } else { if Debug['v'] > 1 { fmt.Fprintf(d.ctxt.Bso, "removing method %s\n", r.Sym.Name) @@ -217,7 +217,7 @@ func (d *deadcodepass) mark(s, parent *LSym) { func (d *deadcodepass) markMethod(m methodref) { for _, r := range m.r { d.mark(r.Sym, m.src) - r.Type = obj.R_ADDR + r.Type = obj.R_ADDROFF } } @@ -291,14 +291,14 @@ func (d *deadcodepass) flood() { } } - mpos := 0 // 0-3, the R_METHOD relocs of runtime.uncommontype + mpos := 0 // 0-3, the R_METHODOFF relocs of runtime.uncommontype var methods 
[]methodref for i := 0; i < len(s.R); i++ { r := &s.R[i] if r.Sym == nil { continue } - if r.Type != obj.R_METHOD { + if r.Type != obj.R_METHODOFF { d.mark(r.Sym, s) continue } diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go index 7daa8bc812..5fa8b4c81f 100644 --- a/src/cmd/link/internal/ld/decodesym.go +++ b/src/cmd/link/internal/ld/decodesym.go @@ -47,9 +47,9 @@ func decode_inuxi(p []byte, sz int) uint64 { } } -func commonsize() int { return 6*SysArch.PtrSize + 8 } // runtime._type -func structfieldSize() int { return 3 * SysArch.PtrSize } // runtime.structfield -func uncommonSize() int { return 2*SysArch.PtrSize + 2*SysArch.IntSize } // runtime.uncommontype +func commonsize() int { return 6*SysArch.PtrSize + 8 } // runtime._type +func structfieldSize() int { return 3 * SysArch.PtrSize } // runtime.structfield +func uncommonSize() int { return 2 * SysArch.PtrSize } // runtime.uncommontype // Type.commonType.kind func decodetype_kind(s *LSym) uint8 { @@ -341,12 +341,14 @@ func decodetype_methods(s *LSym) []methodsig { // just Sizeof(rtype) } - numMethods := int(decode_inuxi(s.P[off+2*SysArch.PtrSize:], SysArch.IntSize)) - r := decode_reloc(s, int32(off+SysArch.PtrSize)) - if r.Sym != s { - panic(fmt.Sprintf("method slice pointer in %s leads to a different symbol %s", s, r.Sym)) + mcount := int(decode_inuxi(s.P[off+SysArch.PtrSize:], 2)) + moff := int(decode_inuxi(s.P[off+SysArch.PtrSize+2:], 2)) + off += moff // offset to array of reflect.method values + var sizeofMethod int // sizeof reflect.method in program + if SysArch.PtrSize == 4 { + sizeofMethod = 4 * SysArch.PtrSize + } else { + sizeofMethod = 3 * SysArch.PtrSize } - off = int(r.Add) // array of reflect.method values - sizeofMethod := 4 * SysArch.PtrSize // sizeof reflect.method in program - return decode_methodsig(s, off, sizeofMethod, numMethods) + return decode_methodsig(s, off, sizeofMethod, mcount) } diff --git a/src/reflect/export_test.go 
b/src/reflect/export_test.go index 037c953718..2769e0db40 100644 --- a/src/reflect/export_test.go +++ b/src/reflect/export_test.go @@ -90,7 +90,7 @@ func FirstMethodNameBytes(t Type) *byte { if ut == nil { panic("type has no methods") } - m := ut.methods[0] + m := ut.methods()[0] if *m.name.data(0)&(1<<2) == 0 { panic("method name does not have pkgPath *string") } diff --git a/src/reflect/type.go b/src/reflect/type.go index 7104fde60a..c7ed402be2 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -288,10 +288,10 @@ type typeAlg struct { // Method on non-interface type type method struct { - name name // name of method - mtyp *rtype // method type (without receiver) - ifn unsafe.Pointer // fn used in interface call (one-word receiver) - tfn unsafe.Pointer // fn used for normal method call + name name // name of method + mtyp typeOff // method type (without receiver) + ifn textOff // fn used in interface call (one-word receiver) + tfn textOff // fn used for normal method call } // uncommonType is present only for types with names or methods @@ -299,8 +299,9 @@ type method struct { // Using a pointer to this struct reduces the overall size required // to describe an unnamed type with no methods. type uncommonType struct { - pkgPath *string // import path; nil for built-in types like int, string - methods []method // methods associated with type + pkgPath *string // import path; nil for built-in types like int, string + mcount uint16 // number of methods + moff uint16 // offset from this uncommontype to [mcount]method } // ChanDir represents a channel type's direction. 
@@ -589,6 +590,10 @@ var kindNames = []string{ UnsafePointer: "unsafe.Pointer", } +func (t *uncommonType) methods() []method { + return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff)))[:t.mcount:t.mcount] +} + func (t *uncommonType) PkgPath() string { if t == nil || t.pkgPath == nil { return "" @@ -596,13 +601,55 @@ func (t *uncommonType) PkgPath() string { return *t.pkgPath } +// resolveTypeOff resolves an *rtype offset from a base type. +// The (*rtype).typeOff method is a convenience wrapper for this function. +// Implemented in the runtime package. +func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer + +// resolveTextOff resolves an function pointer offset from a base type. +// The (*rtype).textOff method is a convenience wrapper for this function. +// Implemented in the runtime package. +func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer + +// addReflectOff adds a pointer to the reflection lookup map in the runtime. +// It returns a new ID that can be used as a typeOff or textOff, and will +// be resolved correctly. Implemented in the runtime package. +func addReflectOff(ptr unsafe.Pointer) int32 + +// resolveReflectType adds a *rtype to the reflection lookup map in the runtime. +// It returns a new typeOff that can be used to refer to the pointer. +func resolveReflectType(t *rtype) typeOff { + return typeOff(addReflectOff(unsafe.Pointer(t))) +} + +// resolveReflectText adds a function pointer to the reflection lookup map in +// the runtime. It returns a new textOff that can be used to refer to the +// pointer. 
+func resolveReflectText(ptr unsafe.Pointer) textOff { + return textOff(addReflectOff(ptr)) +} + +type typeOff int32 // offset to an *rtype +type textOff int32 // offset from top of text section + +func (t *rtype) typeOff(off typeOff) *rtype { + if off == 0 { + return nil + } + return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off))) +} + +func (t *rtype) textOff(off textOff) unsafe.Pointer { + return resolveTextOff(unsafe.Pointer(t), int32(off)) +} + func (t *rtype) uncommon() *uncommonType { if t.tflag&tflagUncommon == 0 { return nil } switch t.Kind() { case Struct: - return &(*structTypeWithMethods)(unsafe.Pointer(t)).u + return &(*structTypeUncommon)(unsafe.Pointer(t)).u case Ptr: type u struct { ptrType @@ -688,7 +735,7 @@ func (t *rtype) NumMethod() int { if ut == nil { return 0 } - return len(ut.methods) + return int(ut.mcount) } func (t *rtype) Method(i int) (m Method) { @@ -698,10 +745,10 @@ func (t *rtype) Method(i int) (m Method) { } ut := t.uncommon() - if ut == nil || i < 0 || i >= len(ut.methods) { + if ut == nil || i < 0 || i >= int(ut.mcount) { panic("reflect: Method index out of range") } - p := &ut.methods[i] + p := ut.methods()[i] m.Name = p.name.name() fl := flag(Func) if !p.name.isExported() { @@ -712,8 +759,9 @@ func (t *rtype) Method(i int) (m Method) { m.PkgPath = *pkgPath fl |= flagStickyRO } - if p.mtyp != nil { - ft := (*funcType)(unsafe.Pointer(p.mtyp)) + if p.mtyp != 0 { + mtyp := t.typeOff(p.mtyp) + ft := (*funcType)(unsafe.Pointer(mtyp)) in := make([]Type, 0, 1+len(ft.in())) in = append(in, t) for _, arg := range ft.in() { @@ -723,9 +771,10 @@ func (t *rtype) Method(i int) (m Method) { for _, ret := range ft.out() { out = append(out, ret) } - mt := FuncOf(in, out, p.mtyp.IsVariadic()) + mt := FuncOf(in, out, ft.IsVariadic()) m.Type = mt - fn := unsafe.Pointer(&p.tfn) + tfn := t.textOff(p.tfn) + fn := unsafe.Pointer(&tfn) m.Func = Value{mt.(*rtype), fn, fl} } m.Index = i @@ -741,8 +790,9 @@ func (t *rtype) MethodByName(name 
string) (m Method, ok bool) { if ut == nil { return Method{}, false } - for i := range ut.methods { - p := &ut.methods[i] + utmethods := ut.methods() + for i := 0; i < int(ut.mcount); i++ { + p := utmethods[i] if p.name.name() == name { return t.Method(i), true } @@ -1430,10 +1480,11 @@ func implements(T, V *rtype) bool { return false } i := 0 - for j := 0; j < len(v.methods); j++ { + vmethods := v.methods() + for j := 0; j < int(v.mcount); j++ { tm := &t.methods[i] - vm := &v.methods[j] - if vm.name.name() == tm.name.name() && vm.mtyp == tm.typ { + vm := vmethods[j] + if vm.name.name() == tm.name.name() && V.typeOff(vm.mtyp) == tm.typ { if i++; i >= len(t.methods) { return true } @@ -2161,21 +2212,55 @@ func SliceOf(t Type) Type { return cachePut(ckey, &slice.rtype) } -// structTypeWithMethods is a structType created at runtime with StructOf. -// It is needed to pin the []method slice from its associated uncommonType struct. -// Keep in sync with the memory layout of structType. -type structTypeWithMethods struct { - structType - u uncommonType -} - // The structLookupCache caches StructOf lookups. // StructOf does not share the common lookupCache since we need to pin -// the *structType and its associated *uncommonType (especially the -// []method slice field of that uncommonType.) +// the memory associated with *structTypeFixedN. var structLookupCache struct { sync.RWMutex - m map[uint32][]*structTypeWithMethods // keyed by hash calculated in StructOf + m map[uint32][]interface { + common() *rtype + } // keyed by hash calculated in StructOf +} + +type structTypeUncommon struct { + structType + u uncommonType +} + +// A *rtype representing a struct is followed directly in memory by an +// array of method objects representing the methods attached to the +// struct. To get the same layout for a run time generated type, we +// need an array directly following the uncommonType memory. The types +// structTypeFixed4, ...structTypeFixedN are used to do this. 
+// +// A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN. + +// TODO(crawshaw): as these structTypeFixedN and funcTypeFixedN structs +// have no methods, they could be defined at runtime using the StructOf +// function. + +type structTypeFixed4 struct { + structType + u uncommonType + m [4]method +} + +type structTypeFixed8 struct { + structType + u uncommonType + m [8]method +} + +type structTypeFixed16 struct { + structType + u uncommonType + m [16]method +} + +type structTypeFixed32 struct { + structType + u uncommonType + m [32]method } // StructOf returns the struct type containing fields. @@ -2192,7 +2277,7 @@ func StructOf(fields []StructField) Type { typalign uint8 comparable = true hashable = true - typ = new(structTypeWithMethods) + methods []method fs = make([]structField, len(fields)) repr = make([]byte, 0, 64) @@ -2269,7 +2354,6 @@ func StructOf(fields []StructField) Type { } return recv.Field(ifield).Method(imethod).Call(args) }) - } else { tfn = MakeFunc(m.typ, func(in []Value) []Value { var args []Value @@ -2287,47 +2371,59 @@ func StructOf(fields []StructField) Type { } return recv.Field(ifield).Method(imethod).Call(args) }) - } - typ.u.methods = append( - typ.u.methods, - method{ - name: m.name, - mtyp: m.typ, - ifn: unsafe.Pointer(&ifn), - tfn: unsafe.Pointer(&tfn), - }, - ) + methods = append(methods, method{ + name: m.name, + mtyp: resolveReflectType(m.typ), + ifn: resolveReflectText(unsafe.Pointer(&ifn)), + tfn: resolveReflectText(unsafe.Pointer(&tfn)), + }) } case Ptr: ptr := (*ptrType)(unsafe.Pointer(ft)) if unt := ptr.uncommon(); unt != nil { - for _, m := range unt.methods { + for _, m := range unt.methods() { if m.name.pkgPath() != nil { // TODO(sbinet) panic("reflect: embedded interface with unexported method(s) not implemented") } - typ.u.methods = append(typ.u.methods, m) + methods = append(methods, method{ + name: m.name, + mtyp: resolveReflectType(ptr.typeOff(m.mtyp)), + ifn: resolveReflectText(ptr.textOff(m.ifn)), + 
tfn: resolveReflectText(ptr.textOff(m.tfn)), + }) } } if unt := ptr.elem.uncommon(); unt != nil { - for _, m := range unt.methods { + for _, m := range unt.methods() { if m.name.pkgPath() != nil { // TODO(sbinet) panic("reflect: embedded interface with unexported method(s) not implemented") } - typ.u.methods = append(typ.u.methods, m) + methods = append(methods, method{ + name: m.name, + mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)), + ifn: resolveReflectText(ptr.elem.textOff(m.ifn)), + tfn: resolveReflectText(ptr.elem.textOff(m.tfn)), + }) } } default: if unt := ft.uncommon(); unt != nil { - for _, m := range unt.methods { + for _, m := range unt.methods() { if m.name.pkgPath() != nil { // TODO(sbinet) panic("reflect: embedded interface with unexported method(s) not implemented") } - typ.u.methods = append(typ.u.methods, m) + methods = append(methods, method{ + name: m.name, + mtyp: resolveReflectType(ft.typeOff(m.mtyp)), + ifn: resolveReflectText(ft.textOff(m.ifn)), + tfn: resolveReflectText(ft.textOff(m.tfn)), + }) + } } } @@ -2359,6 +2455,49 @@ func StructOf(fields []StructField) Type { fs[i] = f } + + var typ *structType + var ut *uncommonType + var typPin interface { + common() *rtype + } // structTypeFixedN + + switch { + case len(methods) == 0: + t := new(structTypeUncommon) + typ = &t.structType + ut = &t.u + typPin = t + case len(methods) <= 4: + t := new(structTypeFixed4) + typ = &t.structType + ut = &t.u + copy(t.m[:], methods) + typPin = t + case len(methods) <= 8: + t := new(structTypeFixed8) + typ = &t.structType + ut = &t.u + copy(t.m[:], methods) + typPin = t + case len(methods) <= 16: + t := new(structTypeFixed16) + typ = &t.structType + ut = &t.u + copy(t.m[:], methods) + typPin = t + case len(methods) <= 32: + t := new(structTypeFixed32) + typ = &t.structType + ut = &t.u + copy(t.m[:], methods) + typPin = t + default: + panic("reflect.StructOf: too many methods") + } + ut.mcount = uint16(len(methods)) + ut.moff = 
uint16(unsafe.Sizeof(uncommonType{})) + if len(fs) > 0 { repr = append(repr, ' ') } @@ -2372,15 +2511,16 @@ func StructOf(fields []StructField) Type { // Make the struct type. var istruct interface{} = struct{}{} prototype := *(**structType)(unsafe.Pointer(&istruct)) - typ.structType = *prototype - typ.structType.fields = fs + *typ = *prototype + typ.fields = fs // Look in cache structLookupCache.RLock() - for _, t := range structLookupCache.m[hash] { - if haveIdenticalUnderlyingType(&typ.rtype, &t.rtype) { + for _, st := range structLookupCache.m[hash] { + t := st.common() + if haveIdenticalUnderlyingType(&typ.rtype, t) { structLookupCache.RUnlock() - return &t.rtype + return t } } structLookupCache.RUnlock() @@ -2389,11 +2529,14 @@ func StructOf(fields []StructField) Type { structLookupCache.Lock() defer structLookupCache.Unlock() if structLookupCache.m == nil { - structLookupCache.m = make(map[uint32][]*structTypeWithMethods) + structLookupCache.m = make(map[uint32][]interface { + common() *rtype + }) } - for _, t := range structLookupCache.m[hash] { - if haveIdenticalUnderlyingType(&typ.rtype, &t.rtype) { - return &t.rtype + for _, st := range structLookupCache.m[hash] { + t := st.common() + if haveIdenticalUnderlyingType(&typ.rtype, t) { + return t } } @@ -2403,9 +2546,8 @@ func StructOf(fields []StructField) Type { // even if 't' wasn't a structType with methods, we should be ok // as the 'u uncommonType' field won't be accessed except when // tflag&tflagUncommon is set. 
- tt := (*structTypeWithMethods)(unsafe.Pointer(t)) - structLookupCache.m[hash] = append(structLookupCache.m[hash], tt) - return &tt.rtype + structLookupCache.m[hash] = append(structLookupCache.m[hash], t) + return t } } @@ -2414,7 +2556,7 @@ func StructOf(fields []StructField) Type { typ.size = size typ.align = typalign typ.fieldAlign = typalign - if len(typ.u.methods) > 0 { + if len(methods) > 0 { typ.tflag |= tflagUncommon } if !hasPtr { @@ -2514,7 +2656,7 @@ func StructOf(fields []StructField) Type { typ.kind &^= kindDirectIface } - structLookupCache.m[hash] = append(structLookupCache.m[hash], typ) + structLookupCache.m[hash] = append(structLookupCache.m[hash], typPin) return &typ.rtype } @@ -2533,6 +2675,7 @@ func runtimeStructField(field StructField) structField { } } + _ = resolveReflectType(field.Type.common()) return structField{ name: newName(field.Name, string(field.Tag), field.PkgPath, exported), typ: field.Type.common(), diff --git a/src/reflect/value.go b/src/reflect/value.go index 262545d973..d72c14e9e1 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -566,15 +566,16 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn } else { rcvrtype = v.typ ut := v.typ.uncommon() - if ut == nil || uint(i) >= uint(len(ut.methods)) { + if ut == nil || uint(i) >= uint(ut.mcount) { panic("reflect: internal error: invalid method index") } - m := &ut.methods[i] + m := ut.methods()[i] if !m.name.isExported() { panic("reflect: " + op + " of unexported method") } - fn = unsafe.Pointer(&m.ifn) - t = m.mtyp + ifn := v.typ.textOff(m.ifn) + fn = unsafe.Pointer(&ifn) + t = v.typ.typeOff(m.mtyp) } return } @@ -1687,11 +1688,11 @@ func (v Value) Type() Type { } // Method on concrete type. 
ut := v.typ.uncommon() - if ut == nil || uint(i) >= uint(len(ut.methods)) { + if ut == nil || uint(i) >= uint(ut.mcount) { panic("reflect: internal error: invalid method index") } - m := &ut.methods[i] - return m.mtyp + m := ut.methods()[i] + return v.typ.typeOff(m.mtyp) } // Uint returns v's underlying value, as a uint64. diff --git a/src/runtime/iface.go b/src/runtime/iface.go index a4c962fb7a..700bdc2f48 100644 --- a/src/runtime/iface.go +++ b/src/runtime/iface.go @@ -93,7 +93,8 @@ func additab(m *itab, locked, canfail bool) { // so can iterate over both in lock step; // the loop is O(ni+nt) not O(ni*nt). ni := len(inter.mhdr) - nt := len(x.mhdr) + nt := int(x.mcount) + xmhdr := (*[1 << 16]method)(add(unsafe.Pointer(x), uintptr(x.moff)))[:nt:nt] j := 0 for k := 0; k < ni; k++ { i := &inter.mhdr[k] @@ -104,15 +105,16 @@ func additab(m *itab, locked, canfail bool) { ipkg = inter.pkgpath } for ; j < nt; j++ { - t := &x.mhdr[j] - if t.mtyp == itype && t.name.name() == iname { + t := &xmhdr[j] + if typ.typeOff(t.mtyp) == itype && t.name.name() == iname { pkgPath := t.name.pkgPath() if pkgPath == nil { pkgPath = x.pkgpath } if t.name.isExported() || pkgPath == ipkg { if m != nil { - *(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize)) = t.ifn + ifn := typ.textOff(t.ifn) + *(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize)) = ifn } goto nextimethod } diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 1a9dbd6c53..98a986cd63 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -435,9 +435,10 @@ func schedinit() { tracebackinit() moduledataverify() stackinit() - itabsinit() mallocinit() mcommoninit(_g_.m) + typelinksinit() + itabsinit() msigsave(_g_.m) initSigmask = _g_.m.sigmask diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go index e1956569fd..02aeedaf75 100644 --- a/src/runtime/runtime1.go +++ b/src/runtime/runtime1.go @@ -486,3 +486,36 @@ func reflect_typelinks() ([]unsafe.Pointer, 
[][]int32) { } return sections, ret } + +// reflect_resolveTypeOff resolves an *rtype offset from a base type. +//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff +func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { + return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off))) +} + +// reflect_resolveTextOff resolves an function pointer offset from a base type. +//go:linkname reflect_resolveTextOff reflect.resolveTextOff +func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { + return (*_type)(rtype).textOff(textOff(off)) + +} + +// reflect_addReflectOff adds a pointer to the reflection offset lookup map. +//go:linkname reflect_addReflectOff reflect.addReflectOff +func reflect_addReflectOff(ptr unsafe.Pointer) int32 { + lock(&reflectOffs.lock) + if reflectOffs.m == nil { + reflectOffs.m = make(map[int32]unsafe.Pointer) + reflectOffs.minv = make(map[unsafe.Pointer]int32) + reflectOffs.next = -1 + } + id, found := reflectOffs.minv[ptr] + if !found { + id = reflectOffs.next + reflectOffs.next-- // use negative offsets as IDs to aid debugging + reflectOffs.m[id] = ptr + reflectOffs.minv[ptr] = id + } + unlock(&reflectOffs.lock) + return id +} diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index 8c70f22c1f..2df390253a 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -137,6 +137,8 @@ type moduledata struct { gcdatamask, gcbssmask bitvector + typemap map[typeOff]*_type // offset to *_rtype in previous module + next *moduledata } diff --git a/src/runtime/type.go b/src/runtime/type.go index fbf6f9973c..86131d3ff3 100644 --- a/src/runtime/type.go +++ b/src/runtime/type.go @@ -131,6 +131,92 @@ func (t *_type) name() string { return t._string[i+1:] } +// reflectOffs holds type offsets defined at run time by the reflect package. +// +// When a type is defined at run time, its *rtype data lives on the heap. 
+// There are a wide range of possible addresses the heap may use, that +// may not be representable as a 32-bit offset. Moreover the GC may +// one day start moving heap memory, in which case there is no stable +// offset that can be defined. +// +// To provide stable offsets, we add pin *rtype objects in a global map +// and treat the offset as an identifier. We use negative offsets that +// do not overlap with any compile-time module offsets. +// +// Entries are created by reflect.addReflectOff. +var reflectOffs struct { + lock mutex + next int32 + m map[int32]unsafe.Pointer + minv map[unsafe.Pointer]int32 +} + +func (t *_type) typeOff(off typeOff) *_type { + if off == 0 { + return nil + } + base := uintptr(unsafe.Pointer(t)) + var md *moduledata + for next := &firstmoduledata; next != nil; next = next.next { + if base >= next.types && base < next.etypes { + md = next + break + } + } + if md == nil { + lock(&reflectOffs.lock) + res := reflectOffs.m[int32(off)] + unlock(&reflectOffs.lock) + if res == nil { + println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:") + for next := &firstmoduledata; next != nil; next = next.next { + println("\ttypes", hex(next.types), "etypes", hex(next.etypes)) + } + throw("runtime: type offset base pointer out of range") + } + return (*_type)(res) + } + if t := md.typemap[off]; t != nil { + return t + } + res := md.types + uintptr(off) + if res > md.etypes { + println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes)) + throw("runtime: type offset out of range") + } + return (*_type)(unsafe.Pointer(res)) +} + +func (t *_type) textOff(off textOff) unsafe.Pointer { + base := uintptr(unsafe.Pointer(t)) + var md *moduledata + for next := &firstmoduledata; next != nil; next = next.next { + if base >= next.types && base < next.etypes { + md = next + break + } + } + if md == nil { + lock(&reflectOffs.lock) + res := reflectOffs.m[int32(off)] + unlock(&reflectOffs.lock) + if res == nil { + 
println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:") + for next := &firstmoduledata; next != nil; next = next.next { + println("\ttypes", hex(next.types), "etypes", hex(next.etypes)) + } + throw("runtime: text offset base pointer out of range") + } + return res + } + res := md.text + uintptr(off) + if res > md.etext { + println("runtime: textOff", hex(off), "out of range", hex(md.text), "-", hex(md.etext)) + throw("runtime: text offset out of range") + } + return unsafe.Pointer(res) +} + func (t *functype) in() []*_type { // See funcType in reflect/type.go for details on data layout. uadd := uintptr(unsafe.Sizeof(functype{})) @@ -154,16 +240,20 @@ func (t *functype) dotdotdot() bool { return t.outCount&(1<<15) != 0 } +type typeOff int32 +type textOff int32 + type method struct { name name - mtyp *_type - ifn unsafe.Pointer - tfn unsafe.Pointer + mtyp typeOff + ifn textOff + tfn textOff } type uncommontype struct { pkgpath *string - mhdr []method + mcount uint16 // number of methods + moff uint16 // offset from this uncommontype to [mcount]method } type imethod struct { @@ -270,6 +360,18 @@ func (n *name) name() (s string) { return s } +func (n *name) tag() (s string) { + tl := n.tagLen() + if tl == 0 { + return "" + } + nl := n.nameLen() + hdr := (*stringStruct)(unsafe.Pointer(&s)) + hdr.str = unsafe.Pointer(n.data(3 + nl + 2)) + hdr.len = tl + return s +} + func (n *name) pkgPath() *string { if *n.data(0)&(1<<2) == 0 { return nil @@ -281,3 +383,200 @@ func (n *name) pkgPath() *string { off = int(round(uintptr(off), sys.PtrSize)) return *(**string)(unsafe.Pointer(n.data(off))) } + +// typelinksinit scans the types from extra modules and builds the +// moduledata typemap used to de-duplicate type pointers. 
+func typelinksinit() { + if firstmoduledata.next == nil { + return + } + typehash := make(map[uint32][]*_type) + + modules := []*moduledata{} + for md := &firstmoduledata; md != nil; md = md.next { + modules = append(modules, md) + } + prev, modules := modules[len(modules)-1], modules[:len(modules)-1] + for len(modules) > 0 { + // Collect types from the previous module into typehash. + collect: + for _, tl := range prev.typelinks { + var t *_type + if prev.typemap == nil { + t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl))) + } else { + t = prev.typemap[typeOff(tl)] + } + // Add to typehash if not seen before. + tlist := typehash[t.hash] + for _, tcur := range tlist { + if tcur == t { + continue collect + } + } + typehash[t.hash] = append(tlist, t) + } + + // If any of this module's typelinks match a type from a + // prior module, prefer that prior type by adding the offset + // to this module's typemap. + md := modules[len(modules)-1] + md.typemap = make(map[typeOff]*_type, len(md.typelinks)) + for _, tl := range md.typelinks { + t := (*_type)(unsafe.Pointer(md.types + uintptr(tl))) + for _, candidate := range typehash[t.hash] { + if typesEqual(t, candidate) { + t = candidate + break + } + } + md.typemap[typeOff(tl)] = t + } + + prev, modules = md, modules[:len(modules)-1] + } +} + +// typesEqual reports whether two types are equal. +// +// Everywhere in the runtime and reflect packages, it is assumed that +// there is exactly one *_type per Go type, so that pointer equality +// can be used to test if types are equal. There is one place that +// breaks this assumption: buildmode=shared. In this case a type can +// appear as two different pieces of memory. This is hidden from the +// runtime and reflect package by the per-module typemap built in +// typelinksinit. It uses typesEqual to map types from later modules +// back into earlier ones. +// +// Only typelinksinit needs this function. 
+func typesEqual(t, v *_type) bool { + if t == v { + return true + } + kind := t.kind & kindMask + if kind != v.kind&kindMask { + return false + } + if t._string != v._string { + return false + } + ut := t.uncommon() + uv := v.uncommon() + if ut != nil || uv != nil { + if ut == nil || uv == nil { + return false + } + if !pkgPathEqual(ut.pkgpath, uv.pkgpath) { + return false + } + } + if kindBool <= kind && kind <= kindComplex128 { + return true + } + switch kind { + case kindString, kindUnsafePointer: + return true + case kindArray: + at := (*arraytype)(unsafe.Pointer(t)) + av := (*arraytype)(unsafe.Pointer(v)) + return typesEqual(at.elem, av.elem) && at.len == av.len + case kindChan: + ct := (*chantype)(unsafe.Pointer(t)) + cv := (*chantype)(unsafe.Pointer(v)) + return ct.dir == cv.dir && typesEqual(ct.elem, cv.elem) + case kindFunc: + ft := (*functype)(unsafe.Pointer(t)) + fv := (*functype)(unsafe.Pointer(v)) + if ft.outCount != fv.outCount || ft.inCount != fv.inCount { + return false + } + tin, vin := ft.in(), fv.in() + for i := 0; i < len(tin); i++ { + if !typesEqual(tin[i], vin[i]) { + return false + } + } + tout, vout := ft.out(), fv.out() + for i := 0; i < len(tout); i++ { + if !typesEqual(tout[i], vout[i]) { + return false + } + } + return true + case kindInterface: + it := (*interfacetype)(unsafe.Pointer(t)) + iv := (*interfacetype)(unsafe.Pointer(v)) + if !pkgPathEqual(it.pkgpath, iv.pkgpath) { + return false + } + if len(it.mhdr) != len(iv.mhdr) { + return false + } + for i := range it.mhdr { + tm := &it.mhdr[i] + vm := &iv.mhdr[i] + if tm.name.name() != vm.name.name() { + return false + } + if !pkgPathEqual(tm.name.pkgPath(), vm.name.pkgPath()) { + return false + } + if !typesEqual(tm._type, vm._type) { + return false + } + } + return true + case kindMap: + mt := (*maptype)(unsafe.Pointer(t)) + mv := (*maptype)(unsafe.Pointer(v)) + return typesEqual(mt.key, mv.key) && typesEqual(mt.elem, mv.elem) + case kindPtr: + pt := (*ptrtype)(unsafe.Pointer(t)) + 
pv := (*ptrtype)(unsafe.Pointer(v)) + return typesEqual(pt.elem, pv.elem) + case kindSlice: + st := (*slicetype)(unsafe.Pointer(t)) + sv := (*slicetype)(unsafe.Pointer(v)) + return typesEqual(st.elem, sv.elem) + case kindStruct: + st := (*structtype)(unsafe.Pointer(t)) + sv := (*structtype)(unsafe.Pointer(v)) + if len(st.fields) != len(sv.fields) { + return false + } + for i := range st.fields { + tf := &st.fields[i] + vf := &sv.fields[i] + if tf.name.name() != vf.name.name() { + return false + } + if !pkgPathEqual(tf.name.pkgPath(), vf.name.pkgPath()) { + return false + } + if !typesEqual(tf.typ, vf.typ) { + return false + } + if tf.name.tag() != vf.name.tag() { + return false + } + if tf.offset != vf.offset { + return false + } + } + return true + default: + println("runtime: impossible type kind", kind) + throw("runtime: impossible type kind") + return false + } +} + +func pkgPathEqual(p, q *string) bool { + if p == q { + return true + } + if p == nil || q == nil { + return false + } + return *p == *q +} -- cgit v1.3 From 6b85a45edc94786c7669823ee47a6ce1156d6a9a Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 21 Mar 2016 11:32:04 -0400 Subject: cmd/compile: move spills to loop exits when easy. For call-free inner loops. Revised statistics: 85 inner loop spills sunk 341 inner loop spills remaining 1162 inner loop spills that were candidates for sinking ended up completely register allocated 119 inner loop spills could have been sunk were used in "shuffling" at the bottom of the loop. 1 inner loop spill not sunk because the register assigned changed between def and exit, Understanding how to make an inner loop definition not be a candidate for from-memory shuffling (to force the shuffle code to choose some other value) should pick up some of the 119 other spills disqualified for this reason. Modified the stats printing based on feedback from Austin. 
Change-Id: If3fb9b5d5a028f42ccc36c4e3d9e0da39db5ca60 Reviewed-on: https://go-review.googlesource.com/21037 Reviewed-by: Keith Randall Run-TryBot: David Chase TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/ssa/likelyadjust.go | 142 +++++++++++++- src/cmd/compile/internal/ssa/regalloc.go | 284 ++++++++++++++++++++++++++- src/cmd/compile/internal/ssa/sparsemap.go | 16 ++ 3 files changed, 435 insertions(+), 7 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/likelyadjust.go b/src/cmd/compile/internal/ssa/likelyadjust.go index 76251bdd14..2f52c4c6e6 100644 --- a/src/cmd/compile/internal/ssa/likelyadjust.go +++ b/src/cmd/compile/internal/ssa/likelyadjust.go @@ -11,11 +11,24 @@ import ( type loop struct { header *Block // The header node of this (reducible) loop outer *loop // loop containing this loop - // Next two fields not currently used, but cheap to maintain, - // and aid in computation of inner-ness and list of blocks. - nBlocks int32 // Number of blocks in this loop but not within inner loops - isInner bool // True if never discovered to contain a loop - containsCall bool // if any block in this loop or any loop it contains is a BlockCall or BlockDefer + + // By default, children exits, and depth are not initialized. + children []*loop // loops nested directly within this loop. Initialized by assembleChildren(). + exits []*Block // exits records blocks reached by exits from this loop. Initialized by findExits(). + + // Loops aren't that common, so rather than force regalloc to keep + // a map or slice for its data, just put it here. + spills []*Value + scratch int32 + + // Next three fields used by regalloc and/or + // aid in computation of inner-ness and list of blocks. + nBlocks int32 // Number of blocks in this loop but not within inner loops + depth int16 // Nesting depth of the loop; 1 is outermost. Initialized by calculateDepths(). 
+ isInner bool // True if never discovered to contain a loop + + // register allocation uses this. + containsCall bool // if any block in this loop or any loop it contains is a BlockCall or BlockDefer } // outerinner records that outer contains inner @@ -48,6 +61,9 @@ type loopnest struct { po []*Block sdom sparseTree loops []*loop + + // Record which of the lazily initialized fields have actually been initialized. + initializedChildren, initializedDepth, initializedExits bool } func min8(a, b int8) int8 { @@ -295,6 +311,35 @@ func loopnestfor(f *Func) *loopnest { innermost.nBlocks++ } } + + ln := &loopnest{f: f, b2l: b2l, po: po, sdom: sdom, loops: loops} + + // Curious about the loopiness? "-d=ssa/likelyadjust/stats" + if f.pass.stats > 0 && len(loops) > 0 { + ln.assembleChildren() + ln.calculateDepths() + ln.findExits() + + // Note stats for non-innermost loops are slightly flawed because + // they don't account for inner loop exits that span multiple levels. + + for _, l := range loops { + x := len(l.exits) + cf := 0 + if !l.containsCall { + cf = 1 + } + inner := 0 + if l.isInner { + inner++ + } + + f.logStat("loopstats:", + l.depth, "depth", x, "exits", + inner, "is_inner", cf, "is_callfree", l.nBlocks, "n_blocks") + } + } + if f.pass.debug > 1 && len(loops) > 0 { fmt.Printf("Loops in %s:\n", f.Name) for _, l := range loops { @@ -314,5 +359,90 @@ func loopnestfor(f *Func) *loopnest { } fmt.Print("\n") } - return &loopnest{f, b2l, po, sdom, loops} + return ln +} + +// assembleChildren initializes the children field of each +// loop in the nest. 
Loop A is a child of loop B if A is +// directly nested within B (based on the reducible-loops +// detection above) +func (ln *loopnest) assembleChildren() { + if ln.initializedChildren { + return + } + for _, l := range ln.loops { + if l.outer != nil { + l.outer.children = append(l.outer.children, l) + } + } + ln.initializedChildren = true +} + +// calculateDepths uses the children field of loops +// to determine the nesting depth (outer=1) of each +// loop. This is helpful for finding exit edges. +func (ln *loopnest) calculateDepths() { + if ln.initializedDepth { + return + } + ln.assembleChildren() + for _, l := range ln.loops { + if l.outer == nil { + l.setDepth(1) + } + } + ln.initializedDepth = true +} + +// findExits uses loop depth information to find the +// exits from a loop. +func (ln *loopnest) findExits() { + if ln.initializedExits { + return + } + ln.calculateDepths() + b2l := ln.b2l + for _, b := range ln.po { + l := b2l[b.ID] + if l != nil && len(b.Succs) == 2 { + sl := b2l[b.Succs[0].ID] + if recordIfExit(l, sl, b.Succs[0]) { + continue + } + sl = b2l[b.Succs[1].ID] + if recordIfExit(l, sl, b.Succs[1]) { + continue + } + } + } + ln.initializedExits = true +} + +// recordIfExit checks sl (the loop containing b) to see if it +// is outside of loop l, and if so, records b as an exit block +// from l and returns true. +func recordIfExit(l, sl *loop, b *Block) bool { + if sl != l { + if sl == nil || sl.depth <= l.depth { + l.exits = append(l.exits, b) + return true + } + // sl is not nil, and is deeper than l + // it's possible for this to be a goto into an irreducible loop made from gotos. 
+ for sl.depth > l.depth { + sl = sl.outer + } + if sl != l { + l.exits = append(l.exits, b) + return true + } + } + return false +} + +func (l *loop) setDepth(d int16) { + l.depth = d + for _, c := range l.children { + c.setDepth(d + 1) + } } diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index dfae8612d6..d1de3646d9 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -91,6 +91,13 @@ // will have no use (so don't run deadcode after regalloc!). // TODO: maybe we should introduce these extra phis? +// Additional not-quite-SSA output occurs when spills are sunk out +// of loops to the targets of exit edges from the loop. Before sinking, +// there is one spill site (one StoreReg) targeting stack slot X, after +// sinking there may be multiple spill sites targeting stack slot X, +// with no phi functions at any join points reachable by the multiple +// spill sites. + package ssa import ( @@ -100,7 +107,8 @@ import ( ) const ( - logSpills = iota + moveSpills = iota + logSpills regDebug stackDebug ) @@ -176,6 +184,7 @@ type valState struct { uses *use // list of uses in this block spill *Value // spilled copy of the Value spillUsed bool + spillUsedShuffle bool // true if used in shuffling, after ordinary uses needReg bool // cached value of !v.Type.IsMemory() && !v.Type.IsVoid() && !.v.Type.IsFlags() rematerializeable bool // cached value of v.rematerializeable() desired register // register we want value to be in, if any @@ -243,6 +252,15 @@ type regAllocState struct { loopnest *loopnest } +type spillToSink struct { + spill *Value // Spill instruction to move (a StoreReg) + dests int32 // Bitmask indicating exit blocks from loop in which spill/val is defined. 1<= 0; i-- { + v := ss.Values[i] + entryCandidates.remove(v.ID) // Cannot be an issue, only keeps the sets smaller. 
+ for _, a := range v.Args { + if s.isLoopSpillCandidate(loop, a) { + entryCandidates.setBit(a.ID, uint(whichExit)) + } + } + } + } + + for _, e := range loop.spills { + whichblocks := entryCandidates.get(e.ID) + oldSpill := s.values[e.ID].spill + if whichblocks != 0 && whichblocks != -1 { // -1 = not in map. + toSink = append(toSink, spillToSink{spill: oldSpill, dests: whichblocks}) + } + } + + } // loop is inner etc + loop.scratch = 0 // Don't leave a mess, just in case. + loop.spills = nil + } // if scratch == nBlocks + } // if loop is not nil + // Clear any final uses. // All that is left should be the pseudo-uses added for values which // are live at the end of b. @@ -1110,9 +1241,16 @@ func (s *regAllocState) regalloc(f *Func) { // Constants, SP, SB, ... continue } + loop := s.loopForBlock(spill.Block) + if loop != nil { + nSpillsInner-- + } + spill.Args[0].Uses-- f.freeValue(spill) + nSpills-- } + for _, b := range f.Blocks { i := 0 for _, v := range b.Values { @@ -1127,12 +1265,153 @@ func (s *regAllocState) regalloc(f *Func) { // Not important now because this is the last phase that manipulates Values } + // Must clear these out before any potential recycling, though that's + // not currently implemented. + for i, ts := range toSink { + vsp := ts.spill + if vsp.Op == OpInvalid { // This spill was completely eliminated + toSink[i].spill = nil + } + } + // Anything that didn't get a register gets a stack location here. // (StoreReg, stack-based phis, inputs, ...) stacklive := stackalloc(s.f, s.spillLive) // Fix up all merge edges. s.shuffle(stacklive) + + // Insert moved spills (that have not been marked invalid above) + // at start of appropriate block and remove the originals from their + // location within loops. Notice that this can break SSA form; + // if a spill is sunk to multiple exits, there will be no phi for that + // spill at a join point downstream of those two exits, though the + // two spills will target the same stack slot. 
Notice also that this + // takes place after stack allocation, so the stack allocator does + // not need to process these malformed flow graphs. +sinking: + for _, ts := range toSink { + vsp := ts.spill + if vsp == nil { // This spill was completely eliminated + nSpillsSunkUnused++ + continue sinking + } + e := ts.spilledValue() + if s.values[e.ID].spillUsedShuffle { + nSpillsNotSunkLateUse++ + continue sinking + } + + // move spills to a better (outside of loop) block. + // This would be costly if it occurred very often, but it doesn't. + b := vsp.Block + loop := s.loopnest.b2l[b.ID] + dests := ts.dests + + // Pre-check to be sure that spilled value is still in expected register on all exits where live. + check_val_still_in_reg: + for i := uint(0); i < 32 && dests != 0; i++ { + + if dests&(1< 1 { + panic("Should be impossible given critical edges removed") + } + p := d.Preds[0] // block in loop exiting to d. + + endregs := s.endRegs[p.ID] + for _, regrec := range endregs { + if regrec.v == e && regrec.r != noRegister && regrec.c == e { // TODO: regrec.c != e implies different spill possible. + continue check_val_still_in_reg + } + } + // If here, the register assignment was lost down at least one exit and it can't be sunk + if s.f.pass.debug > moveSpills { + s.f.Config.Warnl(e.Line, "lost register assignment for spill %v in %v at exit %v to %v", + vsp, b, p, d) + } + nSpillsChanged++ + continue sinking + } + + nSpillsSunk++ + nSpillsInner-- + // don't update nSpills, since spill is only moved, and if it is duplicated, the spills-on-a-path is not increased. 
+ + dests = ts.dests + + // remove vsp from b.Values + i := 0 + for _, w := range b.Values { + if vsp == w { + continue + } + b.Values[i] = w + i++ + } + b.Values = b.Values[:i] + + for i := uint(0); i < 32 && dests != 0; i++ { + + if dests&(1< moveSpills { + s.f.Config.Warnl(e.Line, "moved spill %v in %v for %v to %v in %v", + vsp, b, e, vspnew, d) + } + + f.setHome(vspnew, f.getHome(vsp.ID)) // copy stack home + + // shuffle vspnew to the beginning of its block + copy(d.Values[1:], d.Values[0:len(d.Values)-1]) + d.Values[0] = vspnew + } + } + + if f.pass.stats > 0 { + f.logStat("spills_info", + nSpills, "spills", nSpillsInner, "inner_spills_remaining", nSpillsSunk, "inner_spills_sunk", nSpillsSunkUnused, "inner_spills_unused", nSpillsNotSunkLateUse, "inner_spills_shuffled", nSpillsChanged, "inner_spills_changed") + } +} + +// isLoopSpillCandidate indicates whether the spill for v satisfies preliminary +// spill-sinking conditions just after the last block of loop has been processed. +// In particular: +// v needs a register. +// v's spill is not (YET) used. +// v's definition is within loop. +// The spill may be used in the future, either by an outright use +// in the code, or by shuffling code inserted after stack allocation. +// Outright uses cause sinking; shuffling (within the loop) inhibits it. +func (s *regAllocState) isLoopSpillCandidate(loop *loop, v *Value) bool { + return s.values[v.ID].needReg && !s.values[v.ID].spillUsed && s.loopnest.b2l[v.Block.ID] == loop +} + +// lateSpillUse notes a late (after stack allocation) use of spill c +// This will inhibit spill sinking. +func (s *regAllocState) lateSpillUse(c *Value) { + // TODO investigate why this is necessary. + // It appears that an outside-the-loop use of + // an otherwise sinkable spill makes the spill + // a candidate for shuffling, when it would not + // otherwise have been the case (spillUsed was not + // true when isLoopSpillCandidate was called, yet + // it was shuffled). 
Such shuffling cuts the amount + // of spill sinking by more than half (in make.bash) + v := s.orig[c.ID] + if v != nil { + s.values[v.ID].spillUsedShuffle = true + } } // shuffle fixes up all the merge edges (those going into blocks of indegree > 1). @@ -1307,6 +1586,7 @@ func (e *edgeState) process() { if _, isReg := loc.(*Register); isReg { c = e.p.NewValue1(c.Line, OpCopy, c.Type, c) } else { + e.s.lateSpillUse(c) c = e.p.NewValue1(c.Line, OpLoadReg, c.Type, c) } e.set(r, vid, c, false) @@ -1395,6 +1675,7 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool { } } else { if dstReg { + e.s.lateSpillUse(c) x = e.p.NewValue1(c.Line, OpLoadReg, c.Type, c) } else { // mem->mem. Use temp register. @@ -1412,6 +1693,7 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool { e.erase(loc) r := e.findRegFor(c.Type) + e.s.lateSpillUse(c) t := e.p.NewValue1(c.Line, OpLoadReg, c.Type, c) e.set(r, vid, t, false) x = e.p.NewValue1(c.Line, OpStoreReg, loc.(LocalSlot).Type, t) diff --git a/src/cmd/compile/internal/ssa/sparsemap.go b/src/cmd/compile/internal/ssa/sparsemap.go index 6c0043b230..0211a70f09 100644 --- a/src/cmd/compile/internal/ssa/sparsemap.go +++ b/src/cmd/compile/internal/ssa/sparsemap.go @@ -32,6 +32,8 @@ func (s *sparseMap) contains(k ID) bool { return i < len(s.dense) && s.dense[i].key == k } +// get returns the value for key k, or -1 if k does +// not appear in the map. 
func (s *sparseMap) get(k ID) int32 { i := s.sparse[k] if i < len(s.dense) && s.dense[i].key == k { @@ -50,6 +52,20 @@ func (s *sparseMap) set(k ID, v int32) { s.sparse[k] = len(s.dense) - 1 } +// setBit sets the v'th bit of k's value, where 0 <= v < 32 +func (s *sparseMap) setBit(k ID, v uint) { + if v >= 32 { + panic("bit index too large.") + } + i := s.sparse[k] + if i < len(s.dense) && s.dense[i].key == k { + s.dense[i].val |= 1 << v + return + } + s.dense = append(s.dense, sparseEntry{k, 1 << v}) + s.sparse[k] = len(s.dense) - 1 +} + func (s *sparseMap) remove(k ID) { i := s.sparse[k] if i < len(s.dense) && s.dense[i].key == k { -- cgit v1.3 From 7d0d1222477ce50736ee24adb38c1f487d0801d9 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Tue, 12 Apr 2016 18:00:04 -0700 Subject: cmd/compile: move more compiler specifics into compiler specific export section Instead of indicating with each function signature if it has an inlineable body, collect all functions in order and export function bodies with function index in platform-specific section. Moves this compiler specific information out of the platform-independent export data section, and removes an int value for all functions w/o body. Also simplifies the code a bit. 
Change-Id: I8b2d7299dbe81f2706be49ecfb9d9f7da85fd854 Reviewed-on: https://go-review.googlesource.com/21939 Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/bexport.go | 63 +++++++++++--------- src/cmd/compile/internal/gc/bimport.go | 104 ++++++++++++++------------------- src/go/internal/gcimporter/bimport.go | 2 - 3 files changed, 80 insertions(+), 89 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index cb438d7573..e780bcf577 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -124,10 +124,11 @@ const exportVersion = "v0" const exportInlined = true // default: true type exporter struct { - out *bufio.Writer - pkgIndex map[*Pkg]int - typIndex map[*Type]int - inlined []*Func + out *bufio.Writer + + pkgIndex map[*Pkg]int // pkg -> pkg index in order of appearance + typIndex map[*Type]int // type -> type index in order of appearance + funcList []*Func // in order of appearance // debugging support written int // bytes written @@ -322,27 +323,39 @@ func export(out *bufio.Writer, trace bool) int { // --- inlined function bodies --- if p.trace { - p.tracef("\n--- inlined function bodies ---\n[ ") + p.tracef("\n--- inlined function bodies ---\n") if p.indent != 0 { Fatalf("exporter: incorrect indentation") } } - // write inlined function bodies - p.int(len(p.inlined)) - if p.trace { - p.tracef("]\n") - } - for _, f := range p.inlined { - if p.trace { - p.tracef("\n----\nfunc { %s }\n", Hconv(f.Inl, FmtSharp)) - } - p.stmtList(f.Inl) - if p.trace { - p.tracef("\n") + // write inlineable function bodies + objcount = 0 + for i, f := range p.funcList { + if f != nil { + // function has inlineable body: + // write index and body + if p.trace { + p.tracef("\n----\nfunc { %s }\n", Hconv(f.Inl, FmtSharp)) + } + p.int(i) + p.stmtList(f.Inl) + if p.trace { + p.tracef("\n") + } + objcount++ } } + // indicate end of list + if p.trace { + 
p.tracef("\n") + } + p.tag(-1) // invalid index terminates list + + // for self-verification only (redundant) + p.int(objcount) + if p.trace { p.tracef("\n--- end ---\n") } @@ -443,10 +456,9 @@ func (p *exporter) obj(sym *Sym) { p.paramList(sig.Params(), inlineable) p.paramList(sig.Results(), inlineable) - index := -1 + var f *Func if inlineable { - index = len(p.inlined) - p.inlined = append(p.inlined, sym.Def.Func) + f = sym.Def.Func // TODO(gri) re-examine reexportdeplist: // Because we can trivially export types // in-place, we don't need to collect types @@ -454,9 +466,9 @@ func (p *exporter) obj(sym *Sym) { // With an adjusted reexportdeplist used only // by the binary exporter, we can also avoid // the global exportlist. - reexportdeplist(sym.Def.Func.Inl) + reexportdeplist(f.Inl) } - p.int(index) + p.funcList = append(p.funcList, f) } else { // variable p.tag(varTag) @@ -563,13 +575,12 @@ func (p *exporter) typ(t *Type) { p.paramList(sig.Params(), inlineable) p.paramList(sig.Results(), inlineable) - index := -1 + var f *Func if inlineable { - index = len(p.inlined) - p.inlined = append(p.inlined, mfn.Func) + f = mfn.Func reexportdeplist(mfn.Func.Inl) } - p.int(index) + p.funcList = append(p.funcList, f) } if p.trace && len(methods) > 0 { diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index 9cebafcaef..2e80b9f81d 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -23,9 +23,10 @@ type importer struct { in *bufio.Reader buf []byte // for reading strings bufarray [64]byte // initial underlying array for buf, large enough to avoid allocation when compiling std lib - pkgList []*Pkg - typList []*Type - inlined []*Node // functions with pending inlined function bodies + + pkgList []*Pkg // in order of appearance + typList []*Type // in order of appearance + funcList []*Node // in order of appearance; nil entry means already declared // debugging support debugFormat bool 
@@ -107,21 +108,35 @@ func Import(in *bufio.Reader) { Fatalf("importer: got %d objects; want %d", objcount, count) } - // read inlined functions bodies + // read inlineable functions bodies if dclcontext != PEXTERN { Fatalf("importer: unexpected context %d", dclcontext) } - bcount := p.int() // consistency check only - if bcount != len(p.inlined) { - Fatalf("importer: expected %d inlined function bodies; got %d", bcount, len(p.inlined)) - } - for _, f := range p.inlined { + objcount = 0 + for i0 := -1; ; { + i := p.int() // index of function with inlineable body + if i < 0 { + break + } + + // don't process the same function twice + if i <= i0 { + Fatalf("importer: index not increasing: %d <= %d", i, i0) + } + i0 = i + if Funcdepth != 0 { Fatalf("importer: unexpected Funcdepth %d", Funcdepth) } - if f != nil { - // function body not yet imported - read body and set it + + // Note: In the original code, funchdr and funcbody are called for + // all functions (that were not yet imported). Now, we are calling + // them only for functions with inlineable bodies. funchdr does + // parameter renaming which doesn't matter if we don't have a body. 
+ + if f := p.funcList[i]; f != nil { + // function not yet imported - read body and set it funchdr(f) f.Func.Inl.Set(p.stmtList()) funcbody(f) @@ -131,6 +146,13 @@ func Import(in *bufio.Reader) { p.stmtList() dclcontext = PEXTERN } + + objcount++ + } + + // self-verification + if count := p.int(); count != objcount { + Fatalf("importer: got %d functions; want %d", objcount, count) } if dclcontext != PEXTERN { @@ -214,47 +236,23 @@ func (p *importer) obj(tag int) { sym := p.qualifiedName() params := p.paramList() result := p.paramList() - inl := p.int() sig := functype(nil, params, result) importsym(sym, ONAME) if sym.Def != nil && sym.Def.Op == ONAME { - if Eqtype(sig, sym.Def.Type) { - // function was imported before (via another import) - dclcontext = PDISCARD // since we skip funchdr below - } else { + // function was imported before (via another import) + if !Eqtype(sig, sym.Def.Type) { Fatalf("importer: inconsistent definition for func %v during import\n\t%v\n\t%v", sym, sym.Def.Type, sig) } - } - - var n *Node - if dclcontext != PDISCARD { - n = newfuncname(sym) - n.Type = sig - declare(n, PFUNC) - if inl < 0 { - funchdr(n) - } - } - - if inl >= 0 { - // function has inlined body - collect for later - if inl != len(p.inlined) { - Fatalf("importer: inlined index = %d; want %d", inl, len(p.inlined)) - } - p.inlined = append(p.inlined, n) - } - - // parser.go:hidden_import - if dclcontext == PDISCARD { - dclcontext = PEXTERN // since we skip the funcbody below + p.funcList = append(p.funcList, nil) break } - if inl < 0 { - funcbody(n) - } - importlist = append(importlist, n) // TODO(gri) may only be needed for inlineable functions + n := newfuncname(sym) + n.Type = sig + declare(n, PFUNC) + p.funcList = append(p.funcList, n) + importlist = append(importlist, n) if Debug['E'] > 0 { fmt.Printf("import [%q] func %v \n", importpkg.Path, n) @@ -316,23 +314,13 @@ func (p *importer) typ() *Type { recv := p.paramList() // TODO(gri) do we need a full param list for the 
receiver? params := p.paramList() result := p.paramList() - inl := p.int() n := methodname1(newname(sym), recv[0].Right) n.Type = functype(recv[0], params, result) checkwidth(n.Type) addmethod(sym, n.Type, tsym.Pkg, false, false) - if inl < 0 { - funchdr(n) - } - - if inl >= 0 { - // method has inlined body - collect for later - if inl != len(p.inlined) { - Fatalf("importer: inlined index = %d; want %d", inl, len(p.inlined)) - } - p.inlined = append(p.inlined, n) - } + p.funcList = append(p.funcList, n) + importlist = append(importlist, n) // (comment from parser.go) // inl.C's inlnode in on a dotmeth node expects to find the inlineable body as @@ -341,12 +329,6 @@ func (p *importer) typ() *Type { // this back link here we avoid special casing there. n.Type.SetNname(n) - // parser.go:hidden_import - if inl < 0 { - funcbody(n) - } - importlist = append(importlist, n) // TODO(gri) may only be needed for inlineable functions - if Debug['E'] > 0 { fmt.Printf("import [%q] meth %v \n", importpkg.Path, n) if Debug['m'] > 2 && len(n.Func.Inl.Slice()) != 0 { diff --git a/src/go/internal/gcimporter/bimport.go b/src/go/internal/gcimporter/bimport.go index a9d678b021..81af064b88 100644 --- a/src/go/internal/gcimporter/bimport.go +++ b/src/go/internal/gcimporter/bimport.go @@ -186,7 +186,6 @@ func (p *importer) obj(tag int) { params, isddd := p.paramList() result, _ := p.paramList() sig := types.NewSignature(nil, params, result, isddd) - p.int() // read and discard index of inlined function body p.declare(types.NewFunc(token.NoPos, pkg, name, sig)) default: @@ -269,7 +268,6 @@ func (p *importer) typ(parent *types.Package) types.Type { recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? 
params, isddd := p.paramList() result, _ := p.paramList() - p.int() // read and discard index of inlined function body sig := types.NewSignature(recv.At(0), params, result, isddd) t0.AddMethod(types.NewFunc(token.NoPos, parent, name, sig)) -- cgit v1.3 From eb79f21c48915454b372de7fee2c6b86d52ea0bc Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Tue, 12 Apr 2016 21:58:44 -0700 Subject: cmd/compile, go/importer: minor cleanups Change-Id: Ic7a1fb0dbbf108052c970a4a830269a5673df7df Reviewed-on: https://go-review.googlesource.com/21963 Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/bexport.go | 5 ++-- src/cmd/compile/internal/gc/bimport.go | 10 +++++--- src/go/internal/gcimporter/bimport.go | 46 ++++++++++++++-------------------- 3 files changed, 27 insertions(+), 34 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index e780bcf577..59a85c2f23 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -59,9 +59,8 @@ Encoding format: The export data starts with a single byte indicating the encoding format (compact, or with debugging information), followed by a version string -(so we can evolve the encoding if need be), the name of the imported -package, and a string containing platform-specific information for that -package. +(so we can evolve the encoding if need be), and then the package object +for the exported package (with an empty path). After this header, two lists of objects and the list of inlined function bodies follows. 
diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index 2e80b9f81d..4a93b5a91d 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -59,9 +59,6 @@ func Import(in *bufio.Reader) { // read package data p.pkg() - if p.pkgList[0] != importpkg { - Fatalf("importer: imported package not found in pkgList[0]") - } // defer some type-checking until all types are read in completely // (parser.go:import_package) @@ -193,7 +190,12 @@ func (p *importer) pkg() *Pkg { Fatalf("importer: bad path in import: %q", path) } - // an empty path denotes the package we are currently importing + // an empty path denotes the package we are currently importing; + // it must be the first package we see + if (path == "") != (len(p.pkgList) == 0) { + panic(fmt.Sprintf("package path %q for pkg index %d", path, len(p.pkgList))) + } + pkg := importpkg if path != "" { pkg = mkpkg(path) diff --git a/src/go/internal/gcimporter/bimport.go b/src/go/internal/gcimporter/bimport.go index 81af064b88..7a7bc871f4 100644 --- a/src/go/internal/gcimporter/bimport.go +++ b/src/go/internal/gcimporter/bimport.go @@ -16,12 +16,15 @@ import ( ) type importer struct { - imports map[string]*types.Package - data []byte + imports map[string]*types.Package + data []byte + path string + buf []byte // for reading strings bufarray [64]byte // initial underlying array for buf, large enough to avoid allocation when compiling std lib - pkgList []*types.Package - typList []types.Type + + pkgList []*types.Package + typList []types.Type debugFormat bool read int // bytes read @@ -35,6 +38,7 @@ func BImportData(imports map[string]*types.Package, data []byte, path string) (i p := importer{ imports: imports, data: data, + path: path, } p.buf = p.bufarray[:] @@ -58,25 +62,7 @@ func BImportData(imports map[string]*types.Package, data []byte, path string) (i p.typList = append(p.typList, predeclared...) 
// read package data - // TODO(gri) clean this up - i := p.tagOrIndex() - if i != packageTag { - panic(fmt.Sprintf("package tag expected, got %d", i)) - } - name := p.string() - if s := p.string(); s != "" { - panic(fmt.Sprintf("empty path expected, got %s", s)) - } - pkg := p.imports[path] - if pkg == nil { - pkg = types.NewPackage(path, name) - p.imports[path] = pkg - } - p.pkgList = append(p.pkgList, pkg) - - if debug && p.pkgList[0] != pkg { - panic("imported packaged not found in pkgList[0]") - } + pkg := p.pkg() // read objects of phase 1 only (see cmd/compiler/internal/gc/bexport.go) objcount := 0 @@ -91,7 +77,7 @@ func BImportData(imports map[string]*types.Package, data []byte, path string) (i // self-verification if count := p.int(); count != objcount { - panic(fmt.Sprintf("importer: got %d objects; want %d", objcount, count)) + panic(fmt.Sprintf("got %d objects; want %d", objcount, count)) } // ignore compiler-specific import data @@ -135,16 +121,22 @@ func (p *importer) pkg() *types.Package { panic("empty package name in import") } - // we should never see an empty import path - if path == "" { - panic("empty import path") + // an empty path denotes the package we are currently importing; + // it must be the first package we see + if (path == "") != (len(p.pkgList) == 0) { + panic(fmt.Sprintf("package path %q for pkg index %d", path, len(p.pkgList))) } // if the package was imported before, use that one; otherwise create a new one + if path == "" { + path = p.path + } pkg := p.imports[path] if pkg == nil { pkg = types.NewPackage(path, name) p.imports[path] = pkg + } else if pkg.Name() != name { + panic(fmt.Sprintf("conflicting names %s and %s for package %q", pkg.Name(), name, path)) } p.pkgList = append(p.pkgList, pkg) -- cgit v1.3 From 3ea7cfabbb0549d62d524e4ad30cb464af250fde Mon Sep 17 00:00:00 2001 From: Todd Neal Date: Wed, 13 Apr 2016 08:51:46 -0400 Subject: cmd/compile: sort partitions by dom to speed up cse We do two O(n) scans of all values in 
an eqclass when computing substitutions for CSE. In unfortunate cases, like those found in #15112, we can have a large eqclass composed of values found in blocks none of whom dominate the other. This leads to O(n^2) behavior. The elements are removed one at a time, with O(n) scans each time. This CL removes the linear scan by sorting the eqclass so that dominant values will be sorted first. As long as we also ensure we don't disturb the sort order, then we no longer need to scan for the maximally dominant value. For the code in issue #15112: Before: real 1m26.094s user 1m30.776s sys 0m1.125s Aefter: real 0m52.099s user 0m56.829s sys 0m1.092s Updates #15112 Change-Id: Ic4f8680ed172e716232436d31963209c146ef850 Reviewed-on: https://go-review.googlesource.com/21981 Reviewed-by: Josh Bleecher Snyder Run-TryBot: Todd Neal TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/ssa/cse.go | 32 ++++++++++++++++++++---------- src/cmd/compile/internal/ssa/sparsetree.go | 12 +++++++++++ 2 files changed, 33 insertions(+), 11 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go index c12d51e50c..76db9d5467 100644 --- a/src/cmd/compile/internal/ssa/cse.go +++ b/src/cmd/compile/internal/ssa/cse.go @@ -137,23 +137,20 @@ func cse(f *Func) { // if v and w are in the same equivalence class and v dominates w. 
rewrite := make([]*Value, f.NumValues()) for _, e := range partition { + sort.Sort(sortbyentry{e, f.sdom}) for len(e) > 1 { - // Find a maximal dominant element in e + // e is sorted by entry value so maximal dominant element should be + // found first in the slice v := e[0] - for _, w := range e[1:] { - if f.sdom.isAncestorEq(w.Block, v.Block) { - v = w - } - } - + e = e[1:] // Replace all elements of e which v dominates for i := 0; i < len(e); { w := e[i] - if w == v { - e, e[i] = e[:len(e)-1], e[len(e)-1] - } else if f.sdom.isAncestorEq(v.Block, w.Block) { + if f.sdom.isAncestorEq(v.Block, w.Block) { rewrite[w.ID] = v - e, e[i] = e[:len(e)-1], e[len(e)-1] + // retain the sort order + copy(e[i:], e[i+1:]) + e = e[:len(e)-1] } else { i++ } @@ -308,3 +305,16 @@ func (sv sortvalues) Less(i, j int) bool { // Sort by value ID last to keep the sort result deterministic. return v.ID < w.ID } + +type sortbyentry struct { + a []*Value // array of values + sdom sparseTree +} + +func (sv sortbyentry) Len() int { return len(sv.a) } +func (sv sortbyentry) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] } +func (sv sortbyentry) Less(i, j int) bool { + v := sv.a[i] + w := sv.a[j] + return sv.sdom.maxdomorder(v.Block) < sv.sdom.maxdomorder(w.Block) +} diff --git a/src/cmd/compile/internal/ssa/sparsetree.go b/src/cmd/compile/internal/ssa/sparsetree.go index cae91e7ddb..45c7897496 100644 --- a/src/cmd/compile/internal/ssa/sparsetree.go +++ b/src/cmd/compile/internal/ssa/sparsetree.go @@ -116,6 +116,9 @@ func (t sparseTree) Child(x *Block) *Block { // isAncestorEq reports whether x is an ancestor of or equal to y. func (t sparseTree) isAncestorEq(x, y *Block) bool { + if x == y { + return true + } xx := &t[x.ID] yy := &t[y.ID] return xx.entry <= yy.entry && yy.exit <= xx.exit @@ -123,7 +126,16 @@ func (t sparseTree) isAncestorEq(x, y *Block) bool { // isAncestor reports whether x is a strict ancestor of y. 
func (t sparseTree) isAncestor(x, y *Block) bool { + if x == y { + return false + } xx := &t[x.ID] yy := &t[y.ID] return xx.entry < yy.entry && yy.exit < xx.exit } + +// maxdomorder returns a value to allow a maximal dominator first sort. maxdomorder(x) < maxdomorder(y) is true +// if x may dominate y, and false if x cannot dominate y. +func (t sparseTree) maxdomorder(x *Block) int32 { + return t[x.ID].entry +} -- cgit v1.3 From f120936dfffa3ac935730699587e6957f2d5ea61 Mon Sep 17 00:00:00 2001 From: David Crawshaw Date: Thu, 31 Mar 2016 10:02:10 -0400 Subject: cmd/compile, etc: use name for type pkgPath By replacing the *string used to represent pkgPath with a reflect.name everywhere, the embedded *string for package paths inside the reflect.name can be replaced by an offset, nameOff. This reduces the number of pointers in the type information. This also moves all reflect.name types into the same section, making it possible to use nameOff more widely in later CLs. No significant binary size change for normal binaries, but: linux/amd64 PIE: cmd/go: -440KB (3.7%) jujud: -2.6MB (3.2%) For #6853. Change-Id: I3890b132a784a1090b1b72b32febfe0bea77eaee Reviewed-on: https://go-review.googlesource.com/21395 Run-TryBot: David Crawshaw TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/cmd/compile/internal/gc/go.go | 2 +- src/cmd/compile/internal/gc/reflect.go | 117 +++++++++++++++++++-------------- src/cmd/internal/obj/data.go | 13 +++- src/reflect/type.go | 64 +++++++++--------- src/runtime/heapdump.go | 5 +- src/runtime/iface.go | 8 +-- src/runtime/type.go | 72 ++++++++++++-------- 7 files changed, 168 insertions(+), 113 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 5df49b56d6..8411d2d0ac 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -20,7 +20,7 @@ const ( type Pkg struct { Name string // package name, e.g. 
"sys" Path string // string literal used in import statement, e.g. "runtime/internal/sys" - Pathsym *Sym + Pathsym *obj.LSym Prefix string // escaped path for use in symbol table Imported bool // export data of this package was parsed Exported bool // import line written in export data diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 2bd50b4665..70a75f9324 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -412,8 +412,6 @@ func imethods(t *Type) []*Sig { return methods } -var dimportpath_gopkg *Pkg - func dimportpath(p *Pkg) { if p.Pathsym != nil { return @@ -426,27 +424,18 @@ func dimportpath(p *Pkg) { return } - if dimportpath_gopkg == nil { - dimportpath_gopkg = mkpkg("go") - dimportpath_gopkg.Name = "go" - } - - nam := "importpath." + p.Prefix + "." - - n := Nod(ONAME, nil, nil) - n.Sym = Pkglookup(nam, dimportpath_gopkg) - - n.Class = PEXTERN - n.Xoffset = 0 - p.Pathsym = n.Sym - + var str string if p == localpkg { // Note: myimportpath != "", or else dgopkgpath won't call dimportpath. - gdatastring(n, myimportpath) + str = myimportpath } else { - gdatastring(n, p.Path) + str = p.Path } - ggloblsym(n.Sym, int32(Types[TSTRING].Width), obj.DUPOK|obj.RODATA) + + s := obj.Linklookup(Ctxt, "go.importpath."+p.Prefix+".", 0) + ot := dnameData(s, 0, str, "", nil, false) + ggloblLSym(s, int32(ot), obj.DUPOK|obj.RODATA) + p.Pathsym = s } func dgopkgpath(s *Sym, ot int, pkg *Pkg) int { @@ -469,7 +458,23 @@ func dgopkgpathLSym(s *obj.LSym, ot int, pkg *Pkg) int { } dimportpath(pkg) - return dsymptrLSym(s, ot, Linksym(pkg.Pathsym), 0) + return dsymptrLSym(s, ot, pkg.Pathsym, 0) +} + +// dgopkgpathOffLSym writes an offset relocation in s at offset ot to the pkg path symbol. +func dgopkgpathOffLSym(s *obj.LSym, ot int, pkg *Pkg) int { + if pkg == localpkg && myimportpath == "" { + // If we don't know the full import path of the package being compiled + // (i.e. 
-p was not passed on the compiler command line), emit a reference to + // go.importpath.""., which the linker will rewrite using the correct import path. + // Every package that imports this one directly defines the symbol. + // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. + ns := obj.Linklookup(Ctxt, `go.importpath."".`, 0) + return dsymptrOffLSym(s, ot, ns, 0) + } + + dimportpath(pkg) + return dsymptrOffLSym(s, ot, pkg.Pathsym, 0) } // isExportedField reports whether a struct field is exported. @@ -495,13 +500,12 @@ func dnameField(s *Sym, ot int, ft *Field) int { if ft.Note != nil { tag = *ft.Note } - return dname(s, ot, name, tag, nil, isExportedField(ft)) + nsym := dname(name, tag, nil, isExportedField(ft)) + return dsymptrLSym(Linksym(s), ot, nsym, 0) } -var dnameCount int - -// dname dumps a reflect.name for a struct field or method. -func dname(s *Sym, ot int, name, tag string, pkg *Pkg, exported bool) int { +// dnameData writes the contents of a reflect.name into s at offset ot. +func dnameData(s *obj.LSym, ot int, name, tag string, pkg *Pkg, exported bool) int { if len(name) > 1<<16-1 { Fatalf("name too long: %s", name) } @@ -534,31 +538,46 @@ func dname(s *Sym, ot int, name, tag string, pkg *Pkg, exported bool) int { copy(tb[2:], tag) } - // Very few names require a pkgPath *string (only those - // defined in a different package than their type). So if - // there is no pkgPath, we treat the name contents as string - // data that duplicates across packages. - var bsym *obj.LSym + ot = int(s.WriteBytes(Ctxt, int64(ot), b)) + + if pkg != nil { + ot = dgopkgpathOffLSym(s, ot, pkg) + } + + return ot +} + +var dnameCount int + +// dname creates a reflect.name for a struct field or method. +func dname(name, tag string, pkg *Pkg, exported bool) *obj.LSym { + // Write out data as "type.." 
to signal two things to the + // linker, first that when dynamically linking, the symbol + // should be moved to a relro section, and second that the + // contents should not be decoded as a type. + sname := "type..namedata." if pkg == nil { - _, bsym = stringsym(string(b)) + // In the common case, share data with other packages. + if name == "" { + if exported { + sname += "-noname-exported." + tag + } else { + sname += "-noname-unexported." + tag + } + } else { + sname += name + "." + tag + } } else { - // Write out data as "type.." to signal two things to the - // linker, first that when dynamically linking, the symbol - // should be moved to a relro section, and second that the - // contents should not be decoded as a type. - bsymname := fmt.Sprintf(`type..methodname."".%d`, dnameCount) + sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount) dnameCount++ - bsym = obj.Linklookup(Ctxt, bsymname, 0) - bsym.P = b - boff := len(b) - boff = int(Rnd(int64(boff), int64(Widthptr))) - boff = dgopkgpathLSym(bsym, boff, pkg) - ggloblLSym(bsym, int32(boff), obj.RODATA|obj.LOCAL) } - - ot = dsymptrLSym(Linksym(s), ot, bsym, 0) - - return ot + s := obj.Linklookup(Ctxt, sname, 0) + if len(s.P) > 0 { + return s + } + ot := dnameData(s, 0, name, tag, pkg, exported) + ggloblLSym(s, int32(ot), obj.DUPOK|obj.RODATA) + return s } // dextratype dumps the fields of a runtime.uncommontype. 
@@ -627,7 +646,8 @@ func dextratypeData(s *Sym, ot int, t *Type) int { if !exported && a.pkg != typePkg(t) { pkg = a.pkg } - ot = dname(s, ot, a.name, "", pkg, exported) + nsym := dname(a.name, "", pkg, exported) + ot = dsymptrLSym(lsym, ot, nsym, 0) ot = dmethodptrOffLSym(lsym, ot, Linksym(dtypesym(a.mtype))) ot = dmethodptrOffLSym(lsym, ot, Linksym(a.isym)) ot = dmethodptrOffLSym(lsym, ot, Linksym(a.tsym)) @@ -1213,7 +1233,8 @@ ok: if !exported && a.pkg != tpkg { pkg = a.pkg } - ot = dname(s, ot, a.name, "", pkg, exported) + nsym := dname(a.name, "", pkg, exported) + ot = dsymptrLSym(Linksym(s), ot, nsym, 0) ot = dsymptr(s, ot, dtypesym(a.type_), 0) } diff --git a/src/cmd/internal/obj/data.go b/src/cmd/internal/obj/data.go index 546ff37269..d7f0840bc1 100644 --- a/src/cmd/internal/obj/data.go +++ b/src/cmd/internal/obj/data.go @@ -75,7 +75,11 @@ func (s *LSym) prepwrite(ctxt *Link, off int64, siz int) { if s.Type == SBSS || s.Type == STLSBSS { ctxt.Diag("cannot supply data for BSS var") } - s.Grow(off + int64(siz)) + l := off + int64(siz) + s.Grow(l) + if l > s.Size { + s.Size = l + } } // WriteFloat32 writes f into s at offset off. @@ -150,6 +154,13 @@ func (s *LSym) WriteString(ctxt *Link, off int64, siz int, str string) { copy(s.P[off:off+int64(siz)], str) } +// WriteBytes writes a slice of bytes into s at offset off. +func (s *LSym) WriteBytes(ctxt *Link, off int64, b []byte) int64 { + s.prepwrite(ctxt, off, len(b)) + copy(s.P[off:], b) + return off + int64(len(b)) +} + func Addrel(s *LSym) *Reloc { s.R = append(s.R, Reloc{}) return &s.R[len(s.R)-1] diff --git a/src/reflect/type.go b/src/reflect/type.go index c7ed402be2..3c7affcd7f 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -299,9 +299,9 @@ type method struct { // Using a pointer to this struct reduces the overall size required // to describe an unnamed type with no methods. 
type uncommonType struct { - pkgPath *string // import path; nil for built-in types like int, string - mcount uint16 // number of methods - moff uint16 // offset from this uncommontype to [mcount]method + pkgPath name // import path; empty for built-in types like int, string + mcount uint16 // number of methods + moff uint16 // offset from this uncommontype to [mcount]method } // ChanDir represents a channel type's direction. @@ -354,7 +354,7 @@ type imethod struct { // interfaceType represents an interface type. type interfaceType struct { rtype `reflect:"interface"` - pkgPath *string // import path + pkgPath name // import path methods []imethod // sorted by hash } @@ -396,7 +396,7 @@ type structField struct { // structType represents a struct type. type structType struct { rtype `reflect:"struct"` - pkgPath *string + pkgPath name fields []structField // sorted by offset } @@ -406,7 +406,7 @@ type structType struct { // // 1<<0 the name is exported // 1<<1 tag data follows the name -// 1<<2 pkgPath *string follow the name and tag +// 1<<2 pkgPath nameOff follows the name and tag // // The next two bytes are the data length: // @@ -417,10 +417,9 @@ type structType struct { // If tag data follows then bytes 3+l and 3+l+1 are the tag length, // with the data following. // -// If the import path follows, then ptrSize bytes at the end of -// the data form a *string. The pointer is aligned to its width. -// The import path is only set for concrete methods that are defined -// in a different package than their type. +// If the import path follows, then 4 bytes at the end of +// the data form a nameOff. The import path is only set for concrete +// methods that are defined in a different package than their type. 
type name struct { bytes *byte } @@ -446,6 +445,9 @@ func (n *name) tagLen() int { } func (n *name) name() (s string) { + if n.bytes == nil { + return "" + } nl := n.nameLen() if nl == 0 { return "" @@ -468,16 +470,18 @@ func (n *name) tag() (s string) { return s } -func (n *name) pkgPath() *string { - if *n.data(0)&(1<<2) == 0 { - return nil +func (n *name) pkgPath() string { + if n.bytes == nil || *n.data(0)&(1<<2) == 0 { + return "" } off := 3 + n.nameLen() if tl := n.tagLen(); tl > 0 { off += 2 + tl } - off = int(round(uintptr(off), ptrSize)) - return *(**string)(unsafe.Pointer(n.data(off))) + var nameOff int32 + copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:]) + pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n), nameOff))} + return pkgPathName.name() } // round n up to a multiple of a. a must be a power of 2. @@ -595,10 +599,10 @@ func (t *uncommonType) methods() []method { } func (t *uncommonType) PkgPath() string { - if t == nil || t.pkgPath == nil { + if t == nil { return "" } - return *t.pkgPath + return t.pkgPath.name() } // resolveTypeOff resolves an *rtype offset from a base type. 
@@ -752,11 +756,10 @@ func (t *rtype) Method(i int) (m Method) { m.Name = p.name.name() fl := flag(Func) if !p.name.isExported() { - pkgPath := p.name.pkgPath() - if pkgPath == nil { - pkgPath = ut.pkgPath + m.PkgPath = p.name.pkgPath() + if m.PkgPath == "" { + m.PkgPath = ut.pkgPath.name() } - m.PkgPath = *pkgPath fl |= flagStickyRO } if p.mtyp != 0 { @@ -1004,11 +1007,10 @@ func (t *interfaceType) Method(i int) (m Method) { p := &t.methods[i] m.Name = p.name.name() if !p.name.isExported() { - pkgPath := p.name.pkgPath() - if pkgPath == nil { - pkgPath = t.pkgPath + m.PkgPath = p.name.pkgPath() + if m.PkgPath == "" { + m.PkgPath = t.pkgPath.name() } - m.PkgPath = *pkgPath } m.Type = toType(p.typ) m.Index = i @@ -1146,9 +1148,9 @@ func (t *structType) Field(i int) (f StructField) { f.Name = t.Name() f.Anonymous = true } - if t.pkgPath != nil && !p.name.isExported() { + if !p.name.isExported() { // Fields never have an import path in their name. - f.PkgPath = *t.pkgPath + f.PkgPath = t.pkgPath.name() } if tag := p.name.tag(); tag != "" { f.Tag = StructTag(tag) @@ -2325,7 +2327,7 @@ func StructOf(fields []StructField) Type { case Interface: ift := (*interfaceType)(unsafe.Pointer(ft)) for im, m := range ift.methods { - if m.name.pkgPath() != nil { + if m.name.pkgPath() != "" { // TODO(sbinet) panic("reflect: embedded interface with unexported method(s) not implemented") } @@ -2384,7 +2386,7 @@ func StructOf(fields []StructField) Type { ptr := (*ptrType)(unsafe.Pointer(ft)) if unt := ptr.uncommon(); unt != nil { for _, m := range unt.methods() { - if m.name.pkgPath() != nil { + if m.name.pkgPath() != "" { // TODO(sbinet) panic("reflect: embedded interface with unexported method(s) not implemented") } @@ -2398,7 +2400,7 @@ func StructOf(fields []StructField) Type { } if unt := ptr.elem.uncommon(); unt != nil { for _, m := range unt.methods() { - if m.name.pkgPath() != nil { + if m.name.pkgPath() != "" { // TODO(sbinet) panic("reflect: embedded interface with unexported 
method(s) not implemented") } @@ -2413,7 +2415,7 @@ func StructOf(fields []StructField) Type { default: if unt := ft.uncommon(); unt != nil { for _, m := range unt.methods() { - if m.name.pkgPath() != nil { + if m.name.pkgPath() != "" { // TODO(sbinet) panic("reflect: embedded interface with unexported method(s) not implemented") } diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go index 2410b1954a..adfd660847 100644 --- a/src/runtime/heapdump.go +++ b/src/runtime/heapdump.go @@ -183,10 +183,11 @@ func dumptype(t *_type) { dumpint(tagType) dumpint(uint64(uintptr(unsafe.Pointer(t)))) dumpint(uint64(t.size)) - if x := t.uncommon(); x == nil || x.pkgpath == nil { + if x := t.uncommon(); x == nil || x.pkgpath.name() == "" { dumpstr(t._string) } else { - pkgpath := stringStructOf(x.pkgpath) + pkgpathstr := x.pkgpath.name() + pkgpath := stringStructOf(&pkgpathstr) namestr := t.name() name := stringStructOf(&namestr) dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len))) diff --git a/src/runtime/iface.go b/src/runtime/iface.go index 700bdc2f48..84f0ee8f0c 100644 --- a/src/runtime/iface.go +++ b/src/runtime/iface.go @@ -101,15 +101,15 @@ func additab(m *itab, locked, canfail bool) { iname := i.name.name() itype := i._type ipkg := i.name.pkgPath() - if ipkg == nil { - ipkg = inter.pkgpath + if ipkg == "" { + ipkg = inter.pkgpath.name() } for ; j < nt; j++ { t := &xmhdr[j] if typ.typeOff(t.mtyp) == itype && t.name.name() == iname { pkgPath := t.name.pkgPath() - if pkgPath == nil { - pkgPath = x.pkgpath + if pkgPath == "" { + pkgPath = x.pkgpath.name() } if t.name.isExported() || pkgPath == ipkg { if m != nil { diff --git a/src/runtime/type.go b/src/runtime/type.go index 86131d3ff3..711753bab5 100644 --- a/src/runtime/type.go +++ b/src/runtime/type.go @@ -6,10 +6,7 @@ package runtime -import ( - "runtime/internal/sys" - "unsafe" -) +import "unsafe" // tflag is documented in ../reflect/type.go. 
type tflag uint8 @@ -151,6 +148,33 @@ var reflectOffs struct { minv map[unsafe.Pointer]int32 } +func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name { + if off == 0 { + return name{} + } + base := uintptr(ptrInModule) + var md *moduledata + for next := &firstmoduledata; next != nil; next = next.next { + if base >= next.types && base < next.etypes { + md = next + break + } + } + if md == nil { + println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:") + for next := &firstmoduledata; next != nil; next = next.next { + println("\ttypes", hex(next.types), "etypes", hex(next.etypes)) + } + throw("runtime: name offset base pointer out of range") + } + res := md.types + uintptr(off) + if res > md.etypes { + println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes)) + throw("runtime: name offset out of range") + } + return name{(*byte)(unsafe.Pointer(res))} +} + func (t *_type) typeOff(off typeOff) *_type { if off == 0 { return nil @@ -240,6 +264,7 @@ func (t *functype) dotdotdot() bool { return t.outCount&(1<<15) != 0 } +type nameOff int32 type typeOff int32 type textOff int32 @@ -251,7 +276,7 @@ type method struct { } type uncommontype struct { - pkgpath *string + pkgpath name mcount uint16 // number of methods moff uint16 // offset from this uncommontype to [mcount]method } @@ -263,7 +288,7 @@ type imethod struct { type interfacetype struct { typ _type - pkgpath *string + pkgpath name mhdr []imethod } @@ -319,7 +344,7 @@ type structfield struct { type structtype struct { typ _type - pkgPath *string + pkgPath name fields []structfield } @@ -350,6 +375,9 @@ func (n *name) tagLen() int { } func (n *name) name() (s string) { + if n.bytes == nil { + return "" + } nl := n.nameLen() if nl == 0 { return "" @@ -372,16 +400,18 @@ func (n *name) tag() (s string) { return s } -func (n *name) pkgPath() *string { - if *n.data(0)&(1<<2) == 0 { - return nil +func (n *name) pkgPath() string { + if n.bytes == nil || 
*n.data(0)&(1<<2) == 0 { + return "" } off := 3 + n.nameLen() if tl := n.tagLen(); tl > 0 { off += 2 + tl } - off = int(round(uintptr(off), sys.PtrSize)) - return *(**string)(unsafe.Pointer(n.data(off))) + var nameOff nameOff + copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:]) + pkgPathName := resolveNameOff(unsafe.Pointer(n.bytes), nameOff) + return pkgPathName.name() } // typelinksinit scans the types from extra modules and builds the @@ -466,7 +496,7 @@ func typesEqual(t, v *_type) bool { if ut == nil || uv == nil { return false } - if !pkgPathEqual(ut.pkgpath, uv.pkgpath) { + if ut.pkgpath.name() != uv.pkgpath.name() { return false } } @@ -506,7 +536,7 @@ func typesEqual(t, v *_type) bool { case kindInterface: it := (*interfacetype)(unsafe.Pointer(t)) iv := (*interfacetype)(unsafe.Pointer(v)) - if !pkgPathEqual(it.pkgpath, iv.pkgpath) { + if it.pkgpath.name() != iv.pkgpath.name() { return false } if len(it.mhdr) != len(iv.mhdr) { @@ -518,7 +548,7 @@ func typesEqual(t, v *_type) bool { if tm.name.name() != vm.name.name() { return false } - if !pkgPathEqual(tm.name.pkgPath(), vm.name.pkgPath()) { + if tm.name.pkgPath() != vm.name.pkgPath() { return false } if !typesEqual(tm._type, vm._type) { @@ -550,7 +580,7 @@ func typesEqual(t, v *_type) bool { if tf.name.name() != vf.name.name() { return false } - if !pkgPathEqual(tf.name.pkgPath(), vf.name.pkgPath()) { + if tf.name.pkgPath() != vf.name.pkgPath() { return false } if !typesEqual(tf.typ, vf.typ) { @@ -570,13 +600,3 @@ func typesEqual(t, v *_type) bool { return false } } - -func pkgPathEqual(p, q *string) bool { - if p == q { - return true - } - if p == nil || q == nil { - return false - } - return *p == *q -} -- cgit v1.3 From 6e5027a37a851eb19dba7dad7ea5a8b43e27b842 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Wed, 13 Apr 2016 13:17:30 -0700 Subject: cmd/compile: don't export unneeded OAS, OASWB nodes Also: - "rewrite" node Op in exporter for some nodes instead 
of importer - more comments Change-Id: I809e6754d14987b28f1da9379951ffa2e690c2a7 Reviewed-on: https://go-review.googlesource.com/22008 Reviewed-by: Matthew Dempsky Run-TryBot: Robert Griesemer TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/bexport.go | 27 +++++++++++++--------- src/cmd/compile/internal/gc/bimport.go | 41 +++++++++++++++++----------------- 2 files changed, 36 insertions(+), 32 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 59a85c2f23..e0810f9139 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -783,7 +783,11 @@ func (p *exporter) param(q *Field, n int, numbered bool) { // supply the parameter package here. We need the package // when the function is inlined so we can properly resolve // the name. - // TODO(gri) should do this only once per function/method + // TODO(gri) This is compiler-specific. Try using importpkg + // here and then update the symbols if we find an inlined + // body only. Otherwise, the parameter name is ignored and + // the package doesn't matter. This would remove an int + // (likely 1 byte) for each named parameter. p.pkg(q.Sym.Pkg) } // TODO(gri) This is compiler-specific (escape info). @@ -1266,12 +1270,11 @@ func (p *exporter) stmt(n *Node) { // unimplemented - handled by default case case OAS, OASWB: - p.op(op) // Don't export "v = " initializing statements, hope they're always // preceded by the DCL which will be re-parsed and typecheck to reproduce // the "v = " again. 
- // TODO(gri) if n.Right == nil, don't emit anything - if p.bool(n.Right != nil) { + if n.Right != nil { + p.op(OAS) p.expr(n.Left) p.expr(n.Right) } @@ -1284,16 +1287,14 @@ func (p *exporter) stmt(n *Node) { p.expr(n.Right) } + case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: + fallthrough + case OAS2: p.op(OAS2) p.exprList(n.List) p.exprList(n.Rlist) - case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: - p.op(op) - p.exprList(n.List) - p.exprList(n.Rlist) - case ORETURN: p.op(ORETURN) p.exprList(n.List) @@ -1332,11 +1333,15 @@ func (p *exporter) stmt(n *Node) { p.stmtList(n.List) case OCASE, OXCASE: - p.op(op) + p.op(OXCASE) p.stmtList(n.List) p.stmtList(n.Nbody) - case OBREAK, OCONTINUE, OGOTO, OFALL, OXFALL: + case OFALL: + op = OXFALL + fallthrough + + case OBREAK, OCONTINUE, OGOTO, OXFALL: p.op(op) p.exprsOrNil(n.Left, nil) diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index 4a93b5a91d..223cc443aa 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -626,6 +626,10 @@ func (p *importer) float(x *Mpflt) { // re-establish the syntax tree's invariants. At some future point we might be // able to avoid this round-about way and create the rewritten nodes directly, // possibly avoiding a lot of duplicate work (name resolution, type checking). +// +// Refined nodes (e.g., ODOTPTR as a refinement of OXDOT) are exported as their +// unrefined nodes (since this is what the importer uses). The respective case +// entries are unreachable in the importer. 
func (p *importer) stmtList() []*Node { var list []*Node @@ -871,14 +875,11 @@ func (p *importer) node() *Node { // case ODCLFIELD: // unimplemented - case OAS, OASWB: - if p.bool() { - lhs := p.expr() - rhs := p.expr() - return Nod(OAS, lhs, rhs) - } - // TODO(gri) we should not have emitted anything here - return Nod(OEMPTY, nil, nil) + // case OAS, OASWB: + // unreachable - mapped to OAS case below by exporter + + case OAS: + return Nod(OAS, p.expr(), p.expr()) case OASOP: n := Nod(OASOP, nil, nil) @@ -892,15 +893,10 @@ func (p *importer) node() *Node { } return n - case OAS2: - lhs := p.exprList() - rhs := p.exprList() - n := Nod(OAS2, nil, nil) - n.List.Set(lhs) - n.Rlist.Set(rhs) - return n + // case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: + // unreachable - mapped to OAS2 case below by exporter - case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV: + case OAS2: n := Nod(OAS2, nil, nil) n.List.Set(p.exprList()) n.Rlist.Set(p.exprList()) @@ -954,7 +950,10 @@ func (p *importer) node() *Node { popdcl() return n - case OCASE, OXCASE: + // case OCASE, OXCASE: + // unreachable - mapped to OXCASE case below by exporter + + case OXCASE: markdcl() n := Nod(OXCASE, nil, nil) n.List.Set(p.exprList()) @@ -964,10 +963,10 @@ func (p *importer) node() *Node { popdcl() return n - case OBREAK, OCONTINUE, OGOTO, OFALL, OXFALL: - if op == OFALL { - op = OXFALL - } + // case OFALL: + // unreachable - mapped to OXFALL case below by exporter + + case OBREAK, OCONTINUE, OGOTO, OXFALL: left, _ := p.exprsOrNil() return Nod(op, left, nil) -- cgit v1.3 From ae9804595879eb07efd23b9c98eab46693573447 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Wed, 13 Apr 2016 16:57:23 -0700 Subject: cmd/compile: use correct export function (fix debugFormat) Tested with debugFormat enabled and running (export GO_GCFLAGS=-newexport; sh all.bash). 
Change-Id: If7d43e1e594ea43c644232b89e670f7abb6b003e Reviewed-on: https://go-review.googlesource.com/22033 Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/bexport.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index e0810f9139..eef2e2200d 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -350,7 +350,7 @@ func export(out *bufio.Writer, trace bool) int { if p.trace { p.tracef("\n") } - p.tag(-1) // invalid index terminates list + p.int(-1) // invalid index terminates list // for self-verification only (redundant) p.int(objcount) -- cgit v1.3 From 980ab12ade53e70d037ab2ab475148b216d84a14 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 13 Apr 2016 18:37:18 -0700 Subject: cmd/compile/internal/gc: change flags to bool where possible Some of the Debug[x] flags are actually boolean too, but not all, so they need to be handled separately. While here, change some obj.Flagstr and obj.Flagint64 calls to directly use flag.StringVar and flag.Int64Var instead. 
Change-Id: Iccedf6fed4328240ee2257f57fe6d66688f237c4 Reviewed-on: https://go-review.googlesource.com/22052 Reviewed-by: Michael Hudson-Doyle --- src/cmd/compile/internal/gc/alg.go | 5 +- src/cmd/compile/internal/gc/bexport.go | 2 +- src/cmd/compile/internal/gc/closure.go | 2 +- src/cmd/compile/internal/gc/dcl.go | 4 +- src/cmd/compile/internal/gc/export.go | 8 ++-- src/cmd/compile/internal/gc/gen.go | 2 +- src/cmd/compile/internal/gc/go.go | 18 ++++---- src/cmd/compile/internal/gc/inl.go | 4 +- src/cmd/compile/internal/gc/lex.go | 6 +-- src/cmd/compile/internal/gc/main.go | 79 ++++++++++++++++---------------- src/cmd/compile/internal/gc/obj.go | 8 ++-- src/cmd/compile/internal/gc/pgen.go | 2 +- src/cmd/compile/internal/gc/racewalk.go | 10 ++-- src/cmd/compile/internal/gc/reflect.go | 4 +- src/cmd/compile/internal/gc/ssa.go | 4 +- src/cmd/compile/internal/gc/subr.go | 2 +- src/cmd/compile/internal/gc/typecheck.go | 4 +- src/cmd/compile/internal/gc/unsafe.go | 2 +- src/cmd/compile/internal/gc/walk.go | 9 ++-- 19 files changed, 88 insertions(+), 87 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index e9b5afe838..6e85438610 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -316,11 +316,12 @@ func genhash(sym *Sym, t *Type) { // for a struct containing a reflect.Value, which itself has // an unexported field of type unsafe.Pointer. old_safemode := safemode + safemode = false - safemode = 0 Disable_checknil++ funccompile(fn) Disable_checknil-- + safemode = old_safemode } @@ -509,7 +510,7 @@ func geneq(sym *Sym, t *Type) { // for a struct containing a reflect.Value, which itself has // an unexported field of type unsafe.Pointer. old_safemode := safemode - safemode = 0 + safemode = false // Disable checknils while compiling this code. 
// We are comparing a struct or an array, diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index eef2e2200d..e5fa3c39a6 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -261,7 +261,7 @@ func export(out *bufio.Writer, trace bool) int { } // write compiler-specific flags - p.bool(safemode != 0) + p.bool(safemode) if p.trace { p.tracef("\n") } diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 80c8d309af..db4eb3f14d 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -419,7 +419,7 @@ func closuredebugruntimecheck(r *Node) { Warnl(r.Lineno, "stack closure, captured vars = %v", r.Func.Cvars) } } - if compiling_runtime > 0 && r.Esc == EscHeap { + if compiling_runtime && r.Esc == EscHeap { yyerrorl(r.Lineno, "heap-allocated closure, not allowed in runtime.") } } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index c652c65962..e1028f681c 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -1330,7 +1330,7 @@ func makefuncsym(s *Sym) { if isblanksym(s) { return } - if compiling_runtime != 0 && s.Name == "getg" { + if compiling_runtime && s.Name == "getg" { // runtime.getg() is not a real function and so does // not get a funcsym. 
return @@ -1440,7 +1440,7 @@ func (c *nowritebarrierrecChecker) visitcall(n *Node) { if fn == nil || fn.Op != ONAME || fn.Class != PFUNC || fn.Name.Defn == nil { return } - if (compiling_runtime != 0 || fn.Sym.Pkg == Runtimepkg) && fn.Sym.Name == "allocm" { + if (compiling_runtime || fn.Sym.Pkg == Runtimepkg) && fn.Sym.Name == "allocm" { return } defn := fn.Name.Defn diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index ae36657a65..cfe192f3ba 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -15,8 +15,8 @@ import ( ) var ( - newexport int // if set, use new export format - Debug_export int // if set, print debugging information about export data + newexport bool // if set, use new export format + Debug_export int // if set, print debugging information about export data exportsize int ) @@ -377,7 +377,7 @@ func dumpexport() { } size := 0 // size of export section without enclosing markers - if forceNewExport || newexport != 0 { + if forceNewExport || newexport { // binary export // The linker also looks for the $$ marker - use char after $$ to distinguish format. 
exportf("\n$$B\n") // indicate binary format @@ -417,7 +417,7 @@ func dumpexport() { exportf("\n$$\n") // indicate textual format exportsize = 0 exportf("package %s", localpkg.Name) - if safemode != 0 { + if safemode { exportf(" safe") } exportf("\n") diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index 7527452c93..cc624cce7a 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -246,7 +246,7 @@ func cgen_dcl(n *Node) { if n.Class&PHEAP == 0 { return } - if compiling_runtime != 0 { + if compiling_runtime { Yyerror("%v escapes to heap, not allowed in runtime.", n) } if prealloc[n] == nil { diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 8411d2d0ac..af9aaf0dae 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -144,9 +144,9 @@ var nsyntaxerrors int var decldepth int32 -var safemode int +var safemode bool -var nolocalimports int +var nolocalimports bool var Debug [256]int @@ -261,21 +261,21 @@ var Funcdepth int32 var typecheckok bool -var compiling_runtime int +var compiling_runtime bool var compiling_wrappers int -var use_writebarrier int +var use_writebarrier bool -var pure_go int +var pure_go bool var flag_installsuffix string -var flag_race int +var flag_race bool -var flag_msan int +var flag_msan bool -var flag_largemodel int +var flag_largemodel bool // Whether we are adding any sort of code instrumentation, such as // when the race detector is enabled. 
@@ -285,7 +285,7 @@ var debuglive int var Ctxt *obj.Link -var writearchive int +var writearchive bool var bstdout *bufio.Writer diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index ea2394e7f9..f9e425618b 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -71,7 +71,7 @@ func typecheckinl(fn *Node) { } save_safemode := safemode - safemode = 0 + safemode = false savefn := Curfn Curfn = fn @@ -492,7 +492,7 @@ func mkinlcall(n *Node, fn *Node, isddd bool) *Node { pkg := fnpkg(fn) if pkg != localpkg && pkg != nil { - safemode = 0 + safemode = false } n = mkinlcall1(n, fn, isddd) safemode = save_safemode diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go index 4b95bb7124..09fed98985 100644 --- a/src/cmd/compile/internal/gc/lex.go +++ b/src/cmd/compile/internal/gc/lex.go @@ -914,17 +914,17 @@ func (l *lexer) getlinepragma() rune { case "go:noinline": l.pragma |= Noinline case "go:systemstack": - if compiling_runtime == 0 { + if !compiling_runtime { Yyerror("//go:systemstack only allowed in runtime") } l.pragma |= Systemstack case "go:nowritebarrier": - if compiling_runtime == 0 { + if !compiling_runtime { Yyerror("//go:nowritebarrier only allowed in runtime") } l.pragma |= Nowritebarrier case "go:nowritebarrierrec": - if compiling_runtime == 0 { + if !compiling_runtime { Yyerror("//go:nowritebarrierrec only allowed in runtime") } l.pragma |= Nowritebarrierrec | Nowritebarrier // implies Nowritebarrier diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 45a510d577..f41097b83b 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -142,15 +142,14 @@ func Main() { Nacl = goos == "nacl" if Nacl { - flag_largemodel = 1 + flag_largemodel = true } - outfile = "" - obj.Flagcount("+", "compiling runtime", &compiling_runtime) + flag.BoolVar(&compiling_runtime, "+", false, "compiling runtime") 
obj.Flagcount("%", "debug non-static initializers", &Debug['%']) obj.Flagcount("A", "for bootstrapping, allow 'any' type", &Debug['A']) obj.Flagcount("B", "disable bounds checking", &Debug['B']) - obj.Flagstr("D", "set relative `path` for local imports", &localimport) + flag.StringVar(&localimport, "D", "", "set relative `path` for local imports") obj.Flagcount("E", "debug symbol export", &Debug['E']) obj.Flagfn1("I", "add `directory` to import search path", addidir) obj.Flagcount("K", "debug missing line numbers", &Debug['K']) @@ -162,57 +161,59 @@ func Main() { obj.Flagcount("S", "print assembly listing", &Debug['S']) obj.Flagfn0("V", "print compiler version", doversion) obj.Flagcount("W", "debug parse tree after type checking", &Debug['W']) - obj.Flagstr("asmhdr", "write assembly header to `file`", &asmhdr) - obj.Flagstr("buildid", "record `id` as the build id in the export metadata", &buildid) - obj.Flagcount("complete", "compiling complete package (no C or assembly)", &pure_go) - obj.Flagstr("d", "print debug information about items in `list`", &debugstr) + flag.StringVar(&asmhdr, "asmhdr", "", "write assembly header to `file`") + flag.StringVar(&buildid, "buildid", "", "record `id` as the build id in the export metadata") + flag.BoolVar(&pure_go, "complete", false, "compiling complete package (no C or assembly)") + flag.StringVar(&debugstr, "d", "", "print debug information about items in `list`") obj.Flagcount("e", "no limit on number of errors reported", &Debug['e']) obj.Flagcount("f", "debug stack frames", &Debug['f']) obj.Flagcount("g", "debug code generation", &Debug['g']) obj.Flagcount("h", "halt on error", &Debug['h']) obj.Flagcount("i", "debug line number stack", &Debug['i']) obj.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap) - obj.Flagstr("installsuffix", "set pkg directory `suffix`", &flag_installsuffix) + flag.StringVar(&flag_installsuffix, "installsuffix", "", "set pkg directory `suffix`") 
obj.Flagcount("j", "debug runtime-initialized variables", &Debug['j']) obj.Flagcount("l", "disable inlining", &Debug['l']) obj.Flagcount("live", "debug liveness analysis", &debuglive) obj.Flagcount("m", "print optimization decisions", &Debug['m']) - obj.Flagcount("msan", "build code compatible with C/C++ memory sanitizer", &flag_msan) - obj.Flagcount("newexport", "use new export format", &newexport) // TODO(gri) remove eventually (issue 13241) - obj.Flagcount("nolocalimports", "reject local (relative) imports", &nolocalimports) - obj.Flagstr("o", "write output to `file`", &outfile) - obj.Flagstr("p", "set expected package import `path`", &myimportpath) - obj.Flagcount("pack", "write package file instead of object file", &writearchive) + flag.BoolVar(&flag_msan, "msan", false, "build code compatible with C/C++ memory sanitizer") + flag.BoolVar(&newexport, "newexport", false, "use new export format") // TODO(gri) remove eventually (issue 13241) + flag.BoolVar(&nolocalimports, "nolocalimports", false, "reject local (relative) imports") + flag.StringVar(&outfile, "o", "", "write output to `file`") + flag.StringVar(&myimportpath, "p", "", "set expected package import `path`") + flag.BoolVar(&writearchive, "pack", false, "write package file instead of object file") obj.Flagcount("r", "debug generated wrappers", &Debug['r']) - obj.Flagcount("race", "enable race detector", &flag_race) + flag.BoolVar(&flag_race, "race", false, "enable race detector") obj.Flagcount("s", "warn about composite literals that can be simplified", &Debug['s']) - obj.Flagstr("trimpath", "remove `prefix` from recorded source file paths", &Ctxt.LineHist.TrimPathPrefix) - obj.Flagcount("u", "reject unsafe code", &safemode) + flag.StringVar(&Ctxt.LineHist.TrimPathPrefix, "trimpath", "", "remove `prefix` from recorded source file paths") + flag.BoolVar(&safemode, "u", false, "reject unsafe code") obj.Flagcount("v", "increase debug verbosity", &Debug['v']) obj.Flagcount("w", "debug type checking", 
&Debug['w']) - use_writebarrier = 1 - obj.Flagcount("wb", "enable write barrier", &use_writebarrier) + flag.BoolVar(&use_writebarrier, "wb", true, "enable write barrier") obj.Flagcount("x", "debug lexer", &Debug['x']) obj.Flagcount("y", "debug declarations in canned imports (with -d)", &Debug['y']) - var flag_shared int + var flag_shared bool var flag_dynlink bool if supportsDynlink(Thearch.LinkArch.Arch) { - obj.Flagcount("shared", "generate code that can be linked into a shared library", &flag_shared) + flag.BoolVar(&flag_shared, "shared", false, "generate code that can be linked into a shared library") flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries") } if Thearch.LinkArch.Family == sys.AMD64 { - obj.Flagcount("largemodel", "generate code that assumes a large memory model", &flag_largemodel) + flag.BoolVar(&flag_largemodel, "largemodel", false, "generate code that assumes a large memory model") } - obj.Flagstr("cpuprofile", "write cpu profile to `file`", &cpuprofile) - obj.Flagstr("memprofile", "write memory profile to `file`", &memprofile) - obj.Flagint64("memprofilerate", "set runtime.MemProfileRate to `rate`", &memprofilerate) + flag.StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to `file`") + flag.StringVar(&memprofile, "memprofile", "", "write memory profile to `file`") + flag.Int64Var(&memprofilerate, "memprofilerate", 0, "set runtime.MemProfileRate to `rate`") flag.BoolVar(&ssaEnabled, "ssa", true, "use SSA backend to generate code") obj.Flagparse(usage) if flag_dynlink { - flag_shared = 1 + flag_shared = true + } + if flag_shared { + // TODO(mdempsky): Change Flag_shared to bool. 
+ Ctxt.Flag_shared = 1 } - Ctxt.Flag_shared = int32(flag_shared) Ctxt.Flag_dynlink = flag_dynlink Ctxt.Flag_optimize = Debug['N'] == 0 @@ -225,17 +226,17 @@ func Main() { startProfile() - if flag_race != 0 { + if flag_race { racepkg = mkpkg("runtime/race") racepkg.Name = "race" } - if flag_msan != 0 { + if flag_msan { msanpkg = mkpkg("runtime/msan") msanpkg.Name = "msan" } - if flag_race != 0 && flag_msan != 0 { + if flag_race && flag_msan { log.Fatal("cannot use both -race and -msan") - } else if flag_race != 0 || flag_msan != 0 { + } else if flag_race || flag_msan { instrumenting = true } @@ -471,7 +472,7 @@ func Main() { fninit(xtop) } - if compiling_runtime != 0 { + if compiling_runtime { checknowritebarrierrec() } @@ -569,7 +570,7 @@ func islocalname(name string) bool { func findpkg(name string) (file string, ok bool) { if islocalname(name) { - if safemode != 0 || nolocalimports != 0 { + if safemode || nolocalimports { return "", false } @@ -612,10 +613,10 @@ func findpkg(name string) (file string, ok bool) { if flag_installsuffix != "" { suffixsep = "_" suffix = flag_installsuffix - } else if flag_race != 0 { + } else if flag_race { suffixsep = "_" suffix = "race" - } else if flag_msan != 0 { + } else if flag_msan { suffixsep = "_" suffix = "msan" } @@ -694,7 +695,7 @@ func importfile(f *Val, indent []byte) { } if path_ == "unsafe" { - if safemode != 0 { + if safemode { Yyerror("cannot import package unsafe") errorexit() } @@ -818,7 +819,7 @@ func importfile(f *Val, indent []byte) { errorexit() } - if safemode != 0 && !importpkg.Safe { + if safemode && !importpkg.Safe { Yyerror("cannot import unsafe package %q", importpkg.Path) } } @@ -896,7 +897,7 @@ func mkpackage(pkgname string) { p = p[:i] } suffix := ".o" - if writearchive > 0 { + if writearchive { suffix = ".a" } outfile = p + suffix diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index eed0ed6e24..59ce0547c8 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ 
b/src/cmd/compile/internal/gc/obj.go @@ -33,7 +33,7 @@ func dumpobj() { startobj := int64(0) var arhdr [ArhdrSize]byte - if writearchive != 0 { + if writearchive { bout.WriteString("!\n") arhdr = [ArhdrSize]byte{} bout.Write(arhdr[:]) @@ -43,7 +43,7 @@ func dumpobj() { fmt.Fprintf(bout, "go object %s %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring()) dumpexport() - if writearchive != 0 { + if writearchive { bout.Flush() size := bout.Offset() - startobj if size&1 != 0 { @@ -62,7 +62,7 @@ func dumpobj() { } if pragcgobuf != "" { - if writearchive != 0 { + if writearchive { // write empty export section; must be before cgo section fmt.Fprintf(bout, "\n$$\n\n$$\n\n") } @@ -90,7 +90,7 @@ func dumpobj() { dumpdata() obj.Writeobjdirect(Ctxt, bout) - if writearchive != 0 { + if writearchive { bout.Flush() size := bout.Offset() - startobj if size&1 != 0 { diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index baa960bf75..7b9b91e7b0 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -364,7 +364,7 @@ func compile(fn *Node) { dowidth(Curfn.Type) if len(fn.Nbody.Slice()) == 0 { - if pure_go != 0 || strings.HasPrefix(fn.Func.Nname.Sym.Name, "init.") { + if pure_go || strings.HasPrefix(fn.Func.Nname.Sym.Name, "init.") { Yyerror("missing function body for %q", fn.Func.Nname.Sym.Name) return } diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index f6e65146d6..a8a5e92485 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -54,14 +54,14 @@ func instrument(fn *Node) { return } - if flag_race == 0 || !ispkgin(norace_inst_pkgs) { + if !flag_race || !ispkgin(norace_inst_pkgs) { instrumentlist(fn.Nbody, nil) // nothing interesting for race detector in fn->enter instrumentlist(fn.Func.Exit, nil) } - if flag_race != 0 { + if flag_race { // nodpc is the PC of the caller as extracted by 
// getcallerpc. We use -widthptr(FP) for x86. // BUG: this will not work on arm. @@ -503,7 +503,7 @@ func callinstr(np **Node, init *Nodes, wr int, skip int) bool { n = treecopy(n, 0) makeaddable(n) var f *Node - if flag_msan != 0 { + if flag_msan { name := "msanread" if wr != 0 { name = "msanwrite" @@ -515,7 +515,7 @@ func callinstr(np **Node, init *Nodes, wr int, skip int) bool { Fatalf("instrument: %v badwidth", t) } f = mkcall(name, nil, init, uintptraddr(n), Nodintconst(w)) - } else if flag_race != 0 && (t.IsStruct() || t.IsArray()) { + } else if flag_race && (t.IsStruct() || t.IsArray()) { name := "racereadrange" if wr != 0 { name = "racewriterange" @@ -527,7 +527,7 @@ func callinstr(np **Node, init *Nodes, wr int, skip int) bool { Fatalf("instrument: %v badwidth", t) } f = mkcall(name, nil, init, uintptraddr(n), Nodintconst(w)) - } else if flag_race != 0 { + } else if flag_race { name := "raceread" if wr != 0 { name = "racewrite" diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 70a75f9324..df68f46d4c 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -1424,10 +1424,10 @@ func dumptypestructs() { // add paths for runtime and main, which 6l imports implicitly. 
dimportpath(Runtimepkg) - if flag_race != 0 { + if flag_race { dimportpath(racepkg) } - if flag_msan != 0 { + if flag_msan { dimportpath(msanpkg) } dimportpath(mkpkg("main")) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index fdd14953e6..4a93dc1087 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -554,7 +554,7 @@ func (s *state) stmt(n *Node) { case OCALLFUNC, OCALLMETH, OCALLINTER: s.call(n, callNormal) if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class == PFUNC && - (compiling_runtime != 0 && n.Left.Sym.Name == "throw" || + (compiling_runtime && n.Left.Sym.Name == "throw" || n.Left.Sym.Pkg == Runtimepkg && (n.Left.Sym.Name == "gopanic" || n.Left.Sym.Name == "selectgo" || n.Left.Sym.Name == "block")) { m := s.mem() b := s.endBlock() @@ -579,7 +579,7 @@ func (s *state) stmt(n *Node) { if n.Left.Class&PHEAP == 0 { return } - if compiling_runtime != 0 { + if compiling_runtime { Fatalf("%v escapes to heap, not allowed in runtime.", n) } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 776eb9c64e..f6af11adba 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -750,7 +750,7 @@ func assignop(src *Type, dst *Type, why *string) Op { // TODO(rsc,lvd): This behaves poorly in the presence of inlining. 
// https://golang.org/issue/2795 - if safemode != 0 && importpkg == nil && src != nil && src.Etype == TUNSAFEPTR { + if safemode && importpkg == nil && src != nil && src.Etype == TUNSAFEPTR { Yyerror("cannot use unsafe.Pointer") errorexit() } diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 7089d7de72..6067677738 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1354,7 +1354,7 @@ OpSwitch: if t.Results().NumFields() == 1 { n.Type = l.Type.Results().Field(0).Type - if n.Op == OCALLFUNC && n.Left.Op == ONAME && (compiling_runtime != 0 || n.Left.Sym.Pkg == Runtimepkg) && n.Left.Sym.Name == "getg" { + if n.Op == OCALLFUNC && n.Left.Op == ONAME && (compiling_runtime || n.Left.Sym.Pkg == Runtimepkg) && n.Left.Sym.Name == "getg" { // Emit code for runtime.getg() directly instead of calling function. // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk, // so that the ordering pass can make sure to preserve the semantics of the original code @@ -2176,7 +2176,7 @@ OpSwitch: } } - if safemode != 0 && incannedimport == 0 && importpkg == nil && compiling_wrappers == 0 && t != nil && t.Etype == TUNSAFEPTR { + if safemode && incannedimport == 0 && importpkg == nil && compiling_wrappers == 0 && t != nil && t.Etype == TUNSAFEPTR { Yyerror("cannot use unsafe.Pointer") } diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go index 338f3c0eae..e1d3b40098 100644 --- a/src/cmd/compile/internal/gc/unsafe.go +++ b/src/cmd/compile/internal/gc/unsafe.go @@ -9,7 +9,7 @@ func unsafenmagic(nn *Node) *Node { fn := nn.Left args := nn.List - if safemode != 0 || fn == nil || fn.Op != ONAME { + if safemode || fn == nil || fn.Op != ONAME { return nil } s := fn.Sym diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 3e5f5161db..78bad8d348 100644 --- 
a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -594,8 +594,7 @@ opswitch: // for a struct containing a reflect.Value, which itself has // an unexported field of type unsafe.Pointer. old_safemode := safemode - - safemode = 0 + safemode = false n = walkcompare(n, init) safemode = old_safemode @@ -1938,7 +1937,7 @@ func walkprint(nn *Node, init *Nodes) *Node { on = substArgTypes(on, n.Type) // any-1 } else if Isint[et] { if et == TUINT64 { - if (t.Sym.Pkg == Runtimepkg || compiling_runtime != 0) && t.Sym.Name == "hex" { + if (t.Sym.Pkg == Runtimepkg || compiling_runtime) && t.Sym.Name == "hex" { on = syslook("printhex") } else { on = syslook("printuint") @@ -2041,7 +2040,7 @@ func isglobal(n *Node) bool { // Do we need a write barrier for the assignment l = r? func needwritebarrier(l *Node, r *Node) bool { - if use_writebarrier == 0 { + if !use_writebarrier { return false } @@ -2550,7 +2549,7 @@ func paramstoheap(params *Type, out bool) []*Node { } // generate allocation & copying code - if compiling_runtime != 0 { + if compiling_runtime { Yyerror("%v escapes to heap, not allowed in runtime.", v) } if prealloc[v] == nil { -- cgit v1.3 From babfb4ec3ba3e4e36b1003d6efbaeddf2e975240 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Wed, 13 Apr 2016 18:41:59 -0700 Subject: cmd/internal/obj: change Link.Flag_shared to bool Change-Id: I9bda2ce6f45fb8292503f86d8f9f161601f222b7 Reviewed-on: https://go-review.googlesource.com/22053 Reviewed-by: Michael Hudson-Doyle --- src/cmd/asm/main.go | 4 +--- src/cmd/compile/internal/gc/cgen.go | 2 +- src/cmd/compile/internal/gc/main.go | 8 +------- src/cmd/compile/internal/ppc64/galign.go | 2 +- src/cmd/compile/internal/ppc64/gsubr.go | 4 ++-- src/cmd/compile/internal/ppc64/reg.go | 2 +- src/cmd/compile/internal/x86/reg.go | 2 +- src/cmd/internal/obj/arm/asm5.go | 8 ++++---- src/cmd/internal/obj/arm64/asm7.go | 2 +- src/cmd/internal/obj/link.go | 2 +- src/cmd/internal/obj/ppc64/asm9.go | 10 
+++++----- src/cmd/internal/obj/ppc64/obj9.go | 4 ++-- src/cmd/internal/obj/s390x/asmz.go | 2 +- src/cmd/internal/obj/x86/asm6.go | 22 +++++++++++----------- src/cmd/internal/obj/x86/obj6.go | 4 ++-- 15 files changed, 35 insertions(+), 43 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/asm/main.go b/src/cmd/asm/main.go index f010ca93f1..40e1d9c4a9 100644 --- a/src/cmd/asm/main.go +++ b/src/cmd/asm/main.go @@ -39,9 +39,7 @@ func main() { } ctxt.LineHist.TrimPathPrefix = *flags.TrimPath ctxt.Flag_dynlink = *flags.Dynlink - if *flags.Shared || *flags.Dynlink { - ctxt.Flag_shared = 1 - } + ctxt.Flag_shared = *flags.Shared || *flags.Dynlink ctxt.Bso = bufio.NewWriter(os.Stdout) defer ctxt.Bso.Flush() diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go index 9de2a19f68..32ca1ae940 100644 --- a/src/cmd/compile/internal/gc/cgen.go +++ b/src/cmd/compile/internal/gc/cgen.go @@ -2363,7 +2363,7 @@ func Ginscall(f *Node, proc int) { // If the MOVD is not needed, insert a hardware NOP // so that the same number of instructions are used // on ppc64 in both shared and non-shared modes. - if Ctxt.Flag_shared != 0 { + if Ctxt.Flag_shared { p := Thearch.Gins(ppc64.AMOVD, nil, nil) p.From.Type = obj.TYPE_MEM p.From.Offset = 24 diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index f41097b83b..2baf9f6585 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -207,13 +207,7 @@ func Main() { flag.BoolVar(&ssaEnabled, "ssa", true, "use SSA backend to generate code") obj.Flagparse(usage) - if flag_dynlink { - flag_shared = true - } - if flag_shared { - // TODO(mdempsky): Change Flag_shared to bool. 
- Ctxt.Flag_shared = 1 - } + Ctxt.Flag_shared = flag_dynlink || flag_shared Ctxt.Flag_dynlink = flag_dynlink Ctxt.Flag_optimize = Debug['N'] == 0 diff --git a/src/cmd/compile/internal/ppc64/galign.go b/src/cmd/compile/internal/ppc64/galign.go index 04fa4cfc78..a83dff9a8b 100644 --- a/src/cmd/compile/internal/ppc64/galign.go +++ b/src/cmd/compile/internal/ppc64/galign.go @@ -11,7 +11,7 @@ import ( ) func betypeinit() { - if gc.Ctxt.Flag_shared != 0 { + if gc.Ctxt.Flag_shared { gc.Thearch.ReservedRegs = append(gc.Thearch.ReservedRegs, ppc64.REG_R2) gc.Thearch.ReservedRegs = append(gc.Thearch.ReservedRegs, ppc64.REG_R12) } diff --git a/src/cmd/compile/internal/ppc64/gsubr.go b/src/cmd/compile/internal/ppc64/gsubr.go index de6e2fbe05..eb6cd2c5e9 100644 --- a/src/cmd/compile/internal/ppc64/gsubr.go +++ b/src/cmd/compile/internal/ppc64/gsubr.go @@ -580,7 +580,7 @@ func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog { case obj.ACALL: if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR { // Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR. - if gc.Ctxt.Flag_shared != 0 { + if gc.Ctxt.Flag_shared { // Make sure function pointer is in R12 as well when // compiling Go into PIC. 
// TODO(mwhudson): it would obviously be better to @@ -602,7 +602,7 @@ func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog { p.To.Type = obj.TYPE_REG p.To.Reg = ppc64.REG_CTR - if gc.Ctxt.Flag_shared != 0 { + if gc.Ctxt.Flag_shared { // When compiling Go into PIC, the function we just // called via pointer might have been implemented in // a separate module and so overwritten the TOC diff --git a/src/cmd/compile/internal/ppc64/reg.go b/src/cmd/compile/internal/ppc64/reg.go index 447679e207..558ba4a4f4 100644 --- a/src/cmd/compile/internal/ppc64/reg.go +++ b/src/cmd/compile/internal/ppc64/reg.go @@ -113,7 +113,7 @@ func excludedregs() uint64 { // Exclude registers with fixed functions regbits := 1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS) | RtoB(ppc64.REGTMP) - if gc.Ctxt.Flag_shared != 0 { + if gc.Ctxt.Flag_shared { // When compiling Go into PIC, R2 is reserved to be the TOC pointer // and R12 so that calls via function pointer can stomp on it. regbits |= RtoB(ppc64.REG_R2) diff --git a/src/cmd/compile/internal/x86/reg.go b/src/cmd/compile/internal/x86/reg.go index 76d90b8e89..d49a1aed9d 100644 --- a/src/cmd/compile/internal/x86/reg.go +++ b/src/cmd/compile/internal/x86/reg.go @@ -62,7 +62,7 @@ func regnames(n *int) []string { } func excludedregs() uint64 { - if gc.Ctxt.Flag_shared != 0 { + if gc.Ctxt.Flag_shared { return RtoB(x86.REG_SP) | RtoB(x86.REG_CX) } else { return RtoB(x86.REG_SP) diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go index f49ee65a04..564f96a94e 100644 --- a/src/cmd/internal/obj/arm/asm5.go +++ b/src/cmd/internal/obj/arm/asm5.go @@ -870,7 +870,7 @@ func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) { t.To.Type = a.Type t.To.Name = a.Name - if ctxt.Flag_shared != 0 && t.To.Sym != nil { + if ctxt.Flag_shared && t.To.Sym != nil { t.Rel = p } @@ -1015,7 +1015,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int { ctxt.Instoffset = 0 // s.b. 
unused but just in case if a.Sym.Type == obj.STLSBSS { - if ctxt.Flag_shared != 0 { + if ctxt.Flag_shared { return C_TLS_IE } else { return C_TLS_LE @@ -1322,7 +1322,7 @@ func buildop(ctxt *obj.Link) { } for n = 0; optab[n].as != obj.AXXX; n++ { if optab[n].flag&LPCREL != 0 { - if ctxt.Flag_shared != 0 { + if ctxt.Flag_shared { optab[n].size += int8(optab[n].pcrelsiz) } else { optab[n].flag &^= LPCREL @@ -1633,7 +1633,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) { rel.Sym = p.To.Sym rel.Add = p.To.Offset - if ctxt.Flag_shared != 0 { + if ctxt.Flag_shared { if p.To.Name == obj.NAME_GOTREF { rel.Type = obj.R_GOTPCREL } else { diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index d0ae6115cb..55397132e0 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -972,7 +972,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int { ctxt.Instoffset = a.Offset if a.Sym != nil { // use relocation if a.Sym.Type == obj.STLSBSS { - if ctxt.Flag_shared != 0 { + if ctxt.Flag_shared { return C_TLS_IE } else { return C_TLS_LE diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 55c9f4f9e2..5f257f60ab 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -617,7 +617,7 @@ type Link struct { Debugvlog int32 Debugdivmod int32 Debugpcln int32 - Flag_shared int32 + Flag_shared bool Flag_dynlink bool Flag_optimize bool Bso *bufio.Writer diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go index 0497d3b678..e793f26803 100644 --- a/src/cmd/internal/obj/ppc64/asm9.go +++ b/src/cmd/internal/obj/ppc64/asm9.go @@ -585,7 +585,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int { ctxt.Instoffset = a.Offset if a.Sym != nil { // use relocation if a.Sym.Type == obj.STLSBSS { - if ctxt.Flag_shared != 0 { + if ctxt.Flag_shared { return C_TLS_IE } else { return C_TLS_LE @@ -1413,7 +1413,7 @@ func opform(ctxt *obj.Link, insn uint32) 
int { func symbolAccess(ctxt *obj.Link, s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) { var base uint32 form := opform(ctxt, op) - if ctxt.Flag_shared != 0 { + if ctxt.Flag_shared { base = REG_R2 } else { base = REG_R0 @@ -1425,7 +1425,7 @@ func symbolAccess(ctxt *obj.Link, s *obj.LSym, d int64, reg int16, op uint32) (o rel.Siz = 8 rel.Sym = s rel.Add = d - if ctxt.Flag_shared != 0 { + if ctxt.Flag_shared { switch form { case D_FORM: rel.Type = obj.R_ADDRPOWER_TOCREL @@ -1646,7 +1646,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) { if v != 0 { ctxt.Diag("illegal indexed instruction\n%v", p) } - if ctxt.Flag_shared != 0 && r == REG_R13 { + if ctxt.Flag_shared && r == REG_R13 { rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc) rel.Siz = 4 @@ -1677,7 +1677,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) { if v != 0 { ctxt.Diag("illegal indexed instruction\n%v", p) } - if ctxt.Flag_shared != 0 && r == REG_R13 { + if ctxt.Flag_shared && r == REG_R13 { rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc) rel.Siz = 4 diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go index 7a24d1d1bf..4f9b3943cf 100644 --- a/src/cmd/internal/obj/ppc64/obj9.go +++ b/src/cmd/internal/obj/ppc64/obj9.go @@ -470,7 +470,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) { q = p - if ctxt.Flag_shared != 0 && cursym.Name != "runtime.duffzero" && cursym.Name != "runtime.duffcopy" && cursym.Name != "runtime.stackBarrier" { + if ctxt.Flag_shared && cursym.Name != "runtime.duffzero" && cursym.Name != "runtime.duffcopy" && cursym.Name != "runtime.stackBarrier" { // When compiling Go into PIC, all functions must start // with instructions to load the TOC pointer into r2: // @@ -558,7 +558,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) { q.Spadj = int32(-aoffset) } - if ctxt.Flag_shared != 0 { + if ctxt.Flag_shared { q = obj.Appendp(ctxt, q) q.As = AMOVD q.Lineno = p.Lineno diff --git 
a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go index bae4dc3ce7..9b26580d11 100644 --- a/src/cmd/internal/obj/s390x/asmz.go +++ b/src/cmd/internal/obj/s390x/asmz.go @@ -473,7 +473,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int { } ctxt.Instoffset = a.Offset if a.Sym.Type == obj.STLSBSS { - if ctxt.Flag_shared != 0 { + if ctxt.Flag_shared { return C_TLS_IE // initial exec model } return C_TLS_LE // local exec model diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go index c563a7a48d..e806a834fd 100644 --- a/src/cmd/internal/obj/x86/asm6.go +++ b/src/cmd/internal/obj/x86/asm6.go @@ -2165,7 +2165,7 @@ func prefixof(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int { return 0x64 // FS } - if ctxt.Flag_shared != 0 { + if ctxt.Flag_shared { log.Fatalf("unknown TLS base register for linux with -shared") } else { return 0x64 // FS @@ -2185,7 +2185,7 @@ func prefixof(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int { } if p.Mode == 32 { - if a.Index == REG_TLS && ctxt.Flag_shared != 0 { + if a.Index == REG_TLS && ctxt.Flag_shared { // When building for inclusion into a shared library, an instruction of the form // MOVL 0(CX)(TLS*1), AX // becomes @@ -2214,7 +2214,7 @@ func prefixof(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int { return 0x26 case REG_TLS: - if ctxt.Flag_shared != 0 { + if ctxt.Flag_shared { // When building for inclusion into a shared library, an instruction of the form // MOV 0(CX)(TLS*1), AX // becomes @@ -2288,7 +2288,7 @@ func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int { case obj.NAME_EXTERN, obj.NAME_STATIC: - if a.Sym != nil && isextern(a.Sym) || (p.Mode == 32 && ctxt.Flag_shared == 0) { + if a.Sym != nil && isextern(a.Sym) || (p.Mode == 32 && !ctxt.Flag_shared) { return Yi32 } return Yiauto // use pc-relative addressing @@ -2707,7 +2707,7 @@ func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int64 { if a.Name == obj.NAME_GOTREF { r.Siz = 4 r.Type = obj.R_GOTPCREL - } else if 
isextern(s) || (p.Mode != 64 && ctxt.Flag_shared == 0) { + } else if isextern(s) || (p.Mode != 64 && !ctxt.Flag_shared) { r.Siz = 4 r.Type = obj.R_ADDR } else { @@ -2728,7 +2728,7 @@ func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int64 { log.Fatalf("reloc") } - if ctxt.Flag_shared == 0 || isAndroid { + if !ctxt.Flag_shared || isAndroid { r.Type = obj.R_TLS_LE r.Siz = 4 r.Off = -1 // caller must fill in @@ -2793,7 +2793,7 @@ func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int) if !isextern(a.Sym) && p.Mode == 64 { goto bad } - if p.Mode == 32 && ctxt.Flag_shared != 0 { + if p.Mode == 32 && ctxt.Flag_shared { base = REG_CX } else { base = REG_NONE @@ -2838,7 +2838,7 @@ func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int) if a.Sym == nil { ctxt.Diag("bad addr: %v", p) } - if p.Mode == 32 && ctxt.Flag_shared != 0 { + if p.Mode == 32 && ctxt.Flag_shared { base = REG_CX } else { base = REG_NONE @@ -2892,7 +2892,7 @@ func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int) } if REG_AX <= base && base <= REG_R15 { - if a.Index == REG_TLS && ctxt.Flag_shared == 0 { + if a.Index == REG_TLS && !ctxt.Flag_shared { rel = obj.Reloc{} rel.Type = obj.R_TLS_LE rel.Siz = 4 @@ -3945,7 +3945,7 @@ func doasm(ctxt *obj.Link, p *obj.Prog) { case obj.Hlinux, obj.Hnacl: - if ctxt.Flag_shared != 0 { + if ctxt.Flag_shared { // Note that this is not generating the same insns as the other cases. // MOV TLS, R_to // becomes @@ -4019,7 +4019,7 @@ func doasm(ctxt *obj.Link, p *obj.Prog) { log.Fatalf("unknown TLS base location for %s", obj.Headstr(ctxt.Headtype)) case obj.Hlinux: - if ctxt.Flag_shared == 0 { + if !ctxt.Flag_shared { log.Fatalf("unknown TLS base location for linux without -shared") } // Note that this is not generating the same insn as the other cases. 
diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go index 302a597f4c..b638c048e8 100644 --- a/src/cmd/internal/obj/x86/obj6.go +++ b/src/cmd/internal/obj/x86/obj6.go @@ -66,7 +66,7 @@ func CanUse1InsnTLS(ctxt *obj.Link) bool { obj.Hwindows: return false case obj.Hlinux: - return ctxt.Flag_shared == 0 + return !ctxt.Flag_shared } return true @@ -314,7 +314,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog) { rewriteToUseGot(ctxt, p) } - if ctxt.Flag_shared != 0 && p.Mode == 32 { + if ctxt.Flag_shared && p.Mode == 32 { rewriteToPcrel(ctxt, p) } } -- cgit v1.3 From 045411e6f28c64e6448a2432fa652cc80ca18f31 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 12 Apr 2016 17:58:46 -0700 Subject: cmd/internal/obj: remove use of package bio Also add MustClose and MustWriter to cmd/internal/bio, and use them in cmd/asm. Change-Id: I07f5df3b66c17bc5b2e6ec9c4357d9b653e354e0 Reviewed-on: https://go-review.googlesource.com/21938 Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot Reviewed-by: Brad Fitzpatrick --- src/cmd/asm/main.go | 13 +++++++----- src/cmd/compile/internal/gc/obj.go | 2 +- src/cmd/internal/bio/buf.go | 2 +- src/cmd/internal/bio/must.go | 43 ++++++++++++++++++++++++++++++++++++++ src/cmd/internal/obj/objfile.go | 9 ++++---- 5 files changed, 57 insertions(+), 12 deletions(-) create mode 100644 src/cmd/internal/bio/must.go (limited to 'src/cmd/compile') diff --git a/src/cmd/asm/main.go b/src/cmd/asm/main.go index 40e1d9c4a9..c612583e6b 100644 --- a/src/cmd/asm/main.go +++ b/src/cmd/asm/main.go @@ -44,12 +44,15 @@ func main() { defer ctxt.Bso.Flush() // Create object file, write header. 
- output, err := bio.Create(*flags.OutputFile) + out, err := os.Create(*flags.OutputFile) if err != nil { log.Fatal(err) } - fmt.Fprintf(output, "go object %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion()) - fmt.Fprintf(output, "!\n") + defer bio.MustClose(out) + buf := bufio.NewWriter(bio.MustWriter(out)) + + fmt.Fprintf(buf, "go object %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion()) + fmt.Fprintf(buf, "!\n") lexer := lex.NewLexer(flag.Arg(0), ctxt) parser := asm.NewParser(ctxt, architecture, lexer) @@ -63,12 +66,12 @@ func main() { pList.Firstpc, ok = parser.Parse() if ok { // reports errors to parser.Errorf - obj.Writeobjdirect(ctxt, output) + obj.Writeobjdirect(ctxt, buf) } if !ok || diag { log.Printf("assembly of %s failed", flag.Arg(0)) os.Remove(*flags.OutputFile) os.Exit(1) } - output.Flush() + buf.Flush() } diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 59ce0547c8..b60f78f638 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -88,7 +88,7 @@ func dumpobj() { externdcl = tmp dumpdata() - obj.Writeobjdirect(Ctxt, bout) + obj.Writeobjdirect(Ctxt, bout.Writer) if writearchive { bout.Flush() diff --git a/src/cmd/internal/bio/buf.go b/src/cmd/internal/bio/buf.go index 7a077041c2..54ce3c7681 100644 --- a/src/cmd/internal/bio/buf.go +++ b/src/cmd/internal/bio/buf.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package bio implements seekable buffered I/O. +// Package bio implements common I/O abstractions used within the Go toolchain. package bio import ( diff --git a/src/cmd/internal/bio/must.go b/src/cmd/internal/bio/must.go new file mode 100644 index 0000000000..3604b29175 --- /dev/null +++ b/src/cmd/internal/bio/must.go @@ -0,0 +1,43 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bio + +import ( + "io" + "log" +) + +// MustClose closes Closer c and calls log.Fatal if it returns a non-nil error. +func MustClose(c io.Closer) { + if err := c.Close(); err != nil { + log.Fatal(err) + } +} + +// MustWriter returns a Writer that wraps the provided Writer, +// except that it calls log.Fatal instead of returning a non-nil error. +func MustWriter(w io.Writer) io.Writer { + return mustWriter{w} +} + +type mustWriter struct { + w io.Writer +} + +func (w mustWriter) Write(b []byte) (int, error) { + n, err := w.w.Write(b) + if err != nil { + log.Fatal(err) + } + return n, nil +} + +func (w mustWriter) WriteString(s string) (int, error) { + n, err := io.WriteString(w.w, s) + if err != nil { + log.Fatal(err) + } + return n, nil +} diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go index 7d88db2bcc..60505dfbb5 100644 --- a/src/cmd/internal/obj/objfile.go +++ b/src/cmd/internal/obj/objfile.go @@ -109,7 +109,6 @@ package obj import ( "bufio" - "cmd/internal/bio" "cmd/internal/sys" "fmt" "log" @@ -120,7 +119,7 @@ import ( // The Go and C compilers, and the assembler, call writeobj to write // out a Go object file. The linker does not call this; the linker // does not write out object files. 
-func Writeobjdirect(ctxt *Link, b *bio.Writer) { +func Writeobjdirect(ctxt *Link, b *bufio.Writer) { Flushplist(ctxt) WriteObjFile(ctxt, b) } @@ -187,16 +186,16 @@ func (w *objWriter) writeLengths() { w.writeInt(int64(w.nFile)) } -func newObjWriter(ctxt *Link, b *bio.Writer) *objWriter { +func newObjWriter(ctxt *Link, b *bufio.Writer) *objWriter { return &objWriter{ ctxt: ctxt, - wr: b.Writer, + wr: b, vrefIdx: make(map[string]int), refIdx: make(map[string]int), } } -func WriteObjFile(ctxt *Link, b *bio.Writer) { +func WriteObjFile(ctxt *Link, b *bufio.Writer) { w := newObjWriter(ctxt, b) // Magic header -- cgit v1.3 From 6b0b3f86d6b3c2cf01c7ed6080e038bda2c12997 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 13 Apr 2016 13:30:03 -0400 Subject: cmd/compile: fix use of original spill name after sinking This is a fix for the ssacheck builder http://build.golang.org/log/baa00f70c34e41186051cfe90568de3d91f115d7 after CL 21307 for sinking spills down loop exits https://go-review.googlesource.com/#/c/21037/ The fix is to reuse (move) the original spill, thus preserving the definition of the variable and its use count. Original and copy both use the same stack slot, but ssacheck needs to see a definition for the variable itself. Fixes #15279. 
Change-Id: I286285490193dc211b312d64dbc5a54867730bd6 Reviewed-on: https://go-review.googlesource.com/21995 Reviewed-by: Keith Randall Run-TryBot: David Chase TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/ssa/check.go | 10 +++++----- src/cmd/compile/internal/ssa/regalloc.go | 32 ++++++++++++++++++++++++-------- 2 files changed, 29 insertions(+), 13 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go index 5a17735304..e4b8cb05f4 100644 --- a/src/cmd/compile/internal/ssa/check.go +++ b/src/cmd/compile/internal/ssa/check.go @@ -162,7 +162,7 @@ func checkFunc(f *Func) { // variable length args) nArgs := opcodeTable[v.Op].argLen if nArgs != -1 && int32(len(v.Args)) != nArgs { - f.Fatalf("value %v has %d args, expected %d", v.LongString(), + f.Fatalf("value %s has %d args, expected %d", v.LongString(), len(v.Args), nArgs) } @@ -207,15 +207,15 @@ func checkFunc(f *Func) { f.Fatalf("unknown aux type for %s", v.Op) } if !canHaveAux && v.Aux != nil { - f.Fatalf("value %v has an Aux value %v but shouldn't", v.LongString(), v.Aux) + f.Fatalf("value %s has an Aux value %v but shouldn't", v.LongString(), v.Aux) } if !canHaveAuxInt && v.AuxInt != 0 { - f.Fatalf("value %v has an AuxInt value %d but shouldn't", v.LongString(), v.AuxInt) + f.Fatalf("value %s has an AuxInt value %d but shouldn't", v.LongString(), v.AuxInt) } for _, arg := range v.Args { if arg == nil { - f.Fatalf("value %v has nil arg", v.LongString()) + f.Fatalf("value %s has nil arg", v.LongString()) } } @@ -271,7 +271,7 @@ func checkFunc(f *Func) { for _, v := range b.Values { for i, a := range v.Args { if !valueMark[a.ID] { - f.Fatalf("%v, arg %d of %v, is missing", a, i, v) + f.Fatalf("%v, arg %d of %s, is missing", a, i, v.LongString()) } } } diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index d1de3646d9..7be1cf593c 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go 
+++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -96,7 +96,12 @@ // there is one spill site (one StoreReg) targeting stack slot X, after // sinking there may be multiple spill sites targeting stack slot X, // with no phi functions at any join points reachable by the multiple -// spill sites. +// spill sites. In addition, uses of the spill from copies of the original +// will not name the copy in their reference; instead they will name +// the original, though both will have the same spill location. The +// first sunk spill will be the original, but moved, to an exit block, +// thus ensuring that there is a definition somewhere corresponding to +// the original spill's uses. package ssa @@ -1354,6 +1359,7 @@ sinking: } b.Values = b.Values[:i] + first := true for i := uint(0); i < 32 && dests != 0; i++ { if dests&(1< moveSpills { - s.f.Config.Warnl(e.Line, "moved spill %v in %v for %v to %v in %v", - vsp, b, e, vspnew, d) + vspnew := vsp // reuse original for first sunk spill, saves tracking down and renaming uses + if !first { // any sunk spills after first must make a copy + vspnew = d.NewValue1(e.Line, OpStoreReg, e.Type, e) + f.setHome(vspnew, f.getHome(vsp.ID)) // copy stack home + if s.f.pass.debug > moveSpills { + s.f.Config.Warnl(e.Line, "copied spill %v in %v for %v to %v in %v", + vsp, b, e, vspnew, d) + } + } else { + first = false + vspnew.Block = d + d.Values = append(d.Values, vspnew) + if s.f.pass.debug > moveSpills { + s.f.Config.Warnl(e.Line, "moved spill %v in %v for %v to %v in %v", + vsp, b, e, vspnew, d) + } } - f.setHome(vspnew, f.getHome(vsp.ID)) // copy stack home - // shuffle vspnew to the beginning of its block copy(d.Values[1:], d.Values[0:len(d.Values)-1]) d.Values[0] = vspnew + } } -- cgit v1.3 From 644493f1090e965cbde3e3245bc8b12bb5486477 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Thu, 14 Apr 2016 08:48:36 -0700 Subject: cmd/compile: clear hidden value at end of channel range body MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While we’re here, clean up a few comments. Fixes #15281 Change-Id: Ia6173e9941133db08f57bc80bdd3c5722122bfdb Reviewed-on: https://go-review.googlesource.com/22082 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: David Chase --- src/cmd/compile/internal/gc/range.go | 14 +++++--- test/fixedbugs/issue15281.go | 64 ++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 5 deletions(-) create mode 100644 test/fixedbugs/issue15281.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 6adf8e0d6d..96d7a82972 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -154,7 +154,7 @@ func walkrange(n *Node) { v2 = n.List.Second() } - // n->list has no meaning anymore, clear it + // n.List has no meaning anymore, clear it // to avoid erroneous processing by racewalk. n.List.Set(nil) @@ -217,9 +217,9 @@ func walkrange(n *Node) { n.Right.Ninit.Set1(a) } - // orderstmt allocated the iterator for us. - // we only use a once, so no copy needed. case TMAP: + // orderstmt allocated the iterator for us. + // we only use a once, so no copy needed. ha := a th := hiter(t) @@ -254,8 +254,8 @@ func walkrange(n *Node) { body = []*Node{a} } - // orderstmt arranged for a copy of the channel variable. case TCHAN: + // orderstmt arranged for a copy of the channel variable. ha := a n.Left = nil @@ -278,9 +278,13 @@ func walkrange(n *Node) { } else { body = []*Node{Nod(OAS, v1, hv1)} } + // Zero hv1. This prevents hv1 from being the sole, inaccessible + // reference to an otherwise GC-able value during the next channel receive. + // See issue 15281. + body = append(body, Nod(OAS, hv1, nil)) - // orderstmt arranged for a copy of the string variable. case TSTRING: + // orderstmt arranged for a copy of the string variable. 
ha := a ohv1 := temp(Types[TINT]) diff --git a/test/fixedbugs/issue15281.go b/test/fixedbugs/issue15281.go new file mode 100644 index 0000000000..187c96f218 --- /dev/null +++ b/test/fixedbugs/issue15281.go @@ -0,0 +1,64 @@ +// run + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package main + +import "runtime" + +func main() { + { + x := inuse() + c := make(chan []byte, 10) + c <- make([]byte, 10<<20) + close(c) + f1(c, x) + } + { + x := inuse() + c := make(chan []byte, 10) + c <- make([]byte, 10<<20) + close(c) + f2(c, x) + } +} + +func f1(c chan []byte, start int64) { + for x := range c { + if delta := inuse() - start; delta < 9<<20 { + println("BUG: f1: after alloc: expected delta at least 9MB, got: ", delta) + println(x) + } + x = nil + if delta := inuse() - start; delta > 1<<20 { + println("BUG: f1: after alloc: expected delta below 1MB, got: ", delta) + println(x) + } + } +} + +func f2(c chan []byte, start int64) { + for { + x, ok := <-c + if !ok { + break + } + if delta := inuse() - start; delta < 9<<20 { + println("BUG: f2: after alloc: expected delta at least 9MB, got: ", delta) + println(x) + } + x = nil + if delta := inuse() - start; delta > 1<<20 { + println("BUG: f2: after alloc: expected delta below 1MB, got: ", delta) + println(x) + } + } +} + +func inuse() int64 { + runtime.GC() + var st runtime.MemStats + runtime.ReadMemStats(&st) + return int64(st.Alloc) +} -- cgit v1.3 From c9638810df5c0ab7e15a0856f0ddddf4b3afbba6 Mon Sep 17 00:00:00 2001 From: David Crawshaw Date: Thu, 14 Apr 2016 08:54:15 -0400 Subject: cmd/compile: use type. prefix on importpath symbol This ensures that importpath symbols are treated like other type data and end up in the same section under all build modes. 
Fixes: go test -buildmode=pie reflect Change-Id: Ibb8348648e8dcc850f2424d206990a06090ce4c6 Reviewed-on: https://go-review.googlesource.com/22081 Run-TryBot: David Crawshaw TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/cmd/compile/internal/gc/reflect.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index df68f46d4c..b8b9369f37 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -432,7 +432,7 @@ func dimportpath(p *Pkg) { str = p.Path } - s := obj.Linklookup(Ctxt, "go.importpath."+p.Prefix+".", 0) + s := obj.Linklookup(Ctxt, "type..importpath."+p.Prefix+".", 0) ot := dnameData(s, 0, str, "", nil, false) ggloblLSym(s, int32(ot), obj.DUPOK|obj.RODATA) p.Pathsym = s @@ -450,10 +450,10 @@ func dgopkgpathLSym(s *obj.LSym, ot int, pkg *Pkg) int { if pkg == localpkg && myimportpath == "" { // If we don't know the full import path of the package being compiled // (i.e. -p was not passed on the compiler command line), emit a reference to - // go.importpath.""., which the linker will rewrite using the correct import path. + // type..importpath.""., which the linker will rewrite using the correct import path. // Every package that imports this one directly defines the symbol. // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. - ns := obj.Linklookup(Ctxt, `go.importpath."".`, 0) + ns := obj.Linklookup(Ctxt, `type..importpath."".`, 0) return dsymptrLSym(s, ot, ns, 0) } @@ -466,10 +466,10 @@ func dgopkgpathOffLSym(s *obj.LSym, ot int, pkg *Pkg) int { if pkg == localpkg && myimportpath == "" { // If we don't know the full import path of the package being compiled // (i.e. -p was not passed on the compiler command line), emit a reference to - // go.importpath.""., which the linker will rewrite using the correct import path. 
+ // type..importpath.""., which the linker will rewrite using the correct import path. // Every package that imports this one directly defines the symbol. // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ. - ns := obj.Linklookup(Ctxt, `go.importpath."".`, 0) + ns := obj.Linklookup(Ctxt, `type..importpath."".`, 0) return dsymptrOffLSym(s, ot, ns, 0) } -- cgit v1.3 From ac8127d7e6ead390bc44c89d47d16be587c3ac11 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 14 Apr 2016 13:47:58 -0700 Subject: cmd/compile: fix register size for ODOTPTR result The result of ODOTPTR, as well as a bunch of other ops, should be the type of the result, not always a pointer type. This fixes an amd64p32 bug where we were incorrectly truncating a 64-bit slice index to 32 bits, and then barfing on a weird load-64-bits-but-then-truncate-to-32-bits op that doesn't exist. Fixes #15252 Change-Id: Ie62f4315fffd79f233e5449324ccc0879f5ac343 Reviewed-on: https://go-review.googlesource.com/22094 Run-TryBot: Keith Randall Reviewed-by: Josh Bleecher Snyder TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/cgen.go | 2 +- test/fixedbugs/issue15252.go | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 test/fixedbugs/issue15252.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go index 32ca1ae940..658cc8a50e 100644 --- a/src/cmd/compile/internal/gc/cgen.go +++ b/src/cmd/compile/internal/gc/cgen.go @@ -946,7 +946,7 @@ func Cgenr(n *Node, a *Node, res *Node) { OCALLINTER: var n1 Node Igen(n, &n1, res) - Regalloc(a, Types[Tptr], &n1) + Regalloc(a, n.Type, &n1) Thearch.Gmove(&n1, a) Regfree(&n1) diff --git a/test/fixedbugs/issue15252.go b/test/fixedbugs/issue15252.go new file mode 100644 index 0000000000..370a885c7f --- /dev/null +++ b/test/fixedbugs/issue15252.go @@ -0,0 +1,32 @@ +// run + +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test makes sure that we use all 64 bits of an +// index, even on 32 bit machines. It also tests that nacl +// can compile 64 bit indexes loaded from ODOTPTR properly. + +package main + +type T struct { + i int64 +} + +func f(t *T) byte { + b := [2]byte{3, 4} + return b[t.i] +} + +func main() { + t := &T{0x100000001} + defer func() { + r := recover() + if r == nil { + panic("panic wasn't recoverable") + } + }() + f(t) + panic("index didn't panic") +} -- cgit v1.3 From 5c593a3227d97f5d2afa66a39b6dd8ea6ebf73f3 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Wed, 13 Apr 2016 17:53:03 -0700 Subject: cmd/compile: first cut at exporting position info - position info for all exported globals, plus methods and fields - use delta-encoded line number info in most cases - canonicalize all strings: each filename appears only once, but will also compact other strings (names) to at most one occurence in encoding - positions not yet hooked up when reading in Also: - adjusted go/importer (gcimporter) - some refactoring for better symmetry Stats: - comparison of export data size w/o and w/ position info (bytes). - delta is increase in % - overall (see bottom of table): 14% increase - however, the current binary format decreased from the original binary format last week by 14% - compared to original textual format: 65% decrease (increase by 14% after decrease by 14% still leads to a decrease from original textual format) (caveat: we used the textual size from last week, assuming it has not changed - there may be a small error here). 
package w/o pos w/ pos delta archive/tar 4234 4902 16% archive/zip 6387 7340 15% bufio 3106 3419 10% bytes 4362 4757 9% cmd/addr2line 27 70 159% cmd/api 12065 13590 13% cmd/asm 27 64 137% cmd/asm/internal/arch 9957 11529 16% cmd/asm/internal/asm 11788 13385 14% cmd/asm/internal/flags 239 311 30% cmd/asm/internal/lex 13415 15358 14% cmd/cgo 13064 15006 15% cmd/compile 27 67 148% cmd/compile/internal/amd64 461 869 89% cmd/compile/internal/arm 5963 7273 22% cmd/compile/internal/arm64 363 657 81% cmd/compile/internal/big 7186 8590 20% cmd/compile/internal/gc 48242 56234 17% cmd/compile/internal/mips64 367 666 81% cmd/compile/internal/ppc64 372 721 94% cmd/compile/internal/s390x 330 569 72% cmd/compile/internal/ssa 30464 35058 15% cmd/compile/internal/x86 429 770 79% cmd/cover 3984 4731 19% cmd/dist 74 154 108% cmd/doc 7272 8591 18% cmd/expdump 27 71 163% cmd/fix 342 419 23% cmd/go 8126 9520 17% cmd/gofmt 27 70 159% cmd/gofmt2 27 69 156% cmd/gofmt2/internal/format 702 856 22% cmd/gofmt2/internal/lexical 2954 3509 19% cmd/gofmt2/internal/parse 6185 7295 18% cmd/gofmt2/internal/syntax 3533 4738 34% cmd/gofmt2/internal/test 540 615 14% cmd/internal/bio 5395 6060 12% cmd/internal/gcprog 533 663 24% cmd/internal/goobj 1022 1277 25% cmd/internal/obj 10951 12825 17% cmd/internal/obj/arm 8612 9985 16% cmd/internal/obj/arm64 15814 17638 12% cmd/internal/obj/mips 10928 12487 14% cmd/internal/obj/ppc64 13576 15277 13% cmd/internal/obj/s390x 16513 18708 13% cmd/internal/obj/x86 21152 23482 11% cmd/internal/objfile 14442 16505 14% cmd/internal/pprof/commands 1663 1885 13% cmd/internal/pprof/driver 9517 10789 13% cmd/internal/pprof/fetch 7632 8635 13% cmd/internal/pprof/plugin 13150 14809 13% cmd/internal/pprof/profile 7004 8248 18% cmd/internal/pprof/report 7763 8942 15% cmd/internal/pprof/svg 1332 1534 15% cmd/internal/pprof/symbolizer 7376 8439 14% cmd/internal/pprof/symbolz 6970 7976 14% cmd/internal/pprof/tempfile 3645 4093 12% cmd/internal/sys 505 619 23% 
cmd/internal/unvendor/golang.org/x/arch/arm/armasm 73951 79188 7% cmd/internal/unvendor/golang.org/x/arch/x86/x86asm 10140 11738 16% cmd/link 27 64 137% cmd/link/internal/amd64 9317 11034 18% cmd/link/internal/arm 110 213 94% cmd/link/internal/arm64 112 219 96% cmd/link/internal/ld 53524 60149 12% cmd/link/internal/mips64 113 222 96% cmd/link/internal/ppc64 113 220 95% cmd/link/internal/s390x 112 219 96% cmd/link/internal/x86 110 212 93% cmd/nm 27 61 126% cmd/objdump 27 68 152% cmd/pack 4141 4688 13% cmd/pprof 27 67 148% cmd/trace 624 842 35% cmd/vet 11194 13140 17% cmd/vet/internal/whitelist 52 113 117% cmd/yacc 1141 1317 15% compress/bzip2 2101 2484 18% compress/flate 3619 4336 20% compress/gzip 6261 7111 14% compress/lzw 276 401 45% compress/zlib 3630 4158 15% container/heap 187 250 34% container/list 1370 1506 10% container/ring 466 546 17% context 3005 3338 11% crypto 728 856 18% crypto/aes 181 321 77% crypto/cipher 744 1163 56% crypto/des 220 320 45% crypto/dsa 4526 4990 10% crypto/ecdsa 5341 5982 12% crypto/elliptic 4969 5593 13% crypto/hmac 188 250 33% crypto/md5 560 706 26% crypto/rand 4218 4746 13% crypto/rc4 214 321 50% crypto/rsa 5648 6355 13% crypto/sha1 597 751 26% crypto/sha256 228 351 54% crypto/sha512 354 484 37% crypto/subtle 586 621 6% crypto/tls 20909 23438 12% crypto/x509 14862 16857 13% crypto/x509/pkix 8384 9278 11% database/sql 6721 7715 15% database/sql/driver 1243 1535 23% debug/dwarf 7867 9153 16% debug/elf 25479 28025 10% debug/gosym 1887 2267 20% debug/macho 7222 8846 22% debug/pe 6921 8081 17% debug/plan9obj 1084 1319 22% encoding 217 280 29% encoding/ascii85 587 722 23% encoding/asn1 1043 1268 22% encoding/base32 929 1112 20% encoding/base64 1166 1368 17% encoding/binary 2168 2410 11% encoding/csv 3761 4203 12% encoding/gob 11304 12936 14% encoding/hex 510 606 19% encoding/json 9965 11395 14% encoding/pem 202 266 32% encoding/xml 11817 13361 13% errors 126 170 35% expvar 930 1142 23% flag 5905 6519 10% fmt 1027 1190 16% go/ast 12910 
15541 20% go/build 5460 6173 13% go/constant 1645 1816 10% go/doc 3107 3882 25% go/format 1416 1729 22% go/importer 1426 1668 17% go/internal/gccgoimporter 1624 2028 25% go/internal/gcimporter 2650 3095 17% go/parser 6220 7073 14% go/printer 1924 2306 20% go/scanner 3137 3602 15% go/token 3053 3474 14% go/types 21793 25561 17% hash 234 327 40% hash/adler32 465 553 19% hash/crc32 668 817 22% hash/crc64 630 727 15% hash/fnv 1413 1582 12% html 76 114 50% html/template 14382 16457 14% image 10248 11409 11% image/color 2247 2562 14% image/color/palette 107 169 58% image/draw 2313 2494 8% image/gif 3079 3450 12% image/internal/imageutil 3136 3456 10% image/jpeg 2349 2735 16% image/png 2404 2695 12% index/suffixarray 4978 5596 12% internal/race 225 278 24% internal/singleflight 551 697 26% internal/syscall/windows/sysdll 97 166 71% internal/testenv 4488 5052 13% internal/trace 1392 1680 21% io 2811 3318 18% io/ioutil 3988 4467 12% log 3532 3907 11% log/syslog 4247 4775 12% math 3021 4499 49% math/big 7250 8456 17% math/cmplx 1034 1617 56% math/rand 734 885 21% mime 1889 2194 16% mime/multipart 4313 4849 12% mime/quotedprintable 1758 1996 14% net 15686 18617 19% net/http 42182 47848 13% net/http/cgi 19496 21768 12% net/http/cookiejar 4615 5248 14% net/http/fcgi 17758 19771 11% net/http/httptest 26108 29350 12% net/http/httputil 20732 23286 12% net/http/internal 2195 2497 14% net/http/pprof 17596 19545 11% net/internal/socktest 1689 2153 27% net/mail 4328 4810 11% net/rpc 24328 27249 12% net/rpc/jsonrpc 11052 12438 13% net/smtp 17127 19174 12% net/textproto 3705 4329 17% net/url 1193 1371 15% os 8493 10113 19% os/exec 6625 7532 14% os/signal 137 236 72% os/user 529 761 44% path 295 372 26% path/filepath 3452 3952 14% reflect 5091 6028 18% regexp 4848 5585 15% regexp/syntax 2590 3076 19% runtime 8721 11598 33% runtime/cgo 17 17 0% runtime/debug 2721 3130 15% runtime/internal/atomic 569 704 24% runtime/internal/sys 1874 2318 24% runtime/pprof 478 582 22% runtime/race 18 18 0% 
runtime/trace 95 146 54% sort 1052 1215 15% strconv 1389 1667 20% strings 3372 3772 12% sync 946 1371 45% sync/atomic 962 1079 12% syscall 41574 45613 10% testing 6184 7243 17% testing/iotest 883 1116 26% testing/quick 4659 5443 17% text/scanner 2930 3269 12% text/tabwriter 2333 2607 12% text/template 13335 15274 15% text/template/parse 8270 9285 12% time 4687 5313 13% unicode 3831 4355 14% unicode/utf16 530 584 10% unicode/utf8 872 946 8% vendor/golang.org/x/net/http2/hpack 3386 3970 17% 1295440 1481566 14% orig. textual 4253585 1481566 -65% orig. binary 1724071 1481566 -14% Change-Id: I4177c6511cc57ebe5eb80c89bf3aefc83376ce86 Reviewed-on: https://go-review.googlesource.com/22096 Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/bexport.go | 112 ++++++++++++++++++++--------- src/cmd/compile/internal/gc/bimport.go | 123 +++++++++++++++++++------------ src/go/internal/gcimporter/bimport.go | 128 ++++++++++++++++++++++----------- 3 files changed, 243 insertions(+), 120 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index e5fa3c39a6..eee71291be 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -36,25 +36,21 @@ If the field is a pointer to another object, that object is serialized, recursively. Otherwise the field is written. Non-pointer fields are all encoded as integer or string values. -Only packages and types may be referred to more than once. When getting -to a package or type that was not serialized before, an integer _index_ +Some objects (packages, types) may be referred to more than once. When +reaching an object that was not serialized before, an integer _index_ is assigned to it, starting at 0. In this case, the encoding starts with an integer _tag_ < 0. The tag value indicates the kind of object -(package or type) that follows and that this is the first time that we -see this object. 
If the package or tag was already serialized, the encoding -starts with the respective package or type index >= 0. An importer can -trivially determine if a package or type needs to be read in for the first -time (tag < 0) and entered into the respective package or type table, or -if the package or type was seen already (index >= 0), in which case the -index is used to look up the object in a table. +that follows and that this is the first time that we see this object. +If the object was already serialized, the encoding is simply the object +index >= 0. An importer can trivially determine if an object needs to +be read in for the first time (tag < 0) and entered into the respective +object table, or if the object was seen already (index >= 0), in which +case the index is used to look up the object in a table. Before exporting or importing, the type tables are populated with the predeclared types (int, string, error, unsafe.Pointer, etc.). This way they are automatically encoded with a known and fixed type index. -TODO(gri) We may consider using the same sharing for other items -that are written out, such as strings, or possibly symbols (*Sym). - Encoding format: The export data starts with a single byte indicating the encoding format @@ -73,11 +69,17 @@ the previously imported type pointer so that we have exactly one version (i.e., one pointer) for each named type (and read but discard the current type encoding). Unnamed types simply encode their respective fields. -In the encoding, some lists start with the list length (incl. strings). -Some lists are terminated with an end marker (usually for lists where -we may not know the length a priori). +In the encoding, some lists start with the list length. Some lists are +terminated with an end marker (usually for lists where we may not know +the length a priori). + +Integers use variable-length encoding for compact representation. -All integer values use variable-length encoding for compact representation. 
+Strings are canonicalized similar to objects that may occur multiple times: +If the string was exported already, it is represented by its index only. +Otherwise, the export data starts with the negative string length (negative, +so we can distinguish from string index), followed by the string bytes. +The empty string is mapped to index 0. The exporter and importer are completely symmetric in implementation: For each encoding routine there is a matching and symmetric decoding routine. @@ -125,9 +127,15 @@ const exportInlined = true // default: true type exporter struct { out *bufio.Writer - pkgIndex map[*Pkg]int // pkg -> pkg index in order of appearance - typIndex map[*Type]int // type -> type index in order of appearance - funcList []*Func // in order of appearance + // object -> index maps, indexed in order of serialization + strIndex map[string]int + pkgIndex map[*Pkg]int + typIndex map[*Type]int + funcList []*Func + + // position encoding + prevFile string + prevLine int // debugging support written int // bytes written @@ -139,6 +147,7 @@ type exporter struct { func export(out *bufio.Writer, trace bool) int { p := exporter{ out: out, + strIndex: map[string]int{"": 0}, // empty string is mapped to 0 pkgIndex: make(map[*Pkg]int), typIndex: make(map[*Type]int), trace: trace, @@ -149,7 +158,7 @@ func export(out *bufio.Writer, trace bool) int { if debugFormat { format = 'd' } - p.byte(format) + p.rawByte(format) // --- generic export data --- @@ -419,6 +428,7 @@ func (p *exporter) obj(sym *Sym) { } p.tag(constTag) + p.pos(n) // TODO(gri) In inlined functions, constants are used directly // so they should never occur as re-exported objects. We may // not need the qualified name here. See also comment above. 
@@ -447,6 +457,7 @@ func (p *exporter) obj(sym *Sym) { if n.Type.Etype == TFUNC && n.Class == PFUNC { // function p.tag(funcTag) + p.pos(n) p.qualifiedName(sym) sig := sym.Def.Type @@ -471,6 +482,7 @@ func (p *exporter) obj(sym *Sym) { } else { // variable p.tag(varTag) + p.pos(n) p.qualifiedName(sym) p.typ(sym.Def.Type) } @@ -480,6 +492,26 @@ func (p *exporter) obj(sym *Sym) { } } +func (p *exporter) pos(n *Node) { + var file string + var line int + if n != nil { + file, line = Ctxt.LineHist.FileLine(int(n.Lineno)) + } + + if file == p.prevFile && line != p.prevLine { + // common case: write delta-encoded line number + p.int(line - p.prevLine) // != 0 + } else { + // uncommon case: filename changed, or line didn't change + p.int(0) + p.string(file) + p.int(line) + p.prevFile = file + } + p.prevLine = line +} + func isInlineable(n *Node) bool { if exportInlined && n != nil && n.Func != nil && len(n.Func.Inl.Slice()) != 0 { // when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet. @@ -525,13 +557,17 @@ func (p *exporter) typ(t *Type) { if t.Orig == t { Fatalf("exporter: predeclared type missing from type map?") } - // TODO(gri) The assertion below seems incorrect (crashes during all.bash). - // we expect the respective definition to point to us + + // TODO(gri) The assertion below is incorrect (crashes during all.bash), + // likely because of symbol shadowing (we expect the respective definition + // to point to us). Determine the correct Def so we get correct position + // info. 
// if tsym.Def.Type != t { // Fatalf("exporter: type definition doesn't point to us?") // } p.tag(namedTag) + p.pos(tsym.Def) // TODO(gri) this may not be the correct node - fix and add tests p.qualifiedName(tsym) // write underlying type @@ -564,6 +600,7 @@ func (p *exporter) typ(t *Type) { Fatalf("invalid symbol name: %s (%v)", m.Sym.Name, m.Sym) } + p.pos(m.Sym.Def) p.fieldSym(m.Sym, false) sig := m.Type @@ -668,8 +705,12 @@ func (p *exporter) fieldList(t *Type) { } func (p *exporter) field(f *Field) { + p.pos(f.Sym.Def) p.fieldName(f.Sym, f) p.typ(f.Type) + // TODO(gri) Do we care that a non-present tag cannot be distinguished + // from a present but empty ta string? (reflect doesn't seem to make + // a difference). Investigate. p.note(f.Note) } @@ -697,6 +738,7 @@ func (p *exporter) methodList(t *Type) { } func (p *exporter) method(m *Field) { + p.pos(m.Sym.Def) p.fieldName(m.Sym, m) p.paramList(m.Type.Params(), false) p.paramList(m.Type.Results(), false) @@ -793,9 +835,6 @@ func (p *exporter) param(q *Field, n int, numbered bool) { // TODO(gri) This is compiler-specific (escape info). // Move into compiler-specific section eventually? // (Not having escape info causes tests to fail, e.g. runtime GCInfoTest) - // - // TODO(gri) The q.Note is much more verbose that necessary and - // adds significantly to export data size. FIX THIS. p.note(q.Note) } @@ -1497,9 +1536,17 @@ func (p *exporter) string(s string) { if p.trace { p.tracef("%q ", s) } - p.rawInt64(int64(len(s))) + // if we saw the string before, write its index (>= 0) + // (the empty string is mapped to 0) + if i, ok := p.strIndex[s]; ok { + p.rawInt64(int64(i)) + return + } + // otherwise, remember string and write its negative length and bytes + p.strIndex[s] = len(p.strIndex) + p.rawInt64(-int64(len(s))) for i := 0; i < len(s); i++ { - p.byte(s[i]) + p.rawByte(s[i]) } } @@ -1507,7 +1554,7 @@ func (p *exporter) string(s string) { // it easy for a reader to detect if it is "out of sync". 
Used only // if debugFormat is set. func (p *exporter) marker(m byte) { - p.byte(m) + p.rawByte(m) // Uncomment this for help tracking down the location // of an incorrect marker when running in debugFormat. // if p.trace { @@ -1521,12 +1568,12 @@ func (p *exporter) rawInt64(x int64) { var tmp [binary.MaxVarintLen64]byte n := binary.PutVarint(tmp[:], x) for i := 0; i < n; i++ { - p.byte(tmp[i]) + p.rawByte(tmp[i]) } } -// byte is the bottleneck interface to write to p.out. -// byte escapes b as follows (any encoding does that +// rawByte is the bottleneck interface to write to p.out. +// rawByte escapes b as follows (any encoding does that // hides '$'): // // '$' => '|' 'S' @@ -1534,7 +1581,8 @@ func (p *exporter) rawInt64(x int64) { // // Necessary so other tools can find the end of the // export data by searching for "$$". -func (p *exporter) byte(b byte) { +// rawByte should only be used by low-level encoders. +func (p *exporter) rawByte(b byte) { switch b { case '$': // write '$' as '|' 'S' diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index 223cc443aa..6654345ead 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -20,13 +20,18 @@ import ( // changes to bimport.go and bexport.go. 
type importer struct { - in *bufio.Reader - buf []byte // for reading strings - bufarray [64]byte // initial underlying array for buf, large enough to avoid allocation when compiling std lib + in *bufio.Reader + buf []byte // reused for reading strings - pkgList []*Pkg // in order of appearance - typList []*Type // in order of appearance - funcList []*Node // in order of appearance; nil entry means already declared + // object lists, in order of deserialization + strList []string + pkgList []*Pkg + typList []*Type + funcList []*Node // nil entry means already declared + + // position encoding + prevFile string + prevLine int // debugging support debugFormat bool @@ -35,11 +40,13 @@ type importer struct { // Import populates importpkg from the serialized package data. func Import(in *bufio.Reader) { - p := importer{in: in} - p.buf = p.bufarray[:] + p := importer{ + in: in, + strList: []string{""}, // empty string is mapped to 0 + } // read low-level encoding format - switch format := p.byte(); format { + switch format := p.rawByte(); format { case 'c': // compact format - nothing to do case 'd': @@ -221,6 +228,7 @@ func idealType(typ *Type) *Type { func (p *importer) obj(tag int) { switch tag { case constTag: + p.pos() sym := p.qualifiedName() typ := p.typ() val := p.value(typ) @@ -230,11 +238,13 @@ func (p *importer) obj(tag int) { p.typ() case varTag: + p.pos() sym := p.qualifiedName() typ := p.typ() importvar(sym, typ) case funcTag: + p.pos() sym := p.qualifiedName() params := p.paramList() result := p.paramList() @@ -268,6 +278,22 @@ func (p *importer) obj(tag int) { } } +func (p *importer) pos() { + file := p.prevFile + line := p.prevLine + + if delta := p.int(); delta != 0 { + line += delta + } else { + file = p.string() + line = p.int() + p.prevFile = file + } + p.prevLine = line + + // TODO(gri) register new position +} + func (p *importer) newtyp(etype EType) *Type { t := typ(etype) p.typList = append(p.typList, t) @@ -286,6 +312,7 @@ func (p *importer) 
typ() *Type { switch i { case namedTag: // parser.go:hidden_importsym + p.pos() tsym := p.qualifiedName() // parser.go:hidden_pkgtype @@ -311,6 +338,7 @@ func (p *importer) typ() *Type { for i := p.int(); i > 0; i-- { // parser.go:hidden_fndcl + p.pos() sym := p.fieldSym() recv := p.paramList() // TODO(gri) do we need a full param list for the receiver? @@ -409,20 +437,19 @@ func (p *importer) qualifiedName() *Sym { } // parser.go:hidden_structdcl_list -func (p *importer) fieldList() []*Node { - i := p.int() - if i == 0 { - return nil - } - n := make([]*Node, i) - for i := range n { - n[i] = p.field() +func (p *importer) fieldList() (fields []*Node) { + if n := p.int(); n > 0 { + fields = make([]*Node, n) + for i := range fields { + fields[i] = p.field() + } } - return n + return } // parser.go:hidden_structdcl func (p *importer) field() *Node { + p.pos() sym := p.fieldName() typ := p.typ() note := p.note() @@ -456,20 +483,19 @@ func (p *importer) note() (v Val) { } // parser.go:hidden_interfacedcl_list -func (p *importer) methodList() []*Node { - i := p.int() - if i == 0 { - return nil - } - n := make([]*Node, i) - for i := range n { - n[i] = p.method() +func (p *importer) methodList() (methods []*Node) { + if n := p.int(); n > 0 { + methods = make([]*Node, n) + for i := range methods { + methods[i] = p.method() + } } - return n + return } // parser.go:hidden_interfacedcl func (p *importer) method() *Node { + p.pos() sym := p.fieldName() params := p.paramList() result := p.paramList() @@ -1056,29 +1082,31 @@ func (p *importer) int64() int64 { } func (p *importer) string() string { - if p.debugFormat { + if debugFormat { p.marker('s') } - - // TODO(gri) should we intern strings here? 
- - if n := int(p.rawInt64()); n > 0 { - if cap(p.buf) < n { - p.buf = make([]byte, n) - } else { - p.buf = p.buf[:n] - } - for i := range p.buf { - p.buf[i] = p.byte() - } - return string(p.buf) + // if the string was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.strList[i] } - - return "" + // otherwise, i is the negative string length (< 0) + if n := int(-i); n <= cap(p.buf) { + p.buf = p.buf[:n] + } else { + p.buf = make([]byte, n) + } + for i := range p.buf { + p.buf[i] = p.rawByte() + } + s := string(p.buf) + p.strList = append(p.strList, s) + return s } func (p *importer) marker(want byte) { - if got := p.byte(); got != want { + if got := p.rawByte(); got != want { Fatalf("importer: incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) } @@ -1099,12 +1127,13 @@ func (p *importer) rawInt64() int64 { // needed for binary.ReadVarint in rawInt64 func (p *importer) ReadByte() (byte, error) { - return p.byte(), nil + return p.rawByte(), nil } -// byte is the bottleneck interface for reading from p.in. +// rawByte is the bottleneck interface for reading from p.in. // It unescapes '|' 'S' to '$' and '|' '|' to '|'. -func (p *importer) byte() byte { +// rawByte should only be used by low-level decoders. 
+func (p *importer) rawByte() byte { c, err := p.in.ReadByte() p.read++ if err != nil { diff --git a/src/go/internal/gcimporter/bimport.go b/src/go/internal/gcimporter/bimport.go index 7a7bc871f4..d75e533e97 100644 --- a/src/go/internal/gcimporter/bimport.go +++ b/src/go/internal/gcimporter/bimport.go @@ -19,13 +19,18 @@ type importer struct { imports map[string]*types.Package data []byte path string + buf []byte // for reading strings - buf []byte // for reading strings - bufarray [64]byte // initial underlying array for buf, large enough to avoid allocation when compiling std lib + // object lists + strList []string // in order of appearance + pkgList []*types.Package // in order of appearance + typList []types.Type // in order of appearance - pkgList []*types.Package - typList []types.Type + // position encoding + prevFile string + prevLine int + // debugging support debugFormat bool read int // bytes read } @@ -39,11 +44,11 @@ func BImportData(imports map[string]*types.Package, data []byte, path string) (i imports: imports, data: data, path: path, + strList: []string{""}, // empty string is mapped to 0 } - p.buf = p.bufarray[:] // read low-level encoding format - switch format := p.byte(); format { + switch format := p.rawByte(); format { case 'c': // compact format - nothing to do case 'd': @@ -160,6 +165,7 @@ func (p *importer) declare(obj types.Object) { func (p *importer) obj(tag int) { switch tag { case constTag: + p.pos() pkg, name := p.qualifiedName() typ := p.typ(nil) val := p.value() @@ -169,11 +175,13 @@ func (p *importer) obj(tag int) { _ = p.typ(nil) case varTag: + p.pos() pkg, name := p.qualifiedName() typ := p.typ(nil) p.declare(types.NewVar(token.NoPos, pkg, name, typ)) case funcTag: + p.pos() pkg, name := p.qualifiedName() params, isddd := p.paramList() result, _ := p.paramList() @@ -185,6 +193,22 @@ func (p *importer) obj(tag int) { } } +func (p *importer) pos() { + file := p.prevFile + line := p.prevLine + + if delta := p.int(); delta != 0 { + 
line += delta + } else { + file = p.string() + line = p.int() + p.prevFile = file + } + p.prevLine = line + + // TODO(gri) register new position +} + func (p *importer) qualifiedName() (pkg *types.Package, name string) { name = p.string() pkg = p.pkg() @@ -220,6 +244,7 @@ func (p *importer) typ(parent *types.Package) types.Type { switch i { case namedTag: // read type object + p.pos() parent, name := p.qualifiedName() scope := parent.Scope() obj := scope.Lookup(name) @@ -252,6 +277,7 @@ func (p *importer) typ(parent *types.Package) types.Type { // read associated methods for i := p.int(); i > 0; i-- { // TODO(gri) replace this with something closer to fieldName + p.pos() name := p.string() if !exported(name) { p.pkg() @@ -293,14 +319,7 @@ func (p *importer) typ(parent *types.Package) types.Type { t := new(types.Struct) p.record(t) - n := p.int() - fields := make([]*types.Var, n) - tags := make([]string, n) - for i := range fields { - fields[i] = p.field(parent) - tags[i] = p.string() - } - *t = *types.NewStruct(fields, tags) + *t = *types.NewStruct(p.fieldList(parent)) return t case pointerTag: @@ -332,17 +351,7 @@ func (p *importer) typ(parent *types.Package) types.Type { panic("unexpected embedded interface") } - // read methods - methods := make([]*types.Func, p.int()) - for i := range methods { - pkg, name := p.fieldName(parent) - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(nil, params, result, isddd) - methods[i] = types.NewFunc(token.NoPos, pkg, name, sig) - } - - t := types.NewInterface(methods, nil) + t := types.NewInterface(p.methodList(parent), nil) p.typList[n] = t return t @@ -380,7 +389,20 @@ func (p *importer) typ(parent *types.Package) types.Type { } } +func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { + if n := p.int(); n > 0 { + fields = make([]*types.Var, n) + tags = make([]string, n) + for i := range fields { + fields[i] = p.field(parent) + tags[i] = p.string() + 
} + } + return +} + func (p *importer) field(parent *types.Package) *types.Var { + p.pos() pkg, name := p.fieldName(parent) typ := p.typ(parent) @@ -402,6 +424,25 @@ func (p *importer) field(parent *types.Package) *types.Var { return types.NewField(token.NoPos, pkg, name, typ, anonymous) } +func (p *importer) methodList(parent *types.Package) (methods []*types.Func) { + if n := p.int(); n > 0 { + methods = make([]*types.Func, n) + for i := range methods { + methods[i] = p.method(parent) + } + } + return +} + +func (p *importer) method(parent *types.Package) *types.Func { + p.pos() + pkg, name := p.fieldName(parent) + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(nil, params, result, isddd) + return types.NewFunc(token.NoPos, pkg, name, sig) +} + func (p *importer) fieldName(parent *types.Package) (*types.Package, string) { pkg := parent if pkg == nil { @@ -567,24 +608,28 @@ func (p *importer) string() string { if p.debugFormat { p.marker('s') } - - if n := int(p.rawInt64()); n > 0 { - if cap(p.buf) < n { - p.buf = make([]byte, n) - } else { - p.buf = p.buf[:n] - } - for i := 0; i < n; i++ { - p.buf[i] = p.byte() - } - return string(p.buf) + // if the string was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.strList[i] } - - return "" + // otherwise, i is the negative string length (< 0) + if n := int(-i); n <= cap(p.buf) { + p.buf = p.buf[:n] + } else { + p.buf = make([]byte, n) + } + for i := range p.buf { + p.buf[i] = p.rawByte() + } + s := string(p.buf) + p.strList = append(p.strList, s) + return s } func (p *importer) marker(want byte) { - if got := p.byte(); got != want { + if got := p.rawByte(); got != want { panic(fmt.Sprintf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)) } @@ -605,12 +650,13 @@ func (p *importer) rawInt64() int64 { // needed for binary.ReadVarint in rawInt64 func (p *importer) ReadByte() (byte, error) { - 
return p.byte(), nil + return p.rawByte(), nil } // byte is the bottleneck interface for reading p.data. // It unescapes '|' 'S' to '$' and '|' '|' to '|'. -func (p *importer) byte() byte { +// rawByte should only be used by low-level decoders. +func (p *importer) rawByte() byte { b := p.data[0] r := 1 if b == '|' { -- cgit v1.3 From 77d374940e87935a2cc46a60591ec8213003e99a Mon Sep 17 00:00:00 2001 From: Todd Neal Date: Thu, 14 Apr 2016 19:09:57 -0400 Subject: cmd/compile: speed up dom checking in cse Process a slice of equivalent values by setting replaced values to nil instead of removing them from the slice to eliminate copying. Also take advantage of the entry number sort to break early once we reach a value in a block that is not dominated. For the code in issue #15112: Before: real 0m52.603s user 0m56.957s sys 0m1.213s After: real 0m22.048s user 0m26.445s sys 0m0.939s Updates #15112 Change-Id: I06d9e1e1f1ad85d7fa196c5d51f0dc163907376d Reviewed-on: https://go-review.googlesource.com/22068 Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/cse.go | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go index 76db9d5467..e3f1a1d07d 100644 --- a/src/cmd/compile/internal/ssa/cse.go +++ b/src/cmd/compile/internal/ssa/cse.go @@ -138,21 +138,29 @@ func cse(f *Func) { rewrite := make([]*Value, f.NumValues()) for _, e := range partition { sort.Sort(sortbyentry{e, f.sdom}) - for len(e) > 1 { + for i := 0; i < len(e)-1; i++ { // e is sorted by entry value so maximal dominant element should be // found first in the slice - v := e[0] - e = e[1:] + v := e[i] + if v == nil { + continue + } + + e[i] = nil // Replace all elements of e which v dominates - for i := 0; i < len(e); { - w := e[i] + for j := i + 1; j < len(e); j++ { + w := e[j] + if w == nil { + continue + } if f.sdom.isAncestorEq(v.Block, w.Block) { rewrite[w.ID] = v - 
// retain the sort order - copy(e[i:], e[i+1:]) - e = e[:len(e)-1] + e[j] = nil } else { - i++ + // since the blocks are assorted in ascending order by entry number + // once we know that we don't dominate a block we can't dominate any + // 'later' block + break } } } -- cgit v1.3 From 1441f76938bf61a2c8c2ed1a65082ddde0319633 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Thu, 14 Apr 2016 19:04:45 -0700 Subject: cmd: remove unnecessary type conversions CL generated mechanically with github.com/mdempsky/unconvert. Change-Id: Ic590315cbc7026163a1b3f8ea306ba35f1a53256 Reviewed-on: https://go-review.googlesource.com/22103 Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot Reviewed-by: Michael Hudson-Doyle --- src/cmd/asm/internal/asm/asm.go | 4 ++-- src/cmd/cgo/ast.go | 10 +++++----- src/cmd/compile/internal/s390x/peep.go | 4 ++-- src/cmd/go/http.go | 2 +- src/cmd/go/vcs.go | 4 ++-- src/cmd/internal/obj/arm64/obj7.go | 4 ++-- src/cmd/internal/obj/data.go | 2 +- src/cmd/internal/obj/mips/asm0.go | 2 +- src/cmd/internal/obj/objfile.go | 6 +++--- src/cmd/internal/obj/pcln.go | 10 +++++----- src/cmd/internal/obj/ppc64/asm9.go | 14 +++++++------- src/cmd/internal/obj/util.go | 2 +- src/cmd/internal/obj/x86/asm6.go | 16 ++++++++-------- src/cmd/internal/objfile/plan9obj.go | 2 +- src/cmd/internal/pprof/profile/legacy_profile.go | 10 +++++----- .../unvendor/golang.org/x/arch/arm/armasm/decode.go | 8 ++++---- .../unvendor/golang.org/x/arch/x86/x86asm/decode.go | 2 +- src/cmd/link/internal/amd64/asm.go | 2 +- src/cmd/link/internal/arm/asm.go | 2 +- src/cmd/link/internal/arm64/asm.go | 2 +- src/cmd/link/internal/ld/data.go | 18 +++++++++--------- src/cmd/link/internal/ld/decodesym.go | 4 ++-- src/cmd/link/internal/ld/dwarf.go | 18 +++++++++--------- src/cmd/link/internal/ld/elf.go | 2 +- src/cmd/link/internal/ld/ldelf.go | 6 +++--- src/cmd/link/internal/ld/ldmacho.go | 16 ++++++++-------- src/cmd/link/internal/ld/ldpe.go | 8 ++++---- src/cmd/link/internal/ld/lib.go | 4 
++-- src/cmd/link/internal/ld/macho.go | 4 ++-- src/cmd/link/internal/ld/objfile.go | 2 +- src/cmd/link/internal/ld/pcln.go | 4 ++-- src/cmd/link/internal/ld/pe.go | 4 ++-- src/cmd/link/internal/ld/symtab.go | 8 ++++---- src/cmd/link/internal/mips64/asm.go | 4 ++-- src/cmd/link/internal/ppc64/asm.go | 2 +- src/cmd/link/internal/x86/asm.go | 2 +- src/cmd/vet/structtag.go | 2 +- 37 files changed, 108 insertions(+), 108 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go index d674914c67..24906e2cce 100644 --- a/src/cmd/asm/internal/asm/asm.go +++ b/src/cmd/asm/internal/asm/asm.go @@ -59,7 +59,7 @@ func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) { } p.pendingLabels = p.pendingLabels[0:0] } - prog.Pc = int64(p.pc) + prog.Pc = p.pc if *flags.Debug { fmt.Println(p.histLineNum, prog) } @@ -371,7 +371,7 @@ func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) { Offset: p.getConstant(prog, op, &a[0]), } reg := int16(p.getConstant(prog, op, &a[1])) - reg, ok := p.arch.RegisterNumber("R", int16(reg)) + reg, ok := p.arch.RegisterNumber("R", reg) if !ok { p.errorf("bad register number %d", reg) return diff --git a/src/cmd/cgo/ast.go b/src/cmd/cgo/ast.go index 2859d59750..823da43c1d 100644 --- a/src/cmd/cgo/ast.go +++ b/src/cmd/cgo/ast.go @@ -73,7 +73,7 @@ func (f *File) ReadGo(name string) { } for _, spec := range d.Specs { s, ok := spec.(*ast.ImportSpec) - if !ok || string(s.Path.Value) != `"C"` { + if !ok || s.Path.Value != `"C"` { continue } sawC = true @@ -106,7 +106,7 @@ func (f *File) ReadGo(name string) { ws := 0 for _, spec := range d.Specs { s, ok := spec.(*ast.ImportSpec) - if !ok || string(s.Path.Value) != `"C"` { + if !ok || s.Path.Value != `"C"` { d.Specs[ws] = spec ws++ } @@ -147,7 +147,7 @@ func commentText(g *ast.CommentGroup) string { } var pieces []string for _, com := range g.List { - c := string(com.Text) + c := com.Text // Remove comment markers. 
// The parser has given us exactly the comment text. switch c[1] { @@ -242,11 +242,11 @@ func (f *File) saveExport(x interface{}, context string) { return } for _, c := range n.Doc.List { - if !strings.HasPrefix(string(c.Text), "//export ") { + if !strings.HasPrefix(c.Text, "//export ") { continue } - name := strings.TrimSpace(string(c.Text[9:])) + name := strings.TrimSpace(c.Text[9:]) if name == "" { error_(c.Pos(), "export missing name") } diff --git a/src/cmd/compile/internal/s390x/peep.go b/src/cmd/compile/internal/s390x/peep.go index 86258d67da..cd6a8c5d8c 100644 --- a/src/cmd/compile/internal/s390x/peep.go +++ b/src/cmd/compile/internal/s390x/peep.go @@ -135,7 +135,7 @@ func pushback(r0 *gc.Flow) { } } - t := obj.Prog(*r0.Prog) + t := *r0.Prog for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) { p0 = r.Link.Prog p := r.Prog @@ -162,7 +162,7 @@ func pushback(r0 *gc.Flow) { if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 { fmt.Printf("\tafter\n") - for r := (*gc.Flow)(b); ; r = r.Link { + for r := b; ; r = r.Link { fmt.Printf("\t%v\n", r.Prog) if r == r0 { break diff --git a/src/cmd/go/http.go b/src/cmd/go/http.go index 19e1fe4f77..05ea503049 100644 --- a/src/cmd/go/http.go +++ b/src/cmd/go/http.go @@ -30,7 +30,7 @@ var httpClient = http.DefaultClient // when we're connecting to https servers that might not be there // or might be using self-signed certificates. 
var impatientInsecureHTTPClient = &http.Client{ - Timeout: time.Duration(5 * time.Second), + Timeout: 5 * time.Second, Transport: &http.Transport{ TLSClientConfig: &tls.Config{ InsecureSkipVerify: true, diff --git a/src/cmd/go/vcs.go b/src/cmd/go/vcs.go index e3342999fa..4ff71f2168 100644 --- a/src/cmd/go/vcs.go +++ b/src/cmd/go/vcs.go @@ -253,7 +253,7 @@ func bzrResolveRepo(vcsBzr *vcsCmd, rootDir, remoteRepo string) (realRepo string return "", fmt.Errorf("unable to parse output of bzr info") } out = out[:i] - return strings.TrimSpace(string(out)), nil + return strings.TrimSpace(out), nil } // vcsSvn describes how to use Subversion. @@ -294,7 +294,7 @@ func svnRemoteRepo(vcsSvn *vcsCmd, rootDir string) (remoteRepo string, err error return "", fmt.Errorf("unable to parse output of svn info") } out = out[:i] - return strings.TrimSpace(string(out)), nil + return strings.TrimSpace(out), nil } func (v *vcsCmd) String() string { diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go index d833beeb2d..ffa1b416d6 100644 --- a/src/cmd/internal/obj/arm64/obj7.go +++ b/src/cmd/internal/obj/arm64/obj7.go @@ -250,7 +250,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog) { if p.From.Type == obj.TYPE_FCONST { f32 := float32(p.From.Val.(float64)) i32 := math.Float32bits(f32) - literal := fmt.Sprintf("$f32.%08x", uint32(i32)) + literal := fmt.Sprintf("$f32.%08x", i32) s := obj.Linklookup(ctxt, literal, 0) s.Size = 4 p.From.Type = obj.TYPE_MEM @@ -263,7 +263,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog) { case AFMOVD: if p.From.Type == obj.TYPE_FCONST { i64 := math.Float64bits(p.From.Val.(float64)) - literal := fmt.Sprintf("$f64.%016x", uint64(i64)) + literal := fmt.Sprintf("$f64.%016x", i64) s := obj.Linklookup(ctxt, literal, 0) s.Size = 8 p.From.Type = obj.TYPE_MEM diff --git a/src/cmd/internal/obj/data.go b/src/cmd/internal/obj/data.go index d7f0840bc1..5fe4cb10a5 100644 --- a/src/cmd/internal/obj/data.go +++ b/src/cmd/internal/obj/data.go @@ -183,7 
+183,7 @@ func Setuintxx(ctxt *Link, s *LSym, off int64, v uint64, wid int64) int64 { case 4: ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(v)) case 8: - ctxt.Arch.ByteOrder.PutUint64(s.P[off:], uint64(v)) + ctxt.Arch.ByteOrder.PutUint64(s.P[off:], v) } return off + wid diff --git a/src/cmd/internal/obj/mips/asm0.go b/src/cmd/internal/obj/mips/asm0.go index 13e7600c21..73d6cabbcb 100644 --- a/src/cmd/internal/obj/mips/asm0.go +++ b/src/cmd/internal/obj/mips/asm0.go @@ -1024,7 +1024,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) { o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.To.Reg)) case 5: /* syscall */ - o1 = uint32(oprrr(ctxt, p.As)) + o1 = oprrr(ctxt, p.As) case 6: /* beq r1,[r2],sbra */ v := int32(0) diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go index 60505dfbb5..17175ebf06 100644 --- a/src/cmd/internal/obj/objfile.go +++ b/src/cmd/internal/obj/objfile.go @@ -371,9 +371,9 @@ func (w *objWriter) writeSymDebug(s *LSym) { name = "TLS" } if ctxt.Arch.InFamily(sys.ARM, sys.PPC64) { - fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s+%x\n", int(r.Off), r.Siz, r.Type, name, uint64(int64(r.Add))) + fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s+%x\n", int(r.Off), r.Siz, r.Type, name, uint64(r.Add)) } else { - fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s+%d\n", int(r.Off), r.Siz, r.Type, name, int64(r.Add)) + fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s+%d\n", int(r.Off), r.Siz, r.Type, name, r.Add) } } } @@ -473,7 +473,7 @@ func (w *objWriter) writeSym(s *LSym) { func (w *objWriter) writeInt(sval int64) { var v uint64 - uv := (uint64(sval) << 1) ^ uint64(int64(sval>>63)) + uv := (uint64(sval) << 1) ^ uint64(sval>>63) p := w.varintbuf[:] for v = uv; v >= 0x80; v >>= 7 { p[0] = uint8(v | 0x80) diff --git a/src/cmd/internal/obj/pcln.go b/src/cmd/internal/obj/pcln.go index eca7531f3c..a086be9f66 100644 --- a/src/cmd/internal/obj/pcln.go +++ b/src/cmd/internal/obj/pcln.go @@ -64,7 +64,7 @@ func funcpctab(ctxt 
*Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(* if val == oldval && started != 0 { val = valfunc(ctxt, func_, val, p, 1, arg) if ctxt.Debugpcln != 0 { - fmt.Fprintf(ctxt.Bso, "%6x %6s %v\n", uint64(int64(p.Pc)), "", p) + fmt.Fprintf(ctxt.Bso, "%6x %6s %v\n", uint64(p.Pc), "", p) } continue } @@ -76,7 +76,7 @@ func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(* if p.Link != nil && p.Link.Pc == p.Pc { val = valfunc(ctxt, func_, val, p, 1, arg) if ctxt.Debugpcln != 0 { - fmt.Fprintf(ctxt.Bso, "%6x %6s %v\n", uint64(int64(p.Pc)), "", p) + fmt.Fprintf(ctxt.Bso, "%6x %6s %v\n", uint64(p.Pc), "", p) } continue } @@ -96,7 +96,7 @@ func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(* // where the 0x80 bit indicates that the integer continues. if ctxt.Debugpcln != 0 { - fmt.Fprintf(ctxt.Bso, "%6x %6d %v\n", uint64(int64(p.Pc)), val, p) + fmt.Fprintf(ctxt.Bso, "%6x %6d %v\n", uint64(p.Pc), val, p) } if started != 0 { @@ -118,7 +118,7 @@ func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(* if started != 0 { if ctxt.Debugpcln != 0 { - fmt.Fprintf(ctxt.Bso, "%6x done\n", uint64(int64(func_.Text.Pc)+func_.Size)) + fmt.Fprintf(ctxt.Bso, "%6x done\n", uint64(func_.Text.Pc+func_.Size)) } addvarint(ctxt, dst, uint32((func_.Size-pc)/int64(ctxt.Arch.MinLC))) addvarint(ctxt, dst, 0) // terminator @@ -164,7 +164,7 @@ func pctofileline(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg if file == f { pcln.Lastfile = f pcln.Lastindex = int(i) - return int32(i) + return i } } pcln.File = append(pcln.File, f) diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go index e793f26803..f786f3c443 100644 --- a/src/cmd/internal/obj/ppc64/asm9.go +++ b/src/cmd/internal/obj/ppc64/asm9.go @@ -1384,7 +1384,7 @@ const ( // which relocation to use with a load or store and only supports the needed // instructions. 
func opform(ctxt *obj.Link, insn uint32) int { - switch uint32(insn) { + switch insn { default: ctxt.Diag("bad insn in loadform: %x", insn) case OPVCC(58, 0, 0, 0), // ld @@ -2198,9 +2198,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) { } v := oprrr(ctxt, p.As) t := v & (1<<10 | 1) /* OE|Rc */ - o1 = AOP_RRR(uint32(v)&^uint32(t), REGTMP, uint32(r), uint32(p.From.Reg)) + o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg)) o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg)) - o3 = AOP_RRR(OP_SUBF|uint32(t), uint32(p.To.Reg), REGTMP, uint32(r)) + o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r)) if p.As == AREMU { o4 = o3 @@ -2216,9 +2216,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) { } v := oprrr(ctxt, p.As) t := v & (1<<10 | 1) /* OE|Rc */ - o1 = AOP_RRR(uint32(v)&^uint32(t), REGTMP, uint32(r), uint32(p.From.Reg)) + o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg)) o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg)) - o3 = AOP_RRR(OP_SUBF|uint32(t), uint32(p.To.Reg), REGTMP, uint32(r)) + o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r)) case 52: /* mtfsbNx cr(n) */ v := regoff(ctxt, &p.From) & 31 @@ -2485,7 +2485,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) { ctxt.Diag("invalid offset against tls var %v", p) } o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) - o2 = AOP_IRR(uint32(opload(ctxt, AMOVD)), uint32(p.To.Reg), uint32(p.To.Reg), 0) + o2 = AOP_IRR(opload(ctxt, AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0) rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc) rel.Siz = 8 @@ -2499,7 +2499,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) { } o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) - o2 = AOP_IRR(uint32(opload(ctxt, AMOVD)), uint32(p.To.Reg), uint32(p.To.Reg), 0) + o2 = AOP_IRR(opload(ctxt, AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0) rel := obj.Addrel(ctxt.Cursym) rel.Off = int32(ctxt.Pc) rel.Siz = 8 
diff --git a/src/cmd/internal/obj/util.go b/src/cmd/internal/obj/util.go index 04e6a76e1a..294cedcb0a 100644 --- a/src/cmd/internal/obj/util.go +++ b/src/cmd/internal/obj/util.go @@ -279,7 +279,7 @@ func Dconv(p *Prog, a *Addr) string { case TYPE_SHIFT: v := int(a.Offset) - op := string("<<>>->@>"[((v>>5)&3)<<1:]) + op := "<<>>->@>"[((v>>5)&3)<<1:] if v&(1<<4) != 0 { str = fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15) } else { diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go index e806a834fd..57ef045b98 100644 --- a/src/cmd/internal/obj/x86/asm6.go +++ b/src/cmd/internal/obj/x86/asm6.go @@ -3308,7 +3308,7 @@ func doasm(ctxt *obj.Link, p *obj.Prog) { case Pf2, /* xmm opcode escape */ Pf3: - ctxt.AsmBuf.Put2(byte(o.prefix), Pm) + ctxt.AsmBuf.Put2(o.prefix, Pm) case Pef3: ctxt.AsmBuf.Put3(Pe, Pf3, Pm) @@ -3421,7 +3421,7 @@ func doasm(ctxt *obj.Link, p *obj.Prog) { asmand(ctxt, p, &p.From, &p.To) case Zm2_r: - ctxt.AsmBuf.Put2(byte(op), byte(o.op[z+1])) + ctxt.AsmBuf.Put2(byte(op), o.op[z+1]) asmand(ctxt, p, &p.From, &p.To) case Zm_r_xm: @@ -3531,7 +3531,7 @@ func doasm(ctxt *obj.Link, p *obj.Prog) { } ctxt.AsmBuf.Put1(byte(op)) if p.As == AXABORT { - ctxt.AsmBuf.Put1(byte(o.op[z+1])) + ctxt.AsmBuf.Put1(o.op[z+1]) } ctxt.AsmBuf.Put1(byte(vaddr(ctxt, p, a, nil))) @@ -3657,7 +3657,7 @@ func doasm(ctxt *obj.Link, p *obj.Prog) { if yt.zcase == Zcallcon { ctxt.AsmBuf.Put1(byte(op)) } else { - ctxt.AsmBuf.Put1(byte(o.op[z+1])) + ctxt.AsmBuf.Put1(o.op[z+1]) } r = obj.Addrel(ctxt.Cursym) r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len())) @@ -3667,7 +3667,7 @@ func doasm(ctxt *obj.Link, p *obj.Prog) { ctxt.AsmBuf.PutInt32(0) case Zcallind: - ctxt.AsmBuf.Put2(byte(op), byte(o.op[z+1])) + ctxt.AsmBuf.Put2(byte(op), o.op[z+1]) r = obj.Addrel(ctxt.Cursym) r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len())) r.Type = obj.R_ADDR @@ -3722,7 +3722,7 @@ func doasm(ctxt *obj.Link, p *obj.Prog) { log.Fatalf("bad code") } - ctxt.AsmBuf.Put1(byte(o.op[z+1])) 
+ ctxt.AsmBuf.Put1(o.op[z+1]) r = obj.Addrel(ctxt.Cursym) r.Off = int32(p.Pc + int64(ctxt.AsmBuf.Len())) r.Sym = p.To.Sym @@ -3762,7 +3762,7 @@ func doasm(ctxt *obj.Link, p *obj.Prog) { v-- } - ctxt.AsmBuf.Put1(byte(o.op[z+1])) + ctxt.AsmBuf.Put1(o.op[z+1]) ctxt.AsmBuf.PutInt32(int32(v)) } @@ -3784,7 +3784,7 @@ func doasm(ctxt *obj.Link, p *obj.Prog) { if yt.zcase == Zbr { ctxt.AsmBuf.Put1(0x0f) } - ctxt.AsmBuf.Put1(byte(o.op[z+1])) + ctxt.AsmBuf.Put1(o.op[z+1]) ctxt.AsmBuf.PutInt32(0) } diff --git a/src/cmd/internal/objfile/plan9obj.go b/src/cmd/internal/objfile/plan9obj.go index 1d808f77eb..6ee389dc2e 100644 --- a/src/cmd/internal/objfile/plan9obj.go +++ b/src/cmd/internal/objfile/plan9obj.go @@ -59,7 +59,7 @@ func (f *plan9File) symbols() ([]Sym, error) { if !validSymType[s.Type] { continue } - sym := Sym{Addr: s.Value, Name: s.Name, Code: rune(s.Type)} + sym := Sym{Addr: s.Value, Name: s.Name, Code: s.Type} i := sort.Search(len(addrs), func(x int) bool { return addrs[x] > s.Value }) if i < len(addrs) { sym.Size = int64(addrs[i] - s.Value) diff --git a/src/cmd/internal/pprof/profile/legacy_profile.go b/src/cmd/internal/pprof/profile/legacy_profile.go index 3d4da6b4d7..8ccfe45176 100644 --- a/src/cmd/internal/pprof/profile/legacy_profile.go +++ b/src/cmd/internal/pprof/profile/legacy_profile.go @@ -74,7 +74,7 @@ func parseGoCount(b []byte) (*Profile, error) { if m == nil { return nil, errUnrecognized } - profileType := string(m[1]) + profileType := m[1] p := &Profile{ PeriodType: &ValueType{Type: profileType, Unit: "count"}, Period: 1, @@ -99,11 +99,11 @@ func parseGoCount(b []byte) (*Profile, error) { if m == nil { return nil, errMalformed } - n, err := strconv.ParseInt(string(m[1]), 0, 64) + n, err := strconv.ParseInt(m[1], 0, 64) if err != nil { return nil, errMalformed } - fields := strings.Fields(string(m[2])) + fields := strings.Fields(m[2]) locs := make([]*Location, 0, len(fields)) for _, stk := range fields { addr, err := strconv.ParseUint(stk, 0, 64) @@ 
-458,7 +458,7 @@ func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust boo } p.Sample = append(p.Sample, &Sample{ - Value: []int64{int64(count), int64(count) * int64(p.Period)}, + Value: []int64{int64(count), int64(count) * p.Period}, Location: sloc, }) } @@ -488,7 +488,7 @@ func parseHeap(b []byte) (p *Profile, err error) { var period int64 if len(header[6]) > 0 { - if period, err = strconv.ParseInt(string(header[6]), 10, 64); err != nil { + if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { return nil, errUnrecognized } } diff --git a/src/cmd/internal/unvendor/golang.org/x/arch/arm/armasm/decode.go b/src/cmd/internal/unvendor/golang.org/x/arch/arm/armasm/decode.go index 6b4d73841b..cc81dc3f50 100644 --- a/src/cmd/internal/unvendor/golang.org/x/arch/arm/armasm/decode.go +++ b/src/cmd/internal/unvendor/golang.org/x/arch/arm/armasm/decode.go @@ -233,9 +233,9 @@ func decodeArg(aop instArg, x uint32) Arg { typ, count := decodeShift(x) // ROR #0 here means ROR #0, but decodeShift rewrites to RRX #1. if typ == RotateRightExt { - return Reg(Rm) + return Rm } - return RegShift{Rm, typ, uint8(count)} + return RegShift{Rm, typ, count} case arg_R_shift_R: Rm := Reg(x & (1<<4 - 1)) @@ -247,9 +247,9 @@ func decodeArg(aop instArg, x uint32) Arg { Rm := Reg(x & (1<<4 - 1)) typ, count := decodeShift(x) if typ == ShiftLeft && count == 0 { - return Reg(Rm) + return Rm } - return RegShift{Rm, typ, uint8(count)} + return RegShift{Rm, typ, count} case arg_R1_0: return Reg((x & (1<<4 - 1))) diff --git a/src/cmd/internal/unvendor/golang.org/x/arch/x86/x86asm/decode.go b/src/cmd/internal/unvendor/golang.org/x/arch/x86/x86asm/decode.go index e4122c1e6d..9b3597300e 100644 --- a/src/cmd/internal/unvendor/golang.org/x/arch/x86/x86asm/decode.go +++ b/src/cmd/internal/unvendor/golang.org/x/arch/x86/x86asm/decode.go @@ -1041,7 +1041,7 @@ Decode: case xArgMoffs8, xArgMoffs16, xArgMoffs32, xArgMoffs64: // TODO(rsc): Can address be 64 bits? 
- mem = Mem{Disp: int64(immc)} + mem = Mem{Disp: immc} if segIndex >= 0 { mem.Segment = prefixToSegment(inst.Prefix[segIndex]) inst.Prefix[segIndex] |= PrefixImplicit diff --git a/src/cmd/link/internal/amd64/asm.go b/src/cmd/link/internal/amd64/asm.go index a6dce6c2c9..ab96a59151 100644 --- a/src/cmd/link/internal/amd64/asm.go +++ b/src/cmd/link/internal/amd64/asm.go @@ -735,7 +735,7 @@ func asmb() { if sym != nil { ld.Lcsize = int32(len(sym.P)) for i := 0; int32(i) < ld.Lcsize; i++ { - ld.Cput(uint8(sym.P[i])) + ld.Cput(sym.P[i]) } ld.Cflush() diff --git a/src/cmd/link/internal/arm/asm.go b/src/cmd/link/internal/arm/asm.go index 1188615716..69e1d8f317 100644 --- a/src/cmd/link/internal/arm/asm.go +++ b/src/cmd/link/internal/arm/asm.go @@ -649,7 +649,7 @@ func asmb() { if sym != nil { ld.Lcsize = int32(len(sym.P)) for i := 0; int32(i) < ld.Lcsize; i++ { - ld.Cput(uint8(sym.P[i])) + ld.Cput(sym.P[i]) } ld.Cflush() diff --git a/src/cmd/link/internal/arm64/asm.go b/src/cmd/link/internal/arm64/asm.go index d3ba5ff3f3..d8ffffa157 100644 --- a/src/cmd/link/internal/arm64/asm.go +++ b/src/cmd/link/internal/arm64/asm.go @@ -488,7 +488,7 @@ func asmb() { if sym != nil { ld.Lcsize = int32(len(sym.P)) for i := 0; int32(i) < ld.Lcsize; i++ { - ld.Cput(uint8(sym.P[i])) + ld.Cput(sym.P[i]) } ld.Cflush() diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index cf51b0a908..105503f6ef 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -80,7 +80,7 @@ func setuintxx(ctxt *Link, s *LSym, off int64, v uint64, wid int64) int64 { case 4: ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(v)) case 8: - ctxt.Arch.ByteOrder.PutUint64(s.P[off:], uint64(v)) + ctxt.Arch.ByteOrder.PutUint64(s.P[off:], v) } return off + wid @@ -757,7 +757,7 @@ func blk(start *LSym, addr int64, size int64) { } Ctxt.Cursym = sym if sym.Value < addr { - Diag("phase error: addr=%#x but sym=%#x type=%d", int64(addr), int64(sym.Value), sym.Type) + 
Diag("phase error: addr=%#x but sym=%#x type=%d", addr, sym.Value, sym.Type) errorexit() } @@ -773,7 +773,7 @@ func blk(start *LSym, addr int64, size int64) { addr = sym.Value + sym.Size } if addr != sym.Value+sym.Size { - Diag("phase error: addr=%#x value+size=%#x", int64(addr), int64(sym.Value)+sym.Size) + Diag("phase error: addr=%#x value+size=%#x", addr, sym.Value+sym.Size) errorexit() } @@ -821,14 +821,14 @@ func Codeblk(addr int64, size int64) { } if addr < sym.Value { - fmt.Fprintf(Bso, "%-20s %.8x|", "_", uint64(int64(addr))) + fmt.Fprintf(Bso, "%-20s %.8x|", "_", uint64(addr)) for ; addr < sym.Value; addr++ { fmt.Fprintf(Bso, " %.2x", 0) } fmt.Fprintf(Bso, "\n") } - fmt.Fprintf(Bso, "%.6x\t%-20s\n", uint64(int64(addr)), sym.Name) + fmt.Fprintf(Bso, "%.6x\t%-20s\n", uint64(addr), sym.Name) q = sym.P for len(q) >= 16 { @@ -844,7 +844,7 @@ func Codeblk(addr int64, size int64) { } if addr < eaddr { - fmt.Fprintf(Bso, "%-20s %.8x|", "_", uint64(int64(addr))) + fmt.Fprintf(Bso, "%-20s %.8x|", "_", uint64(addr)) for ; addr < eaddr; addr++ { fmt.Fprintf(Bso, " %.2x", 0) } @@ -892,7 +892,7 @@ func Datblk(addr int64, size int64) { p = sym.P ep = p[len(sym.P):] for -cap(p) < -cap(ep) { - if -cap(p) > -cap(sym.P) && int(-cap(p)+cap(sym.P))%16 == 0 { + if -cap(p) > -cap(sym.P) && (-cap(p)+cap(sym.P))%16 == 0 { fmt.Fprintf(Bso, "\n\t%.8x|", uint(addr+int64(-cap(p)+cap(sym.P)))) } fmt.Fprintf(Bso, " %.2x", p[0]) @@ -924,7 +924,7 @@ func Datblk(addr int64, size int64) { typ = "call" } - fmt.Fprintf(Bso, "\treloc %.8x/%d %s %s+%#x [%#x]\n", uint(sym.Value+int64(r.Off)), r.Siz, typ, rsname, int64(r.Add), int64(r.Sym.Value+r.Add)) + fmt.Fprintf(Bso, "\treloc %.8x/%d %s %s+%#x [%#x]\n", uint(sym.Value+int64(r.Off)), r.Siz, typ, rsname, r.Add, r.Sym.Value+r.Add) } } } @@ -1279,7 +1279,7 @@ func dodata() { for s := datap; s != nil; s = s.Next { if int64(len(s.P)) > s.Size { - Diag("%s: initialize bounds (%d < %d)", s.Name, int64(s.Size), len(s.P)) + Diag("%s: initialize bounds 
(%d < %d)", s.Name, s.Size, len(s.P)) } } diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go index 5fa8b4c81f..4725b91d01 100644 --- a/src/cmd/link/internal/ld/decodesym.go +++ b/src/cmd/link/internal/ld/decodesym.go @@ -53,12 +53,12 @@ func uncommonSize() int { return 2 * SysArch.PtrSize } // runtime.uncommont // Type.commonType.kind func decodetype_kind(s *LSym) uint8 { - return uint8(s.P[2*SysArch.PtrSize+7] & obj.KindMask) // 0x13 / 0x1f + return s.P[2*SysArch.PtrSize+7] & obj.KindMask // 0x13 / 0x1f } // Type.commonType.kind func decodetype_usegcprog(s *LSym) uint8 { - return uint8(s.P[2*SysArch.PtrSize+7] & obj.KindGCProg) // 0x13 / 0x1f + return s.P[2*SysArch.PtrSize+7] & obj.KindGCProg // 0x13 / 0x1f } // Type.commonType.size diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go index b1208b63a8..bec9946ec5 100644 --- a/src/cmd/link/internal/ld/dwarf.go +++ b/src/cmd/link/internal/ld/dwarf.go @@ -615,7 +615,7 @@ func putattr(s *LSym, abbrev int, form int, cls int, value int64, data interface Adduint8(Ctxt, s, uint8(value)) p := data.([]byte) for i := 0; int64(i) < value; i++ { - Adduint8(Ctxt, s, uint8(p[i])) + Adduint8(Ctxt, s, p[i]) } case DW_FORM_block2: // block @@ -624,7 +624,7 @@ func putattr(s *LSym, abbrev int, form int, cls int, value int64, data interface Adduint16(Ctxt, s, uint16(value)) p := data.([]byte) for i := 0; int64(i) < value; i++ { - Adduint8(Ctxt, s, uint8(p[i])) + Adduint8(Ctxt, s, p[i]) } case DW_FORM_block4: // block @@ -633,7 +633,7 @@ func putattr(s *LSym, abbrev int, form int, cls int, value int64, data interface Adduint32(Ctxt, s, uint32(value)) p := data.([]byte) for i := 0; int64(i) < value; i++ { - Adduint8(Ctxt, s, uint8(p[i])) + Adduint8(Ctxt, s, p[i]) } case DW_FORM_block: // block @@ -641,7 +641,7 @@ func putattr(s *LSym, abbrev int, form int, cls int, value int64, data interface p := data.([]byte) for i := 0; int64(i) < value; i++ { - Adduint8(Ctxt, s, 
uint8(p[i])) + Adduint8(Ctxt, s, p[i]) } case DW_FORM_data1: // constant @@ -1179,7 +1179,7 @@ func synthesizemaptypes(die *DWDie) { // Construct type to represent an array of BucketSize keys keyname := nameFromDIESym(keytype) dwhks := mkinternaltype(DW_ABRV_ARRAYTYPE, "[]key", keyname, "", func(dwhk *DWDie) { - newattr(dwhk, DW_AT_byte_size, DW_CLS_CONSTANT, BucketSize*int64(keysize), 0) + newattr(dwhk, DW_AT_byte_size, DW_CLS_CONSTANT, BucketSize*keysize, 0) t := keytype if indirect_key { t = defptrto(keytype) @@ -1193,7 +1193,7 @@ func synthesizemaptypes(die *DWDie) { // Construct type to represent an array of BucketSize values valname := nameFromDIESym(valtype) dwhvs := mkinternaltype(DW_ABRV_ARRAYTYPE, "[]val", valname, "", func(dwhv *DWDie) { - newattr(dwhv, DW_AT_byte_size, DW_CLS_CONSTANT, BucketSize*int64(valsize), 0) + newattr(dwhv, DW_AT_byte_size, DW_CLS_CONSTANT, BucketSize*valsize, 0) t := valtype if indirect_val { t = defptrto(valtype) @@ -1225,7 +1225,7 @@ func synthesizemaptypes(die *DWDie) { newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize))+int32(SysArch.PtrSize)) } - newattr(dwhb, DW_AT_byte_size, DW_CLS_CONSTANT, BucketSize+BucketSize*int64(keysize)+BucketSize*int64(valsize)+int64(SysArch.RegSize), 0) + newattr(dwhb, DW_AT_byte_size, DW_CLS_CONSTANT, BucketSize+BucketSize*keysize+BucketSize*valsize+int64(SysArch.RegSize), 0) }) // Construct hash @@ -1269,7 +1269,7 @@ func synthesizechantypes(die *DWDie) { } else { elemsize = 0 } - newattr(dws, DW_AT_byte_size, DW_CLS_CONSTANT, int64(sudogsize)+int64(elemsize), nil) + newattr(dws, DW_AT_byte_size, DW_CLS_CONSTANT, int64(sudogsize)+elemsize, nil) }) // waitq @@ -1787,7 +1787,7 @@ func writeinfo(prev *LSym) *LSym { } setuint32(Ctxt, s, 0, uint32(cusize)) - newattr(compunit, DW_AT_byte_size, DW_CLS_CONSTANT, int64(cusize), 0) + newattr(compunit, DW_AT_byte_size, DW_CLS_CONSTANT, cusize, 0) } return prev } diff --git a/src/cmd/link/internal/ld/elf.go 
b/src/cmd/link/internal/ld/elf.go index 7c760775b5..02f7897db9 100644 --- a/src/cmd/link/internal/ld/elf.go +++ b/src/cmd/link/internal/ld/elf.go @@ -2026,7 +2026,7 @@ func doelf() { h.Write(l.hash) } addgonote(".note.go.abihash", ELF_NOTE_GOABIHASH_TAG, h.Sum([]byte{})) - addgonote(".note.go.pkg-list", ELF_NOTE_GOPKGLIST_TAG, []byte(pkglistfornote)) + addgonote(".note.go.pkg-list", ELF_NOTE_GOPKGLIST_TAG, pkglistfornote) var deplist []string for _, shlib := range Ctxt.Shlibs { deplist = append(deplist, filepath.Base(shlib.Path)) diff --git a/src/cmd/link/internal/ld/ldelf.go b/src/cmd/link/internal/ld/ldelf.go index d07a2a2c34..59e71f4dd4 100644 --- a/src/cmd/link/internal/ld/ldelf.go +++ b/src/cmd/link/internal/ld/ldelf.go @@ -500,7 +500,7 @@ func ldelf(f *bio.Reader, pkg string, length int64, pn string) { elfobj.e = e elfobj.f = f - elfobj.base = int64(base) + elfobj.base = base elfobj.length = length elfobj.name = pn @@ -612,7 +612,7 @@ func ldelf(f *bio.Reader, pkg string, length int64, pn string) { goto bad } - sect.nameoff = uint32(e.Uint32(b.Name[:])) + sect.nameoff = e.Uint32(b.Name[:]) sect.type_ = e.Uint32(b.Type[:]) sect.flags = e.Uint64(b.Flags[:]) sect.addr = e.Uint64(b.Addr[:]) @@ -629,7 +629,7 @@ func ldelf(f *bio.Reader, pkg string, length int64, pn string) { goto bad } - sect.nameoff = uint32(e.Uint32(b.Name[:])) + sect.nameoff = e.Uint32(b.Name[:]) sect.type_ = e.Uint32(b.Type[:]) sect.flags = uint64(e.Uint32(b.Flags[:])) sect.addr = uint64(e.Uint32(b.Addr[:])) diff --git a/src/cmd/link/internal/ld/ldmacho.go b/src/cmd/link/internal/ld/ldmacho.go index 8dc4033bbc..105fc137f9 100644 --- a/src/cmd/link/internal/ld/ldmacho.go +++ b/src/cmd/link/internal/ld/ldmacho.go @@ -399,8 +399,8 @@ func macholoadsym(m *LdMachoObj, symtab *LdMachoSymtab) int { return -1 } s.name = cstring(strbuf[v:]) - s.type_ = uint8(p[4]) - s.sectnum = uint8(p[5]) + s.type_ = p[4] + s.sectnum = p[5] s.desc = m.e.Uint16(p[6:]) if m.is64 { s.value = m.e.Uint64(p[8:]) @@ -460,8 
+460,8 @@ func ldmacho(f *bio.Reader, pkg string, length int64, pn string) { } is64 = e.Uint32(hdr[:]) == 0xFEEDFACF - ncmd = e.Uint32([]byte(hdr[4*4:])) - cmdsz = e.Uint32([]byte(hdr[5*4:])) + ncmd = e.Uint32(hdr[4*4:]) + cmdsz = e.Uint32(hdr[5*4:]) if ncmd > 0x10000 || cmdsz >= 0x01000000 { err = fmt.Errorf("implausible mach-o header ncmd=%d cmdsz=%d", ncmd, cmdsz) goto bad @@ -475,11 +475,11 @@ func ldmacho(f *bio.Reader, pkg string, length int64, pn string) { m.f = f m.e = e - m.cputype = uint(e.Uint32([]byte(hdr[1*4:]))) - m.subcputype = uint(e.Uint32([]byte(hdr[2*4:]))) - m.filetype = e.Uint32([]byte(hdr[3*4:])) + m.cputype = uint(e.Uint32(hdr[1*4:])) + m.subcputype = uint(e.Uint32(hdr[2*4:])) + m.filetype = e.Uint32(hdr[3*4:]) m.ncmd = uint(ncmd) - m.flags = e.Uint32([]byte(hdr[6*4:])) + m.flags = e.Uint32(hdr[6*4:]) m.is64 = is64 m.base = base m.length = length diff --git a/src/cmd/link/internal/ld/ldpe.go b/src/cmd/link/internal/ld/ldpe.go index 7f7121ff94..c51479fb4e 100644 --- a/src/cmd/link/internal/ld/ldpe.go +++ b/src/cmd/link/internal/ld/ldpe.go @@ -175,14 +175,14 @@ func ldpe(f *bio.Reader, pkg string, length int64, pn string) { // TODO return error if found .cormeta // load string table - f.Seek(int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(peobj.fh.NumberOfSymbols), 0) + f.Seek(base+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(peobj.fh.NumberOfSymbols), 0) if _, err := io.ReadFull(f, symbuf[:4]); err != nil { goto bad } l = Le32(symbuf[:]) peobj.snames = make([]byte, l) - f.Seek(int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(peobj.fh.NumberOfSymbols), 0) + f.Seek(base+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(peobj.fh.NumberOfSymbols), 0) if _, err := io.ReadFull(f, peobj.snames); err != nil { goto bad } @@ -203,9 +203,9 @@ func ldpe(f *bio.Reader, pkg string, length int64, pn string) { peobj.pesym = make([]PeSym, peobj.fh.NumberOfSymbols) peobj.npesym = 
uint(peobj.fh.NumberOfSymbols) - f.Seek(int64(base)+int64(peobj.fh.PointerToSymbolTable), 0) + f.Seek(base+int64(peobj.fh.PointerToSymbolTable), 0) for i := 0; uint32(i) < peobj.fh.NumberOfSymbols; i += numaux + 1 { - f.Seek(int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(i), 0) + f.Seek(base+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(i), 0) if _, err := io.ReadFull(f, symbuf[:]); err != nil { goto bad } diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index bdcc84a129..a18098e7e7 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -765,7 +765,7 @@ func nextar(bp *bio.Reader, off int64, a *ArHdr) int64 { if arsize&1 != 0 { arsize++ } - return int64(arsize) + SAR_HDR + return arsize + SAR_HDR } func objfile(lib *Library) { @@ -1953,7 +1953,7 @@ func genasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) { continue } if len(s.P) > 0 { - Diag("%s should not be bss (size=%d type=%d special=%v)", s.Name, int(len(s.P)), s.Type, s.Attr.Special()) + Diag("%s should not be bss (size=%d type=%d special=%v)", s.Name, len(s.P), s.Type, s.Attr.Special()) } put(s, s.Name, 'B', Symaddr(s), s.Size, int(s.Version), s.Gotype) diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index 1d9a1a9324..46cce4c331 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -703,11 +703,11 @@ func machosymtab() { Addstring(symstr, s.Extname) } else { for p = s.Extname; p != ""; p = p[1:] { - if uint8(p[0]) == 0xc2 && uint8((p[1:])[0]) == 0xb7 { + if p[0] == 0xc2 && (p[1:])[0] == 0xb7 { Adduint8(Ctxt, symstr, '.') p = p[1:] } else { - Adduint8(Ctxt, symstr, uint8(p[0])) + Adduint8(Ctxt, symstr, p[0]) } } diff --git a/src/cmd/link/internal/ld/objfile.go b/src/cmd/link/internal/ld/objfile.go index b4d2a2184f..dffb7a3d9b 100644 --- a/src/cmd/link/internal/ld/objfile.go +++ b/src/cmd/link/internal/ld/objfile.go @@ 
-472,7 +472,7 @@ func (r *objReader) readInt64() int64 { } } - return int64(uv>>1) ^ (int64(uint64(uv)<<63) >> 63) + return int64(uv>>1) ^ (int64(uv<<63) >> 63) } func (r *objReader) readInt() int { diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go index 74ef8c2929..345eaa1ac2 100644 --- a/src/cmd/link/internal/ld/pcln.go +++ b/src/cmd/link/internal/ld/pcln.go @@ -179,7 +179,7 @@ func renumberfiles(ctxt *Link, files []*LSym, d *Pcdata) { dv = val - newval newval = val - v = (uint32(dv) << 1) ^ uint32(int32(dv>>31)) + v = (uint32(dv) << 1) ^ uint32(dv>>31) addvarint(&out, v) // pc delta @@ -378,7 +378,7 @@ func pclntab() { ftab.Size = int64(len(ftab.P)) if Debug['v'] != 0 { - fmt.Fprintf(Bso, "%5.2f pclntab=%d bytes, funcdata total %d bytes\n", obj.Cputime(), int64(ftab.Size), int64(funcdata_bytes)) + fmt.Fprintf(Bso, "%5.2f pclntab=%d bytes, funcdata total %d bytes\n", obj.Cputime(), ftab.Size, funcdata_bytes) } } diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go index 0204b8c8c2..8985c40588 100644 --- a/src/cmd/link/internal/ld/pe.go +++ b/src/cmd/link/internal/ld/pe.go @@ -877,7 +877,7 @@ func peemitreloc(text, data, ctors *IMAGE_SECTION_HEADER) { ctors.NumberOfRelocations = 1 ctors.PointerToRelocations = uint32(Cpos()) sectoff := ctors.VirtualAddress - Lputl(uint32(sectoff)) + Lputl(sectoff) Lputl(uint32(dottext.Dynid)) switch obj.Getgoarch() { default: @@ -1043,7 +1043,7 @@ func addpesymtable() { // write COFF string table Lputl(uint32(len(strtbl)) + 4) for i := 0; i < len(strtbl); i++ { - Cput(uint8(strtbl[i])) + Cput(strtbl[i]) } if Linkmode != LinkExternal { strnput("", int(h.SizeOfRawData-uint32(size))) diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index 60bec0d6c9..96e8de5030 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -236,10 +236,10 @@ func putplan9sym(x *LSym, s string, t int, addr int64, size int64, ver int, 
go_ var i int if t == 'z' || t == 'Z' { - Cput(uint8(s[0])) + Cput(s[0]) for i = 1; s[i] != 0 || s[i+1] != 0; i += 2 { - Cput(uint8(s[i])) - Cput(uint8(s[i+1])) + Cput(s[i]) + Cput(s[i+1]) } Cput(0) @@ -251,7 +251,7 @@ func putplan9sym(x *LSym, s string, t int, addr int64, size int64, ver int, go_ s = s[1:] } for i = 0; i < len(s); i++ { - Cput(uint8(s[i])) + Cput(s[i]) } Cput(0) } diff --git a/src/cmd/link/internal/mips64/asm.go b/src/cmd/link/internal/mips64/asm.go index ad6a1f7524..785002b02c 100644 --- a/src/cmd/link/internal/mips64/asm.go +++ b/src/cmd/link/internal/mips64/asm.go @@ -193,7 +193,7 @@ func asmb() { if sym != nil { ld.Lcsize = int32(len(sym.P)) for i := 0; int32(i) < ld.Lcsize; i++ { - ld.Cput(uint8(sym.P[i])) + ld.Cput(sym.P[i]) } ld.Cflush() @@ -214,7 +214,7 @@ func asmb() { if ld.SysArch == sys.ArchMIPS64LE { magic = uint32(4*26*26 + 7) } - ld.Thearch.Lput(uint32(magic)) /* magic */ + ld.Thearch.Lput(magic) /* magic */ ld.Thearch.Lput(uint32(ld.Segtext.Filelen)) /* sizes */ ld.Thearch.Lput(uint32(ld.Segdata.Filelen)) ld.Thearch.Lput(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) diff --git a/src/cmd/link/internal/ppc64/asm.go b/src/cmd/link/internal/ppc64/asm.go index 3970f3c5f9..562e0810e0 100644 --- a/src/cmd/link/internal/ppc64/asm.go +++ b/src/cmd/link/internal/ppc64/asm.go @@ -913,7 +913,7 @@ func asmb() { if sym != nil { ld.Lcsize = int32(len(sym.P)) for i := 0; int32(i) < ld.Lcsize; i++ { - ld.Cput(uint8(sym.P[i])) + ld.Cput(sym.P[i]) } ld.Cflush() diff --git a/src/cmd/link/internal/x86/asm.go b/src/cmd/link/internal/x86/asm.go index 19a8917ec8..5231ad1f6c 100644 --- a/src/cmd/link/internal/x86/asm.go +++ b/src/cmd/link/internal/x86/asm.go @@ -699,7 +699,7 @@ func asmb() { if sym != nil { ld.Lcsize = int32(len(sym.P)) for i := 0; int32(i) < ld.Lcsize; i++ { - ld.Cput(uint8(sym.P[i])) + ld.Cput(sym.P[i]) } ld.Cflush() diff --git a/src/cmd/vet/structtag.go b/src/cmd/vet/structtag.go index e8164a46f9..abff14fb1d 100644 --- 
a/src/cmd/vet/structtag.go +++ b/src/cmd/vet/structtag.go @@ -111,7 +111,7 @@ func validateStructTag(tag string) error { if i >= len(tag) { return errTagValueSyntax } - qvalue := string(tag[:i+1]) + qvalue := tag[:i+1] tag = tag[i+1:] if _, err := strconv.Unquote(qvalue); err != nil { -- cgit v1.3 From d57a118afabdd5b0f516d8d3225b2c7f8c96d64a Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 15 Apr 2016 14:22:27 -0700 Subject: cmd/compile: remove dead flags For some time now, the -d flag has been used to control various named debug options, rather than setting Debug['d']. Consequently, that means dflag() always returns false, which means the -y flag is also useless. Similarly, Debug['L'] is never used anywhere, so the -L flag can be dropped too. Change-Id: I4bb12454e462410115ec4f5565facf76c5c2f255 Reviewed-on: https://go-review.googlesource.com/22121 Reviewed-by: Brad Fitzpatrick --- src/cmd/compile/internal/gc/dcl.go | 22 ---------------------- src/cmd/compile/internal/gc/main.go | 2 -- 2 files changed, 24 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index e1028f681c..0e4b5f6051 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -13,19 +13,6 @@ import ( // Declaration stack & operations -func dflag() bool { - if Debug['d'] == 0 { - return false - } - if Debug['y'] != 0 { - return true - } - if incannedimport != 0 { - return false - } - return true -} - var externdcl []*Node var blockgen int32 // max block number @@ -67,9 +54,6 @@ func push() *Sym { func pushdcl(s *Sym) *Sym { d := push() dcopy(d, s) - if dflag() { - fmt.Printf("\t%v push %v %p\n", linestr(lineno), s, s.Def) - } return d } @@ -82,9 +66,6 @@ func popdcl() { lno := s.Lastlineno dcopy(s, d) d.Lastlineno = lno - if dflag() { - fmt.Printf("\t%v pop %v %p\n", linestr(lineno), s, s.Def) - } } if d == nil { @@ -194,9 +175,6 @@ func declare(n *Node, ctxt Class) { gen := 0 if 
ctxt == PEXTERN { externdcl = append(externdcl, n) - if dflag() { - fmt.Printf("\t%v global decl %v %p\n", linestr(lineno), s, n) - } } else { if Curfn == nil && ctxt == PAUTO { Fatalf("automatic outside function") diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 2baf9f6585..37e8a17886 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -153,7 +153,6 @@ func Main() { obj.Flagcount("E", "debug symbol export", &Debug['E']) obj.Flagfn1("I", "add `directory` to import search path", addidir) obj.Flagcount("K", "debug missing line numbers", &Debug['K']) - obj.Flagcount("L", "use full (long) path in error messages", &Debug['L']) obj.Flagcount("M", "debug move generation", &Debug['M']) obj.Flagcount("N", "disable optimizations", &Debug['N']) obj.Flagcount("P", "debug peephole optimizer", &Debug['P']) @@ -191,7 +190,6 @@ func Main() { obj.Flagcount("w", "debug type checking", &Debug['w']) flag.BoolVar(&use_writebarrier, "wb", true, "enable write barrier") obj.Flagcount("x", "debug lexer", &Debug['x']) - obj.Flagcount("y", "debug declarations in canned imports (with -d)", &Debug['y']) var flag_shared bool var flag_dynlink bool if supportsDynlink(Thearch.LinkArch.Arch) { -- cgit v1.3 From 2563b6f9fe76da6c9f95c7766986f4684b80ae6d Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Sun, 3 Apr 2016 14:44:29 -0700 Subject: cmd/compile/internal/ssa: use Compare instead of Equal They have different semantics. Equal is stricter and is designed for the front-end. Compare is looser and cheaper and is designed for the back-end. To avoid possible regression, remove Equal from ssa.Type. 
Updates #15043 Change-Id: Ie23ce75ff6b4d01b7982e0a89e6f81b5d099d8d6 Reviewed-on: https://go-review.googlesource.com/21483 Reviewed-by: David Chase Run-TryBot: Josh Bleecher Snyder --- src/cmd/compile/internal/gc/type.go | 13 +++---------- src/cmd/compile/internal/ssa/TODO | 2 -- src/cmd/compile/internal/ssa/cse.go | 2 +- src/cmd/compile/internal/ssa/func.go | 2 +- src/cmd/compile/internal/ssa/stackalloc.go | 4 ++-- src/cmd/compile/internal/ssa/type.go | 11 +---------- 6 files changed, 8 insertions(+), 26 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/type.go b/src/cmd/compile/internal/gc/type.go index a44a85bed8..855b070af6 100644 --- a/src/cmd/compile/internal/gc/type.go +++ b/src/cmd/compile/internal/gc/type.go @@ -863,19 +863,12 @@ func (t *Type) SimpleString() string { return Econv(t.Etype) } -func (t *Type) Equal(u ssa.Type) bool { - x, ok := u.(*Type) - return ok && Eqtype(t, x) -} - // Compare compares types for purposes of the SSA back // end, returning an ssa.Cmp (one of CMPlt, CMPeq, CMPgt). // The answers are correct for an optimizer -// or code generator, but not for Go source. -// For example, "type gcDrainFlags int" results in -// two Go-different types that Compare equal. -// The order chosen is also arbitrary, only division into -// equivalence classes (Types that compare CMPeq) matters. +// or code generator, but not necessarily typechecking. +// The order chosen is arbitrary, only consistency and division +// into equivalence classes (Types that compare CMPeq) matters. func (t *Type) Compare(u ssa.Type) ssa.Cmp { x, ok := u.(*Type) // ssa.CompilerType is smaller than gc.Type diff --git a/src/cmd/compile/internal/ssa/TODO b/src/cmd/compile/internal/ssa/TODO index e081856bd3..dad4880994 100644 --- a/src/cmd/compile/internal/ssa/TODO +++ b/src/cmd/compile/internal/ssa/TODO @@ -41,8 +41,6 @@ Future/other ------------ - Start another architecture (arm?) - 64-bit ops on 32-bit machines -- Investigate type equality. 
During SSA generation, should we use n.Type or (say) TypeBool? - Should we get rid of named types in favor of underlying types during SSA generation? -- Should we introduce a new type equality routine that is less strict than the frontend's? - Infrastructure for enabling/disabling/configuring passes - Modify logging for at least pass=1, to be Warnl compatible diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go index e3f1a1d07d..d501f75e02 100644 --- a/src/cmd/compile/internal/ssa/cse.go +++ b/src/cmd/compile/internal/ssa/cse.go @@ -108,7 +108,7 @@ func cse(f *Func) { break } } - if !equivalent || !v.Type.Equal(w.Type) { + if !equivalent || v.Type.Compare(w.Type) != CMPeq { // w is not equivalent to v. // move it to the end and shrink e. e[j], e[len(e)-1] = e[len(e)-1], e[j] diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index da44f26106..11ff8d3792 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -318,7 +318,7 @@ func (f *Func) constVal(line int32, op Op, t Type, c int64, setAux bool) *Value } vv := f.constants[c] for _, v := range vv { - if v.Op == op && v.Type.Equal(t) { + if v.Op == op && v.Type.Compare(t) == CMPeq { if setAux && v.AuxInt != c { panic(fmt.Sprintf("cached const %s should have AuxInt of %d", v.LongString(), c)) } diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go index e3ef66ab1b..44f4096cb2 100644 --- a/src/cmd/compile/internal/ssa/stackalloc.go +++ b/src/cmd/compile/internal/ssa/stackalloc.go @@ -201,7 +201,7 @@ func (s *stackAllocState) stackalloc() { } else { name = names[v.ID] } - if name.N != nil && v.Type.Equal(name.Type) { + if name.N != nil && v.Type.Compare(name.Type) == CMPeq { for _, id := range s.interfere[v.ID] { h := f.getHome(id) if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off { @@ -372,7 +372,7 @@ func (s *stackAllocState) 
buildInterferenceGraph() { if s.values[v.ID].needSlot { live.remove(v.ID) for _, id := range live.contents() { - if s.values[v.ID].typ.Equal(s.values[id].typ) { + if s.values[v.ID].typ.Compare(s.values[id].typ) == CMPeq { s.interfere[v.ID] = append(s.interfere[v.ID], id) s.interfere[id] = append(s.interfere[id], v.ID) } diff --git a/src/cmd/compile/internal/ssa/type.go b/src/cmd/compile/internal/ssa/type.go index 2a3de282cb..91a4efe78f 100644 --- a/src/cmd/compile/internal/ssa/type.go +++ b/src/cmd/compile/internal/ssa/type.go @@ -40,8 +40,7 @@ type Type interface { String() string SimpleString() string // a coarser generic description of T, e.g. T's underlying type - Equal(Type) bool - Compare(Type) Cmp // compare types, returning one of CMPlt, CMPeq, CMPgt. + Compare(Type) Cmp // compare types, returning one of CMPlt, CMPeq, CMPgt. } // Special compiler-only types. @@ -117,14 +116,6 @@ func (t *CompilerType) Compare(u Type) Cmp { return CMPlt } -func (t *CompilerType) Equal(u Type) bool { - x, ok := u.(*CompilerType) - if !ok { - return false - } - return x == t -} - var ( TypeInvalid = &CompilerType{Name: "invalid"} TypeMem = &CompilerType{Name: "mem", Memory: true} -- cgit v1.3 From 95df0c6ab93f6a42bdc9fd45500fd4d56bfc9add Mon Sep 17 00:00:00 2001 From: David Crawshaw Date: Mon, 28 Mar 2016 21:51:10 -0400 Subject: cmd/compile, etc: use name offset in method tables Introduce and start using nameOff for two encoded names. This pair of changes is best done together because the linker's method decoder expects the method layouts to match. Precursor to converting all existing name and *string fields to nameOff. linux/amd64: cmd/go: -45KB (0.5%) jujud: -389KB (0.6%) linux/amd64 PIE: cmd/go: -170KB (1.4%) jujud: -1.5MB (1.8%) For #6853. 
Change-Id: Ia044423f010fb987ce070b94c46a16fc78666ff6 Reviewed-on: https://go-review.googlesource.com/21396 Reviewed-by: Ian Lance Taylor --- src/cmd/compile/internal/gc/reflect.go | 14 ++--- src/cmd/link/internal/ld/decodesym.go | 18 +++---- src/cmd/link/internal/ld/symtab.go | 2 +- src/reflect/export_test.go | 8 +-- src/reflect/type.go | 96 +++++++++++++++++++++------------- src/reflect/value.go | 8 +-- src/runtime/iface.go | 17 +++--- src/runtime/runtime1.go | 6 +++ src/runtime/type.go | 46 ++++++++++------ 9 files changed, 130 insertions(+), 85 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index b8b9369f37..f782ce0974 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -70,7 +70,7 @@ const ( ) func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{}) -func imethodSize() int { return 2 * Widthptr } // Sizeof(runtime.imethod{}) +func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{}) func uncommonSize(t *Type) int { // Sizeof(runtime.uncommontype{}) if t.Sym == nil && len(methods(t)) == 0 { return 0 @@ -647,13 +647,11 @@ func dextratypeData(s *Sym, ot int, t *Type) int { pkg = a.pkg } nsym := dname(a.name, "", pkg, exported) - ot = dsymptrLSym(lsym, ot, nsym, 0) + + ot = dsymptrOffLSym(lsym, ot, nsym, 0) ot = dmethodptrOffLSym(lsym, ot, Linksym(dtypesym(a.mtype))) ot = dmethodptrOffLSym(lsym, ot, Linksym(a.isym)) ot = dmethodptrOffLSym(lsym, ot, Linksym(a.tsym)) - if Widthptr == 8 { - ot = duintxxLSym(lsym, ot, 0, 4) // pad to reflect.method size - } } return ot } @@ -1226,6 +1224,7 @@ ok: dataAdd := imethodSize() * n ot = dextratype(s, ot, t, dataAdd) + lsym := Linksym(s) for _, a := range m { // ../../../../runtime/type.go:/imethod exported := exportname(a.name) @@ -1234,8 +1233,9 @@ ok: pkg = a.pkg } nsym := dname(a.name, "", pkg, exported) - ot = dsymptrLSym(Linksym(s), ot, nsym, 0) - 
ot = dsymptr(s, ot, dtypesym(a.type_), 0) + + ot = dsymptrOffLSym(lsym, ot, nsym, 0) + ot = dsymptrOffLSym(lsym, ot, Linksym(dtypesym(a.type_)), 0) } // ../../../../runtime/type.go:/mapType diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go index 4725b91d01..5eb20c2fb2 100644 --- a/src/cmd/link/internal/ld/decodesym.go +++ b/src/cmd/link/internal/ld/decodesym.go @@ -262,8 +262,9 @@ const ( ) // decode_methodsig decodes an array of method signature information. -// Each element of the array is size bytes. The first word is a -// reflect.name for the name, the second word is a *rtype for the funcType. +// Each element of the array is size bytes. The first 4 bytes is a +// nameOff for the method name, and the next 4 bytes is a typeOff for +// the function type. // // Conveniently this is the layout of both runtime.method and runtime.imethod. func decode_methodsig(s *LSym, off, size, count int) []methodsig { @@ -271,7 +272,7 @@ func decode_methodsig(s *LSym, off, size, count int) []methodsig { var methods []methodsig for i := 0; i < count; i++ { buf.WriteString(decodetype_name(s, off)) - mtypSym := decode_reloc_sym(s, int32(off+SysArch.PtrSize)) + mtypSym := decode_reloc_sym(s, int32(off+4)) buf.WriteRune('(') inCount := decodetype_funcincount(mtypSym) @@ -311,7 +312,7 @@ func decodetype_ifacemethods(s *LSym) []methodsig { } off := int(r.Add) // array of reflect.imethod values numMethods := int(decodetype_ifacemethodcount(s)) - sizeofIMethod := 2 * SysArch.PtrSize + sizeofIMethod := 4 + 4 return decode_methodsig(s, off, sizeofIMethod, numMethods) } @@ -343,12 +344,7 @@ func decodetype_methods(s *LSym) []methodsig { mcount := int(decode_inuxi(s.P[off+SysArch.PtrSize:], 2)) moff := int(decode_inuxi(s.P[off+SysArch.PtrSize+2:], 2)) - off += moff // offset to array of reflect.method values - var sizeofMethod int // sizeof reflect.method in program - if SysArch.PtrSize == 4 { - sizeofMethod = 4 * SysArch.PtrSize - } else { - 
sizeofMethod = 3 * SysArch.PtrSize - } + off += moff // offset to array of reflect.method values + const sizeofMethod = 4 * 4 // sizeof reflect.method in program return decode_methodsig(s, off, sizeofMethod, mcount) } diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index 96e8de5030..1f07a4eb77 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -427,7 +427,7 @@ func symtab() { if !DynlinkingGo() { s.Attr |= AttrHidden } - if UseRelro() && len(s.R) > 0 { + if UseRelro() { s.Type = obj.STYPERELRO s.Outer = symtyperel } else { diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go index 2769e0db40..f527434f0d 100644 --- a/src/reflect/export_test.go +++ b/src/reflect/export_test.go @@ -50,7 +50,8 @@ func TypeLinks() []string { for i, offs := range offset { rodata := sections[i] for _, off := range offs { - r = append(r, rtypeOff(rodata, off).string) + typ := (*rtype)(resolveTypeOff(unsafe.Pointer(rodata), off)) + r = append(r, typ.string) } } return r @@ -91,10 +92,11 @@ func FirstMethodNameBytes(t Type) *byte { panic("type has no methods") } m := ut.methods()[0] - if *m.name.data(0)&(1<<2) == 0 { + mname := t.(*rtype).nameOff(m.name) + if *mname.data(0)&(1<<2) == 0 { panic("method name does not have pkgPath *string") } - return m.name.bytes + return mname.bytes } type OtherPkgFields struct { diff --git a/src/reflect/type.go b/src/reflect/type.go index b8c778cc2b..0cae69a79c 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -288,7 +288,7 @@ type typeAlg struct { // Method on non-interface type type method struct { - name name // name of method + name nameOff // name of method mtyp typeOff // method type (without receiver) ifn textOff // fn used in interface call (one-word receiver) tfn textOff // fn used for normal method call @@ -347,8 +347,8 @@ type funcType struct { // imethod represents a method on an interface type type imethod struct { - name name // name of 
method - typ *rtype // .(*FuncType) underneath + name nameOff // name of method + typ typeOff // .(*FuncType) underneath } // interfaceType represents an interface type. @@ -424,19 +424,19 @@ type name struct { bytes *byte } -func (n *name) data(off int) *byte { +func (n name) data(off int) *byte { return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off))) } -func (n *name) isExported() bool { +func (n name) isExported() bool { return (*n.bytes)&(1<<0) != 0 } -func (n *name) nameLen() int { +func (n name) nameLen() int { return int(uint16(*n.data(1))<<8 | uint16(*n.data(2))) } -func (n *name) tagLen() int { +func (n name) tagLen() int { if *n.data(0)&(1<<1) == 0 { return 0 } @@ -444,7 +444,7 @@ func (n *name) tagLen() int { return int(uint16(*n.data(off))<<8 | uint16(*n.data(off + 1))) } -func (n *name) name() (s string) { +func (n name) name() (s string) { if n.bytes == nil { return "" } @@ -458,7 +458,7 @@ func (n *name) name() (s string) { return s } -func (n *name) tag() (s string) { +func (n name) tag() (s string) { tl := n.tagLen() if tl == 0 { return "" @@ -470,7 +470,7 @@ func (n *name) tag() (s string) { return s } -func (n *name) pkgPath() string { +func (n name) pkgPath() string { if n.bytes == nil || *n.data(0)&(1<<2) == 0 { return "" } @@ -480,7 +480,7 @@ func (n *name) pkgPath() string { } var nameOff int32 copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:]) - pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n), nameOff))} + pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))} return pkgPathName.name() } @@ -605,6 +605,11 @@ func (t *uncommonType) PkgPath() string { return t.pkgPath.name() } +// resolveNameOff resolves a name offset from a base pointer. +// The (*rtype).nameOff method is a convenience wrapper for this function. +// Implemented in the runtime package. 
+func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer + // resolveTypeOff resolves an *rtype offset from a base type. // The (*rtype).typeOff method is a convenience wrapper for this function. // Implemented in the runtime package. @@ -620,6 +625,12 @@ func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer // be resolved correctly. Implemented in the runtime package. func addReflectOff(ptr unsafe.Pointer) int32 +// resolveReflectType adds a name to the reflection lookup map in the runtime. +// It returns a new nameOff that can be used to refer to the pointer. +func resolveReflectName(n name) nameOff { + return nameOff(addReflectOff(unsafe.Pointer(n.bytes))) +} + // resolveReflectType adds a *rtype to the reflection lookup map in the runtime. // It returns a new typeOff that can be used to refer to the pointer. func resolveReflectType(t *rtype) typeOff { @@ -633,9 +644,17 @@ func resolveReflectText(ptr unsafe.Pointer) textOff { return textOff(addReflectOff(ptr)) } +type nameOff int32 // offset to a name type typeOff int32 // offset to an *rtype type textOff int32 // offset from top of text section +func (t *rtype) nameOff(off nameOff) name { + if off == 0 { + return name{} + } + return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))} +} + func (t *rtype) typeOff(off typeOff) *rtype { if off == 0 { return nil @@ -753,10 +772,11 @@ func (t *rtype) Method(i int) (m Method) { panic("reflect: Method index out of range") } p := ut.methods()[i] - m.Name = p.name.name() + pname := t.nameOff(p.name) + m.Name = pname.name() fl := flag(Func) - if !p.name.isExported() { - m.PkgPath = p.name.pkgPath() + if !pname.isExported() { + m.PkgPath = pname.pkgPath() if m.PkgPath == "" { m.PkgPath = ut.pkgPath.name() } @@ -796,7 +816,8 @@ func (t *rtype) MethodByName(name string) (m Method, ok bool) { utmethods := ut.methods() for i := 0; i < int(ut.mcount); i++ { p := utmethods[i] - if p.name.name() == name { + pname := t.nameOff(p.name) + if 
pname.name() == name { return t.Method(i), true } } @@ -1005,14 +1026,15 @@ func (t *interfaceType) Method(i int) (m Method) { return } p := &t.methods[i] - m.Name = p.name.name() - if !p.name.isExported() { - m.PkgPath = p.name.pkgPath() + pname := t.nameOff(p.name) + m.Name = pname.name() + if !pname.isExported() { + m.PkgPath = pname.pkgPath() if m.PkgPath == "" { m.PkgPath = t.pkgPath.name() } } - m.Type = toType(p.typ) + m.Type = toType(t.typeOff(p.typ)) m.Index = i return } @@ -1028,7 +1050,7 @@ func (t *interfaceType) MethodByName(name string) (m Method, ok bool) { var p *imethod for i := range t.methods { p = &t.methods[i] - if p.name.name() == name { + if t.nameOff(p.name).name() == name { return t.Method(i), true } } @@ -1468,7 +1490,7 @@ func implements(T, V *rtype) bool { for j := 0; j < len(v.methods); j++ { tm := &t.methods[i] vm := &v.methods[j] - if vm.name.name() == tm.name.name() && vm.typ == tm.typ { + if V.nameOff(vm.name).name() == t.nameOff(tm.name).name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) { if i++; i >= len(t.methods) { return true } @@ -1486,7 +1508,7 @@ func implements(T, V *rtype) bool { for j := 0; j < int(v.mcount); j++ { tm := &t.methods[i] vm := vmethods[j] - if vm.name.name() == tm.name.name() && V.typeOff(vm.mtyp) == tm.typ { + if V.nameOff(vm.name).name() == t.nameOff(tm.name).name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) { if i++; i >= len(t.methods) { return true } @@ -2327,12 +2349,13 @@ func StructOf(fields []StructField) Type { case Interface: ift := (*interfaceType)(unsafe.Pointer(ft)) for im, m := range ift.methods { - if m.name.pkgPath() != "" { + if ift.nameOff(m.name).pkgPath() != "" { // TODO(sbinet) panic("reflect: embedded interface with unexported method(s) not implemented") } var ( + mtyp = ift.typeOff(m.typ) ifield = i imethod = im ifn Value @@ -2340,7 +2363,7 @@ func StructOf(fields []StructField) Type { ) if ft.kind&kindDirectIface != 0 { - tfn = MakeFunc(m.typ, func(in []Value) []Value { + tfn = 
MakeFunc(mtyp, func(in []Value) []Value { var args []Value var recv = in[0] if len(in) > 1 { @@ -2348,7 +2371,7 @@ func StructOf(fields []StructField) Type { } return recv.Field(ifield).Method(imethod).Call(args) }) - ifn = MakeFunc(m.typ, func(in []Value) []Value { + ifn = MakeFunc(mtyp, func(in []Value) []Value { var args []Value var recv = in[0] if len(in) > 1 { @@ -2357,7 +2380,7 @@ func StructOf(fields []StructField) Type { return recv.Field(ifield).Method(imethod).Call(args) }) } else { - tfn = MakeFunc(m.typ, func(in []Value) []Value { + tfn = MakeFunc(mtyp, func(in []Value) []Value { var args []Value var recv = in[0] if len(in) > 1 { @@ -2365,7 +2388,7 @@ func StructOf(fields []StructField) Type { } return recv.Field(ifield).Method(imethod).Call(args) }) - ifn = MakeFunc(m.typ, func(in []Value) []Value { + ifn = MakeFunc(mtyp, func(in []Value) []Value { var args []Value var recv = Indirect(in[0]) if len(in) > 1 { @@ -2376,8 +2399,8 @@ func StructOf(fields []StructField) Type { } methods = append(methods, method{ - name: m.name, - mtyp: resolveReflectType(m.typ), + name: resolveReflectName(ift.nameOff(m.name)), + mtyp: resolveReflectType(mtyp), ifn: resolveReflectText(unsafe.Pointer(&ifn)), tfn: resolveReflectText(unsafe.Pointer(&tfn)), }) @@ -2386,12 +2409,13 @@ func StructOf(fields []StructField) Type { ptr := (*ptrType)(unsafe.Pointer(ft)) if unt := ptr.uncommon(); unt != nil { for _, m := range unt.methods() { - if m.name.pkgPath() != "" { + mname := ptr.nameOff(m.name) + if mname.pkgPath() != "" { // TODO(sbinet) panic("reflect: embedded interface with unexported method(s) not implemented") } methods = append(methods, method{ - name: m.name, + name: resolveReflectName(mname), mtyp: resolveReflectType(ptr.typeOff(m.mtyp)), ifn: resolveReflectText(ptr.textOff(m.ifn)), tfn: resolveReflectText(ptr.textOff(m.tfn)), @@ -2400,12 +2424,13 @@ func StructOf(fields []StructField) Type { } if unt := ptr.elem.uncommon(); unt != nil { for _, m := range unt.methods() 
{ - if m.name.pkgPath() != "" { + mname := ptr.nameOff(m.name) + if mname.pkgPath() != "" { // TODO(sbinet) panic("reflect: embedded interface with unexported method(s) not implemented") } methods = append(methods, method{ - name: m.name, + name: resolveReflectName(mname), mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)), ifn: resolveReflectText(ptr.elem.textOff(m.ifn)), tfn: resolveReflectText(ptr.elem.textOff(m.tfn)), @@ -2415,12 +2440,13 @@ func StructOf(fields []StructField) Type { default: if unt := ft.uncommon(); unt != nil { for _, m := range unt.methods() { - if m.name.pkgPath() != "" { + mname := ft.nameOff(m.name) + if mname.pkgPath() != "" { // TODO(sbinet) panic("reflect: embedded interface with unexported method(s) not implemented") } methods = append(methods, method{ - name: m.name, + name: resolveReflectName(mname), mtyp: resolveReflectType(ft.typeOff(m.mtyp)), ifn: resolveReflectText(ft.textOff(m.ifn)), tfn: resolveReflectText(ft.textOff(m.tfn)), diff --git a/src/reflect/value.go b/src/reflect/value.go index d4d317436a..e6b846e5d1 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -553,7 +553,7 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn panic("reflect: internal error: invalid method index") } m := &tt.methods[i] - if !m.name.isExported() { + if !tt.nameOff(m.name).isExported() { panic("reflect: " + op + " of unexported method") } iface := (*nonEmptyInterface)(v.ptr) @@ -562,7 +562,7 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn } rcvrtype = iface.itab.typ fn = unsafe.Pointer(&iface.itab.fun[i]) - t = m.typ + t = tt.typeOff(m.typ) } else { rcvrtype = v.typ ut := v.typ.uncommon() @@ -570,7 +570,7 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn panic("reflect: internal error: invalid method index") } m := ut.methods()[i] - if !m.name.isExported() { + if !v.typ.nameOff(m.name).isExported() { panic("reflect: " + op + " of 
unexported method") } ifn := v.typ.textOff(m.ifn) @@ -1684,7 +1684,7 @@ func (v Value) Type() Type { panic("reflect: internal error: invalid method index") } m := &tt.methods[i] - return m.typ + return v.typ.typeOff(m.typ) } // Method on concrete type. ut := v.typ.uncommon() diff --git a/src/runtime/iface.go b/src/runtime/iface.go index 84f0ee8f0c..8f179bac80 100644 --- a/src/runtime/iface.go +++ b/src/runtime/iface.go @@ -37,7 +37,8 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab { if canfail { return nil } - panic(&TypeAssertionError{"", typ._string, inter.typ._string, inter.mhdr[0].name.name()}) + name := inter.typ.nameOff(inter.mhdr[0].name) + panic(&TypeAssertionError{"", typ._string, inter.typ._string, name.name()}) } h := itabhash(inter, typ) @@ -98,20 +99,22 @@ func additab(m *itab, locked, canfail bool) { j := 0 for k := 0; k < ni; k++ { i := &inter.mhdr[k] - iname := i.name.name() - itype := i._type - ipkg := i.name.pkgPath() + itype := inter.typ.typeOff(i.ityp) + name := inter.typ.nameOff(i.name) + iname := name.name() + ipkg := name.pkgPath() if ipkg == "" { ipkg = inter.pkgpath.name() } for ; j < nt; j++ { t := &xmhdr[j] - if typ.typeOff(t.mtyp) == itype && t.name.name() == iname { - pkgPath := t.name.pkgPath() + tname := typ.nameOff(t.name) + if typ.typeOff(t.mtyp) == itype && tname.name() == iname { + pkgPath := tname.pkgPath() if pkgPath == "" { pkgPath = x.pkgpath.name() } - if t.name.isExported() || pkgPath == ipkg { + if tname.isExported() || pkgPath == ipkg { if m != nil { ifn := typ.textOff(t.ifn) *(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize)) = ifn diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go index 02aeedaf75..9089383904 100644 --- a/src/runtime/runtime1.go +++ b/src/runtime/runtime1.go @@ -487,6 +487,12 @@ func reflect_typelinks() ([]unsafe.Pointer, [][]int32) { return sections, ret } +// reflect_resolveNameOff resolves a name offset from a base pointer. 
+//go:linkname reflect_resolveNameOff reflect.resolveNameOff +func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer { + return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes) +} + // reflect_resolveTypeOff resolves an *rtype offset from a base type. //go:linkname reflect_resolveTypeOff reflect.resolveTypeOff func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { diff --git a/src/runtime/type.go b/src/runtime/type.go index 711753bab5..31f7ff81b8 100644 --- a/src/runtime/type.go +++ b/src/runtime/type.go @@ -161,11 +161,17 @@ func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name { } } if md == nil { - println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:") - for next := &firstmoduledata; next != nil; next = next.next { - println("\ttypes", hex(next.types), "etypes", hex(next.etypes)) + lock(&reflectOffs.lock) + res, found := reflectOffs.m[int32(off)] + unlock(&reflectOffs.lock) + if !found { + println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:") + for next := &firstmoduledata; next != nil; next = next.next { + println("\ttypes", hex(next.types), "etypes", hex(next.etypes)) + } + throw("runtime: name offset base pointer out of range") } - throw("runtime: name offset base pointer out of range") + return name{(*byte)(res)} } res := md.types + uintptr(off) if res > md.etypes { @@ -175,6 +181,10 @@ func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name { return name{(*byte)(unsafe.Pointer(res))} } +func (t *_type) nameOff(off nameOff) name { + return resolveNameOff(unsafe.Pointer(t), off) +} + func (t *_type) typeOff(off typeOff) *_type { if off == 0 { return nil @@ -269,7 +279,7 @@ type typeOff int32 type textOff int32 type method struct { - name name + name nameOff mtyp typeOff ifn textOff tfn textOff @@ -282,8 +292,8 @@ type uncommontype struct { } type imethod struct { - name name - _type *_type + name nameOff + ityp typeOff } type 
interfacetype struct { @@ -354,19 +364,19 @@ type name struct { bytes *byte } -func (n *name) data(off int) *byte { +func (n name) data(off int) *byte { return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off))) } -func (n *name) isExported() bool { +func (n name) isExported() bool { return (*n.bytes)&(1<<0) != 0 } -func (n *name) nameLen() int { +func (n name) nameLen() int { return int(uint16(*n.data(1))<<8 | uint16(*n.data(2))) } -func (n *name) tagLen() int { +func (n name) tagLen() int { if *n.data(0)&(1<<1) == 0 { return 0 } @@ -374,7 +384,7 @@ func (n *name) tagLen() int { return int(uint16(*n.data(off))<<8 | uint16(*n.data(off + 1))) } -func (n *name) name() (s string) { +func (n name) name() (s string) { if n.bytes == nil { return "" } @@ -388,7 +398,7 @@ func (n *name) name() (s string) { return s } -func (n *name) tag() (s string) { +func (n name) tag() (s string) { tl := n.tagLen() if tl == 0 { return "" @@ -400,7 +410,7 @@ func (n *name) tag() (s string) { return s } -func (n *name) pkgPath() string { +func (n name) pkgPath() string { if n.bytes == nil || *n.data(0)&(1<<2) == 0 { return "" } @@ -545,13 +555,15 @@ func typesEqual(t, v *_type) bool { for i := range it.mhdr { tm := &it.mhdr[i] vm := &iv.mhdr[i] - if tm.name.name() != vm.name.name() { + tname := it.typ.nameOff(tm.name) + vname := iv.typ.nameOff(vm.name) + if tname.name() != vname.name() { return false } - if tm.name.pkgPath() != vm.name.pkgPath() { + if tname.pkgPath() != vname.pkgPath() { return false } - if !typesEqual(tm._type, vm._type) { + if !typesEqual(it.typ.typeOff(tm.ityp), iv.typ.typeOff(vm.ityp)) { return false } } -- cgit v1.3 From f5423a63dfa5d010e7796271666f592a5f9dad70 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Sun, 17 Apr 2016 15:33:07 -0700 Subject: cmd/compile: a dot expression can not be a struct literal key Passes toolstash -cmp. Fixes #15311. 
Change-Id: I1d67f5c9de38e899ab2d6c8986fabd6f197df23a Reviewed-on: https://go-review.googlesource.com/22162 Reviewed-by: David Crawshaw --- src/cmd/compile/internal/gc/typecheck.go | 7 ++++++- test/fixedbugs/issue15311.go | 20 ++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 test/fixedbugs/issue15311.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 6067677738..328737ee14 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3099,7 +3099,12 @@ func typecheckcomplit(n *Node) *Node { } s := l.Left.Sym - if s == nil { + + // An OXDOT uses the Sym field to hold + // the field to the right of the dot, + // so s will be non-nil, but an OXDOT + // is never a valid struct literal key. + if s == nil || l.Left.Op == OXDOT { Yyerror("invalid field name %v in struct initializer", l.Left) l.Right = typecheck(l.Right, Erv) continue diff --git a/test/fixedbugs/issue15311.go b/test/fixedbugs/issue15311.go new file mode 100644 index 0000000000..81fa541325 --- /dev/null +++ b/test/fixedbugs/issue15311.go @@ -0,0 +1,20 @@ +// errorcheck + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The compiler was failing to correctly report an error when a dot +// expression was used a struct literal key. + +package p + +type T struct { + toInt map[string]int + toString map[int]string +} + +var t = T{ + foo.toInt: make(map[string]int), // ERROR "field name" + bar.toString: make(map[int]string), // ERROR "field name" +} -- cgit v1.3 From b024ed0d944c0f839e699fb10af633d295abb311 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Mon, 18 Apr 2016 11:17:55 -0700 Subject: cmd/compile: eliminate copy for static literals *p = [5]byte{1,2,3,4,5} First we allocate a global containing the RHS. 
Then we copy that global to a local stack variable, and then copy that local stack variable to *p. The intermediate copy is unnecessary. Note that this only works if the RHS is completely constant. If the code was: *p = [5]byte{1,2,x,4,5} this optimization doesn't apply as we have to construct the RHS on the stack before copying it to *p. Fixes #12841 Change-Id: I7cd0404ecc7a2d1750cbd8fe1222dba0fa44611f Reviewed-on: https://go-review.googlesource.com/22192 Reviewed-by: Josh Bleecher Snyder --- src/cmd/compile/internal/gc/sinit.go | 26 ++++++++++++++++++++++++++ src/cmd/compile/internal/gc/walk.go | 13 +++++++++++++ 2 files changed, 39 insertions(+) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 85ef78b973..1021609d3a 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -563,6 +563,32 @@ func getdyn(n *Node, top int) initGenType { return mode } +// isStaticCompositeLiteral reports whether n is a compile-time constant. +// n must be a struct or array literal. 
+func isStaticCompositeLiteral(n *Node) bool { + for _, r := range n.List.Slice() { + if r.Op != OKEY { + Fatalf("isStaticCompositeLiteral: rhs not OKEY: %v", r) + } + index := r.Left + if n.Op == OARRAYLIT && index.Op != OLITERAL { + return false + } + value := r.Right + switch value.Op { + case OSTRUCTLIT, OARRAYLIT: + if !isStaticCompositeLiteral(value) { + return false + } + default: + if value.Op != OLITERAL { + return false + } + } + } + return true +} + func structlit(ctxt int, pass int, n *Node, var_ *Node, init *Nodes) { for _, r := range n.List.Slice() { if r.Op != OKEY { diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 78bad8d348..1a15bd93d0 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1531,6 +1531,19 @@ opswitch: n = r case OARRAYLIT, OMAPLIT, OSTRUCTLIT, OPTRLIT: + if (n.Op == OSTRUCTLIT || (n.Op == OARRAYLIT && !n.Type.IsSlice())) && isStaticCompositeLiteral(n) { + // n can be directly represented in the read-only data section. + // Make direct reference to the static data. See issue 12841. + vstat := staticname(n.Type, 0) + if n.Op == OSTRUCTLIT { + structlit(0, 1, n, vstat, init) + } else { + arraylit(0, 1, n, vstat, init) + } + n = vstat + n = typecheck(n, Erv) + break + } var_ := temp(n.Type) anylit(0, n, var_, init) n = var_ -- cgit v1.3 From 4140da7b57f944cc16324496adcc5a41d7a987ed Mon Sep 17 00:00:00 2001 From: David Crawshaw Date: Mon, 4 Apr 2016 13:07:24 -0400 Subject: cmd/link, cmd/compile: typelink sorting in linker Instead of writing out the type almost twice in the symbol name, teach the linker how to sort typelink symbols by their contents. This ~halves the size of typelink symbol names, which helps very large (6KB) names like those mentioned in #15104. This does not increase the total sorting work done by the linker, and makes it possible to use shorter symbol names for types. See the follow-on CL 21583. 
Change-Id: Ie5807565ed07d31bc477d20f60e4c0b47144f337 Reviewed-on: https://go-review.googlesource.com/21457 Reviewed-by: Ian Lance Taylor --- src/cmd/compile/internal/gc/reflect.go | 11 +---------- src/cmd/link/internal/ld/data.go | 8 ++++++++ src/cmd/link/internal/ld/decodesym.go | 12 ++++++++++++ 3 files changed, 21 insertions(+), 10 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index f782ce0974..5031045c64 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -912,16 +912,7 @@ func tracksym(t *Type, f *Field) *Sym { } func typelinkLSym(t *Type) *obj.LSym { - // %-uT is what the generated Type's string field says. - // It uses (ambiguous) package names instead of import paths. - // %-T is the complete, unambiguous type name. - // We want the types to end up sorted by string field, - // so use that first in the name, and then add :%-T to - // disambiguate. We use a tab character as the separator to - // ensure the types appear sorted by their string field. The - // names are a little long but they are discarded by the linker - // and do not end up in the symbol table of the final binary. - name := "go.typelink." + Tconv(t, FmtLeft|FmtUnsigned) + "\t" + Tconv(t, FmtLeft) + name := "go.typelink." + Tconv(t, FmtLeft) // complete, unambiguous type name return obj.Linklookup(Ctxt, name, 0) } diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index 105503f6ef..8e2cf99877 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -32,6 +32,7 @@ package ld import ( + "bytes" "cmd/internal/gcprog" "cmd/internal/obj" "cmd/internal/sys" @@ -1199,6 +1200,13 @@ func (d dataSlice) Less(i, j int) bool { return s1.Size < s2.Size } + // Sort typelinks by the string field. 
+ if strings.HasPrefix(s1.Name, "go.typelink.") && strings.HasPrefix(s2.Name, "go.typelink.") { + s1n := decodetype_string(s1.Lsym.R[0].Sym) + s2n := decodetype_string(s2.Lsym.R[0].Sym) + return bytes.Compare(s1n, s2n) < 0 + } + return s1.Name < s2.Name } diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go index 5eb20c2fb2..b1c55cf787 100644 --- a/src/cmd/link/internal/ld/decodesym.go +++ b/src/cmd/link/internal/ld/decodesym.go @@ -211,6 +211,18 @@ func decodetype_structfieldarrayoff(s *LSym, i int) int { return off } +// decodetype_string returns the contents of an rtype's string field. +func decodetype_string(s *LSym) []byte { + off := 4*SysArch.PtrSize + 8 + strlen := int64(decode_inuxi(s.P[off+SysArch.PtrSize:], SysArch.IntSize)) + + r := decode_reloc(s, int32(off)) + if r == nil { + return nil + } + return r.Sym.P[r.Add : r.Add+strlen] +} + // decodetype_name decodes the name from a reflect.name. func decodetype_name(s *LSym, off int) string { r := decode_reloc(s, int32(off)) -- cgit v1.3 From 4d5adf1eb1a955bae08012e568c645eb4d7f3544 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Mon, 18 Apr 2016 09:28:50 -0700 Subject: cmd/compile: logical operation identities MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some rewrites to simplify logical operations. 
Fixes #14363 Change-Id: I45a1e8f227267cbcca0778101125f7bab776a5dd Reviewed-on: https://go-review.googlesource.com/22188 Reviewed-by: Alexandru Moșoi Run-TryBot: Alexandru Moșoi TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/ssa/gen/generic.rules | 49 ++ src/cmd/compile/internal/ssa/rewritegeneric.go | 900 ++++++++++++++++++++++++- 2 files changed, 931 insertions(+), 18 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules index dacc2007c8..3270ec1534 100644 --- a/src/cmd/compile/internal/ssa/gen/generic.rules +++ b/src/cmd/compile/internal/ssa/gen/generic.rules @@ -414,6 +414,55 @@ (Neg32 (Sub32 x y)) -> (Sub32 y x) (Neg64 (Sub64 x y)) -> (Sub64 y x) +(And64 x (And64 x y)) -> (And64 x y) +(And32 x (And32 x y)) -> (And32 x y) +(And16 x (And16 x y)) -> (And16 x y) +(And8 x (And8 x y)) -> (And8 x y) +(And64 x (And64 y x)) -> (And64 x y) +(And32 x (And32 y x)) -> (And32 x y) +(And16 x (And16 y x)) -> (And16 x y) +(And8 x (And8 y x)) -> (And8 x y) +(And64 (And64 x y) x) -> (And64 x y) +(And32 (And32 x y) x) -> (And32 x y) +(And16 (And16 x y) x) -> (And16 x y) +(And8 (And8 x y) x) -> (And8 x y) +(And64 (And64 x y) y) -> (And64 x y) +(And32 (And32 x y) y) -> (And32 x y) +(And16 (And16 x y) y) -> (And16 x y) +(And8 (And8 x y) y) -> (And8 x y) +(Or64 x (Or64 x y)) -> (Or64 x y) +(Or32 x (Or32 x y)) -> (Or32 x y) +(Or16 x (Or16 x y)) -> (Or16 x y) +(Or8 x (Or8 x y)) -> (Or8 x y) +(Or64 x (Or64 y x)) -> (Or64 x y) +(Or32 x (Or32 y x)) -> (Or32 x y) +(Or16 x (Or16 y x)) -> (Or16 x y) +(Or8 x (Or8 y x)) -> (Or8 x y) +(Or64 (Or64 x y) x) -> (Or64 x y) +(Or32 (Or32 x y) x) -> (Or32 x y) +(Or16 (Or16 x y) x) -> (Or16 x y) +(Or8 (Or8 x y) x) -> (Or8 x y) +(Or64 (Or64 x y) y) -> (Or64 x y) +(Or32 (Or32 x y) y) -> (Or32 x y) +(Or16 (Or16 x y) y) -> (Or16 x y) +(Or8 (Or8 x y) y) -> (Or8 x y) +(Xor64 x (Xor64 x y)) -> y +(Xor32 x (Xor32 x y)) -> y +(Xor16 x (Xor16 x y)) -> y 
+(Xor8 x (Xor8 x y)) -> y +(Xor64 x (Xor64 y x)) -> y +(Xor32 x (Xor32 y x)) -> y +(Xor16 x (Xor16 y x)) -> y +(Xor8 x (Xor8 y x)) -> y +(Xor64 (Xor64 x y) x) -> y +(Xor32 (Xor32 x y) x) -> y +(Xor16 (Xor16 x y) x) -> y +(Xor8 (Xor8 x y) x) -> y +(Xor64 (Xor64 x y) y) -> x +(Xor32 (Xor32 x y) y) -> x +(Xor16 (Xor16 x y) y) -> x +(Xor8 (Xor8 x y) y) -> x + (Trunc64to8 (And64 (Const64 [y]) x)) && y&0xFF == 0xFF -> (Trunc64to8 x) (Trunc64to16 (And64 (Const64 [y]) x)) && y&0xFFFF == 0xFFFF -> (Trunc64to16 x) (Trunc64to32 (And64 (Const64 [y]) x)) && y&0xFFFFFFFF == 0xFFFFFFFF -> (Trunc64to32 x) diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 9b0f43c414..54a6815c93 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -732,6 +732,78 @@ func rewriteValuegeneric_OpAnd16(v *Value, config *Config) bool { v.AuxInt = 0 return true } + // match: (And16 x (And16 x y)) + // cond: + // result: (And16 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAnd16 { + break + } + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpAnd16) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (And16 x (And16 y x)) + // cond: + // result: (And16 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAnd16 { + break + } + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpAnd16) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (And16 (And16 x y) x) + // cond: + // result: (And16 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpAnd16 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if x != v.Args[1] { + break + } + v.reset(OpAnd16) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (And16 (And16 x y) y) + // cond: + // result: (And16 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpAnd16 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { + break + } + v.reset(OpAnd16) 
+ v.AddArg(x) + v.AddArg(y) + return true + } return false } func rewriteValuegeneric_OpAnd32(v *Value, config *Config) bool { @@ -803,6 +875,78 @@ func rewriteValuegeneric_OpAnd32(v *Value, config *Config) bool { v.AuxInt = 0 return true } + // match: (And32 x (And32 x y)) + // cond: + // result: (And32 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAnd32 { + break + } + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpAnd32) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (And32 x (And32 y x)) + // cond: + // result: (And32 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAnd32 { + break + } + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpAnd32) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (And32 (And32 x y) x) + // cond: + // result: (And32 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpAnd32 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if x != v.Args[1] { + break + } + v.reset(OpAnd32) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (And32 (And32 x y) y) + // cond: + // result: (And32 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpAnd32 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { + break + } + v.reset(OpAnd32) + v.AddArg(x) + v.AddArg(y) + return true + } return false } func rewriteValuegeneric_OpAnd64(v *Value, config *Config) bool { @@ -874,6 +1018,78 @@ func rewriteValuegeneric_OpAnd64(v *Value, config *Config) bool { v.AuxInt = 0 return true } + // match: (And64 x (And64 x y)) + // cond: + // result: (And64 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAnd64 { + break + } + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpAnd64) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (And64 x (And64 y x)) + // cond: + // result: (And64 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAnd64 { + break + } + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + 
v.reset(OpAnd64) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (And64 (And64 x y) x) + // cond: + // result: (And64 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpAnd64 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if x != v.Args[1] { + break + } + v.reset(OpAnd64) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (And64 (And64 x y) y) + // cond: + // result: (And64 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpAnd64 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { + break + } + v.reset(OpAnd64) + v.AddArg(x) + v.AddArg(y) + return true + } // match: (And64 (Const64 [y]) x) // cond: nlz(y) + nto(y) == 64 && nto(y) >= 32 // result: (Rsh64Ux64 (Lsh64x64 x (Const64 [nlz(y)])) (Const64 [nlz(y)])) @@ -997,6 +1213,78 @@ func rewriteValuegeneric_OpAnd8(v *Value, config *Config) bool { v.AuxInt = 0 return true } + // match: (And8 x (And8 x y)) + // cond: + // result: (And8 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAnd8 { + break + } + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpAnd8) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (And8 x (And8 y x)) + // cond: + // result: (And8 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAnd8 { + break + } + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpAnd8) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (And8 (And8 x y) x) + // cond: + // result: (And8 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpAnd8 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if x != v.Args[1] { + break + } + v.reset(OpAnd8) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (And8 (And8 x y) y) + // cond: + // result: (And8 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpAnd8 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { + break + } + v.reset(OpAnd8) + v.AddArg(x) + v.AddArg(y) + return true + } return false } func rewriteValuegeneric_OpArg(v *Value, 
config *Config) bool { @@ -5739,35 +6027,107 @@ func rewriteValuegeneric_OpOr16(v *Value, config *Config) bool { v.AuxInt = -1 return true } - return false -} -func rewriteValuegeneric_OpOr32(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Or32 x (Const32 [c])) - // cond: x.Op != OpConst32 - // result: (Or32 (Const32 [c]) x) + // match: (Or16 x (Or16 x y)) + // cond: + // result: (Or16 x y) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpOr16 { break } - t := v_1.Type - c := v_1.AuxInt - if !(x.Op != OpConst32) { + if x != v_1.Args[0] { break } - v.reset(OpOr32) - v0 := b.NewValue0(v.Line, OpConst32, t) - v0.AuxInt = c - v.AddArg(v0) + y := v_1.Args[1] + v.reset(OpOr16) v.AddArg(x) + v.AddArg(y) return true } - // match: (Or32 x x) + // match: (Or16 x (Or16 y x)) // cond: - // result: x + // result: (Or16 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpOr16 { + break + } + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpOr16) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or16 (Or16 x y) x) + // cond: + // result: (Or16 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpOr16 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if x != v.Args[1] { + break + } + v.reset(OpOr16) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or16 (Or16 x y) y) + // cond: + // result: (Or16 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpOr16 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { + break + } + v.reset(OpOr16) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValuegeneric_OpOr32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Or32 x (Const32 [c])) + // cond: x.Op != OpConst32 + // result: (Or32 (Const32 [c]) x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst32 { + break + } + t := v_1.Type + c := v_1.AuxInt + if !(x.Op != OpConst32) { + break + } + v.reset(OpOr32) + v0 := 
b.NewValue0(v.Line, OpConst32, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (Or32 x x) + // cond: + // result: x for { x := v.Args[0] if x != v.Args[1] { @@ -5810,6 +6170,78 @@ func rewriteValuegeneric_OpOr32(v *Value, config *Config) bool { v.AuxInt = -1 return true } + // match: (Or32 x (Or32 x y)) + // cond: + // result: (Or32 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpOr32 { + break + } + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpOr32) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or32 x (Or32 y x)) + // cond: + // result: (Or32 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpOr32 { + break + } + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpOr32) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or32 (Or32 x y) x) + // cond: + // result: (Or32 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpOr32 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if x != v.Args[1] { + break + } + v.reset(OpOr32) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or32 (Or32 x y) y) + // cond: + // result: (Or32 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpOr32 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { + break + } + v.reset(OpOr32) + v.AddArg(x) + v.AddArg(y) + return true + } return false } func rewriteValuegeneric_OpOr64(v *Value, config *Config) bool { @@ -5881,6 +6313,78 @@ func rewriteValuegeneric_OpOr64(v *Value, config *Config) bool { v.AuxInt = -1 return true } + // match: (Or64 x (Or64 x y)) + // cond: + // result: (Or64 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpOr64 { + break + } + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpOr64) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or64 x (Or64 y x)) + // cond: + // result: (Or64 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpOr64 { + break + } + y := v_1.Args[0] + if 
x != v_1.Args[1] { + break + } + v.reset(OpOr64) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or64 (Or64 x y) x) + // cond: + // result: (Or64 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpOr64 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if x != v.Args[1] { + break + } + v.reset(OpOr64) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or64 (Or64 x y) y) + // cond: + // result: (Or64 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpOr64 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { + break + } + v.reset(OpOr64) + v.AddArg(x) + v.AddArg(y) + return true + } return false } func rewriteValuegeneric_OpOr8(v *Value, config *Config) bool { @@ -5952,6 +6456,78 @@ func rewriteValuegeneric_OpOr8(v *Value, config *Config) bool { v.AuxInt = -1 return true } + // match: (Or8 x (Or8 x y)) + // cond: + // result: (Or8 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpOr8 { + break + } + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpOr8) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or8 x (Or8 y x)) + // cond: + // result: (Or8 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpOr8 { + break + } + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpOr8) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or8 (Or8 x y) x) + // cond: + // result: (Or8 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpOr8 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if x != v.Args[1] { + break + } + v.reset(OpOr8) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or8 (Or8 x y) y) + // cond: + // result: (Or8 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpOr8 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { + break + } + v.reset(OpOr8) + v.AddArg(x) + v.AddArg(y) + return true + } return false } func rewriteValuegeneric_OpPhi(v *Value, config *Config) bool { @@ -8941,6 +9517,78 @@ func 
rewriteValuegeneric_OpXor16(v *Value, config *Config) bool { v.AddArg(x) return true } + // match: (Xor16 x (Xor16 x y)) + // cond: + // result: y + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpXor16 { + break + } + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (Xor16 x (Xor16 y x)) + // cond: + // result: y + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpXor16 { + break + } + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (Xor16 (Xor16 x y) x) + // cond: + // result: y + for { + v_0 := v.Args[0] + if v_0.Op != OpXor16 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if x != v.Args[1] { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (Xor16 (Xor16 x y) y) + // cond: + // result: x + for { + v_0 := v.Args[0] + if v_0.Op != OpXor16 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } return false } func rewriteValuegeneric_OpXor32(v *Value, config *Config) bool { @@ -8996,6 +9644,78 @@ func rewriteValuegeneric_OpXor32(v *Value, config *Config) bool { v.AddArg(x) return true } + // match: (Xor32 x (Xor32 x y)) + // cond: + // result: y + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpXor32 { + break + } + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (Xor32 x (Xor32 y x)) + // cond: + // result: y + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpXor32 { + break + } + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (Xor32 (Xor32 x y) x) + // cond: + // result: y + for { + v_0 := v.Args[0] + if v_0.Op != OpXor32 { + break + } + x := 
v_0.Args[0] + y := v_0.Args[1] + if x != v.Args[1] { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (Xor32 (Xor32 x y) y) + // cond: + // result: x + for { + v_0 := v.Args[0] + if v_0.Op != OpXor32 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } return false } func rewriteValuegeneric_OpXor64(v *Value, config *Config) bool { @@ -9051,6 +9771,78 @@ func rewriteValuegeneric_OpXor64(v *Value, config *Config) bool { v.AddArg(x) return true } + // match: (Xor64 x (Xor64 x y)) + // cond: + // result: y + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpXor64 { + break + } + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (Xor64 x (Xor64 y x)) + // cond: + // result: y + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpXor64 { + break + } + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (Xor64 (Xor64 x y) x) + // cond: + // result: y + for { + v_0 := v.Args[0] + if v_0.Op != OpXor64 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if x != v.Args[1] { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (Xor64 (Xor64 x y) y) + // cond: + // result: x + for { + v_0 := v.Args[0] + if v_0.Op != OpXor64 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } return false } func rewriteValuegeneric_OpXor8(v *Value, config *Config) bool { @@ -9106,6 +9898,78 @@ func rewriteValuegeneric_OpXor8(v *Value, config *Config) bool { v.AddArg(x) return true } + // match: (Xor8 x (Xor8 x y)) + // cond: + // result: y + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpXor8 { + break + } + if x != 
v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (Xor8 x (Xor8 y x)) + // cond: + // result: y + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpXor8 { + break + } + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (Xor8 (Xor8 x y) x) + // cond: + // result: y + for { + v_0 := v.Args[0] + if v_0.Op != OpXor8 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if x != v.Args[1] { + break + } + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (Xor8 (Xor8 x y) y) + // cond: + // result: x + for { + v_0 := v.Args[0] + if v_0.Op != OpXor8 { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } return false } func rewriteBlockgeneric(b *Block) bool { -- cgit v1.3 From a5386f3c7dc7735aa4695647896ba94bab0341e0 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Mon, 18 Apr 2016 13:55:40 -0700 Subject: cmd/compile: fix internal consistency check with binary exporter Per feedback from mdempsky from https://go-review.googlesource.com/22096. Also fix emitted position info. 
Change-Id: I7ff1967430867d922be8784832042c75d81df28b Reviewed-on: https://go-review.googlesource.com/22198 Run-TryBot: Robert Griesemer Reviewed-by: Matthew Dempsky TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/bexport.go | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index eee71291be..90b4edff18 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -558,16 +558,13 @@ func (p *exporter) typ(t *Type) { Fatalf("exporter: predeclared type missing from type map?") } - // TODO(gri) The assertion below is incorrect (crashes during all.bash), - // likely because of symbol shadowing (we expect the respective definition - // to point to us). Determine the correct Def so we get correct position - // info. - // if tsym.Def.Type != t { - // Fatalf("exporter: type definition doesn't point to us?") - // } + n := typenod(t) + if n.Type != t { + Fatalf("exporter: named type definition incorrectly set up") + } p.tag(namedTag) - p.pos(tsym.Def) // TODO(gri) this may not be the correct node - fix and add tests + p.pos(n) p.qualifiedName(tsym) // write underlying type -- cgit v1.3 From 03e216f30d8bad7f4f9dadb50f7f6ca71e632682 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Mon, 18 Apr 2016 09:40:30 -0700 Subject: cmd/compile: re-enable in-place append optimization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CL 21891 was too clever in its attempts to avoid spills. Storing newlen too early caused uses of append in the runtime itself to receive an inconsistent view of a slice, leading to corruption. This CL makes the generate code much more similar to the old backend. It spills more than before, but those spills have been contained to the grow path. 
It recalculates newlen unnecessarily on the fast path, but that's measurably cheaper than spilling it. CL 21891 caused runtime failures in 6 of 2000 runs of net/http and crypto/x509 in my test setup. This CL has gone 6000 runs without a failure. Benchmarks going from master to this CL: name old time/op new time/op delta AppendInPlace/NoGrow/Byte-8 439ns ± 2% 436ns ± 2% -0.72% (p=0.001 n=28+27) AppendInPlace/NoGrow/1Ptr-8 901ns ± 0% 856ns ± 0% -4.95% (p=0.000 n=26+29) AppendInPlace/NoGrow/2Ptr-8 2.15µs ± 1% 1.95µs ± 0% -9.07% (p=0.000 n=28+30) AppendInPlace/NoGrow/3Ptr-8 2.66µs ± 0% 2.45µs ± 0% -7.93% (p=0.000 n=29+26) AppendInPlace/NoGrow/4Ptr-8 3.24µs ± 1% 3.02µs ± 1% -6.75% (p=0.000 n=28+30) AppendInPlace/Grow/Byte-8 269ns ± 1% 271ns ± 1% +0.84% (p=0.000 n=30+29) AppendInPlace/Grow/1Ptr-8 275ns ± 1% 280ns ± 1% +1.75% (p=0.000 n=30+30) AppendInPlace/Grow/2Ptr-8 384ns ± 0% 391ns ± 0% +1.94% (p=0.000 n=27+30) AppendInPlace/Grow/3Ptr-8 455ns ± 0% 462ns ± 0% +1.43% (p=0.000 n=29+29) AppendInPlace/Grow/4Ptr-8 478ns ± 0% 479ns ± 0% +0.23% (p=0.000 n=30+27) However, for the large no-grow cases, there is still more work to be done. 
Going from this CL to the non-SSA backend: name old time/op new time/op delta AppendInPlace/NoGrow/Byte-8 436ns ± 2% 436ns ± 2% ~ (p=0.967 n=27+29) AppendInPlace/NoGrow/1Ptr-8 856ns ± 0% 884ns ± 0% +3.28% (p=0.000 n=29+26) AppendInPlace/NoGrow/2Ptr-8 1.95µs ± 0% 1.56µs ± 0% -20.28% (p=0.000 n=30+29) AppendInPlace/NoGrow/3Ptr-8 2.45µs ± 0% 1.89µs ± 0% -22.88% (p=0.000 n=26+28) AppendInPlace/NoGrow/4Ptr-8 3.02µs ± 1% 2.56µs ± 1% -15.35% (p=0.000 n=30+28) AppendInPlace/Grow/Byte-8 271ns ± 1% 283ns ± 1% +4.56% (p=0.000 n=29+29) AppendInPlace/Grow/1Ptr-8 280ns ± 1% 288ns ± 1% +2.99% (p=0.000 n=30+30) AppendInPlace/Grow/2Ptr-8 391ns ± 0% 409ns ± 0% +4.66% (p=0.000 n=30+29) AppendInPlace/Grow/3Ptr-8 462ns ± 0% 481ns ± 0% +4.13% (p=0.000 n=29+30) AppendInPlace/Grow/4Ptr-8 479ns ± 0% 502ns ± 0% +4.81% (p=0.000 n=27+26) New generated code: var x []byte func a() { x = append(x, 1) } "".a t=1 size=208 args=0x0 locals=0x48 0x0000 00000 (a.go:5) TEXT "".a(SB), $72-0 0x0000 00000 (a.go:5) MOVQ (TLS), CX 0x0009 00009 (a.go:5) CMPQ SP, 16(CX) 0x000d 00013 (a.go:5) JLS 190 0x0013 00019 (a.go:5) SUBQ $72, SP 0x0017 00023 (a.go:5) FUNCDATA $0, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB) 0x0017 00023 (a.go:5) FUNCDATA $1, gclocals·33cdeccccebe80329f1fdbee7f5874cb(SB) 0x0017 00023 (a.go:6) MOVQ "".x+16(SB), CX 0x001e 00030 (a.go:6) MOVQ "".x+8(SB), DX 0x0025 00037 (a.go:6) MOVQ "".x(SB), BX 0x002c 00044 (a.go:6) LEAQ 1(DX), BP 0x0030 00048 (a.go:6) CMPQ BP, CX 0x0033 00051 (a.go:6) JGT $0, 73 0x0035 00053 (a.go:6) LEAQ 1(DX), AX 0x0039 00057 (a.go:6) MOVQ AX, "".x+8(SB) 0x0040 00064 (a.go:6) MOVB $1, (BX)(DX*1) 0x0044 00068 (a.go:7) ADDQ $72, SP 0x0048 00072 (a.go:7) RET 0x0049 00073 (a.go:6) LEAQ type.[]uint8(SB), AX 0x0050 00080 (a.go:6) MOVQ AX, (SP) 0x0054 00084 (a.go:6) MOVQ BX, 8(SP) 0x0059 00089 (a.go:6) MOVQ DX, 16(SP) 0x005e 00094 (a.go:6) MOVQ CX, 24(SP) 0x0063 00099 (a.go:6) MOVQ BP, 32(SP) 0x0068 00104 (a.go:6) PCDATA $0, $0 0x0068 00104 (a.go:6) CALL runtime.growslice(SB) 
0x006d 00109 (a.go:6) MOVQ 40(SP), CX 0x0072 00114 (a.go:6) MOVQ 48(SP), DX 0x0077 00119 (a.go:6) MOVQ DX, "".autotmp_0+64(SP) 0x007c 00124 (a.go:6) MOVQ 56(SP), BX 0x0081 00129 (a.go:6) MOVQ BX, "".x+16(SB) 0x0088 00136 (a.go:6) MOVL runtime.writeBarrier(SB), AX 0x008e 00142 (a.go:6) TESTB AL, AL 0x0090 00144 (a.go:6) JNE $0, 162 0x0092 00146 (a.go:6) MOVQ CX, "".x(SB) 0x0099 00153 (a.go:6) MOVQ "".x(SB), BX 0x00a0 00160 (a.go:6) JMP 53 0x00a2 00162 (a.go:6) LEAQ "".x(SB), BX 0x00a9 00169 (a.go:6) MOVQ BX, (SP) 0x00ad 00173 (a.go:6) MOVQ CX, 8(SP) 0x00b2 00178 (a.go:6) PCDATA $0, $0 0x00b2 00178 (a.go:6) CALL runtime.writebarrierptr(SB) 0x00b7 00183 (a.go:6) MOVQ "".autotmp_0+64(SP), DX 0x00bc 00188 (a.go:6) JMP 153 0x00be 00190 (a.go:6) NOP 0x00be 00190 (a.go:5) CALL runtime.morestack_noctxt(SB) 0x00c3 00195 (a.go:5) JMP 0 Fixes #14969 again Change-Id: Ia50463b1f506011aad0718a4fef1d4738e43c32d Reviewed-on: https://go-review.googlesource.com/22197 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/ssa.go | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 4a93dc1087..c4008c9ce1 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -338,6 +338,7 @@ var ( // dummy nodes for temporary variables ptrVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "ptr"}} + lenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "len"}} newlenVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "newlen"}} capVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "cap"}} typVar = Node{Op: ONAME, Class: Pxxx, Sym: &Sym{Name: "typ"}} @@ -699,8 +700,7 @@ func (s *state) stmt(n *Node) { // If the slice can be SSA'd, it'll be on the stack, // so there will be no write barriers, // so there's no need to attempt to prevent them. 
- const doInPlaceAppend = false // issue 15246 - if doInPlaceAppend && samesafeexpr(n.Left, rhs.List.First()) && !s.canSSA(n.Left) { + if samesafeexpr(n.Left, rhs.List.First()) && !s.canSSA(n.Left) { s.append(rhs, true) return } @@ -2128,12 +2128,14 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { // a := &s // ptr, len, cap := s // newlen := len + 3 - // *a.len = newlen // store newlen immediately to avoid a spill // if newlen > cap { - // newptr, _, newcap = growslice(ptr, len, cap, newlen) + // newptr, len, newcap = growslice(ptr, len, cap, newlen) + // vardef(a) // if necessary, advise liveness we are writing a new a // *a.cap = newcap // write before ptr to avoid a spill // *a.ptr = newptr // with write barrier // } + // newlen = len + 3 // recalculate to avoid a spill + // *a.len = newlen // // with write barriers, if needed: // *(ptr+len) = e1 // *(ptr+len+1) = e2 @@ -2164,17 +2166,14 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice) nl := s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs)) - if inplace { - lenaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_nel), addr) - s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenaddr, nl, s.mem()) - } - cmp := s.newValue2(s.ssaOp(OGT, Types[TINT]), Types[TBOOL], nl, c) s.vars[&ptrVar] = p if !inplace { s.vars[&newlenVar] = nl s.vars[&capVar] = c + } else { + s.vars[&lenVar] = l } b := s.endBlock() @@ -2191,11 +2190,16 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl) if inplace { + if sn.Op == ONAME { + // Tell liveness we're about to build a new slice + s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, ssa.TypeMem, sn, s.mem()) + } capaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_cap), addr) s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, capaddr, 
r[2], s.mem()) s.insertWBstore(pt, addr, r[0], n.Lineno, 0) // load the value we just stored to avoid having to spill it s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem()) + s.vars[&lenVar] = r[1] // avoid a spill in the fast path } else { s.vars[&ptrVar] = r[0] s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], r[1], s.constInt(Types[TINT], nargs)) @@ -2208,6 +2212,13 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { // assign new elements to slots s.startBlock(assign) + if inplace { + l = s.variable(&lenVar, Types[TINT]) // generates phi for len + nl = s.newValue2(s.ssaOp(OADD, Types[TINT]), Types[TINT], l, s.constInt(Types[TINT], nargs)) + lenaddr := s.newValue1I(ssa.OpOffPtr, pt, int64(Array_nel), addr) + s.vars[&memVar] = s.newValue3I(ssa.OpStore, ssa.TypeMem, s.config.IntSize, lenaddr, nl, s.mem()) + } + // Evaluate args args := make([]*ssa.Value, 0, nargs) store := make([]bool, 0, nargs) @@ -2248,6 +2259,7 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { delete(s.vars, &ptrVar) if inplace { + delete(s.vars, &lenVar) return nil } delete(s.vars, &newlenVar) -- cgit v1.3 From 3c6e60c0e41ed42d5df6dcbf134e3a664c08c154 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Tue, 19 Apr 2016 12:08:33 -0700 Subject: cmd/compile: fix isStaticCompositeLiteral Previously, isStaticCompositeLiteral would return the wrong value for literals like: [1]struct{ b []byte }{b: []byte{1}} Note that the outermost component is an array, but once we recurse into isStaticCompositeLiteral, we never check again that arrays are actually arrays. Instead of adding more logic to the guts of isStaticCompositeLiteral, allow it to accept any Node and return the correct answer. 
Change-Id: I6af7814a9037bbc7043da9a96137fbee067bbe0e Reviewed-on: https://go-review.googlesource.com/22247 Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/sinit.go | 25 ++++++++++++++----------- src/cmd/compile/internal/gc/walk.go | 2 +- 2 files changed, 15 insertions(+), 12 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 1021609d3a..5a3a4dbe7f 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -564,8 +564,18 @@ func getdyn(n *Node, top int) initGenType { } // isStaticCompositeLiteral reports whether n is a compile-time constant. -// n must be a struct or array literal. func isStaticCompositeLiteral(n *Node) bool { + switch n.Op { + case OARRAYLIT: + if n.Type.IsSlice() { + return false + } + case OSTRUCTLIT: + case OLITERAL: + return true + default: + return false + } for _, r := range n.List.Slice() { if r.Op != OKEY { Fatalf("isStaticCompositeLiteral: rhs not OKEY: %v", r) @@ -575,15 +585,8 @@ func isStaticCompositeLiteral(n *Node) bool { return false } value := r.Right - switch value.Op { - case OSTRUCTLIT, OARRAYLIT: - if !isStaticCompositeLiteral(value) { - return false - } - default: - if value.Op != OLITERAL { - return false - } + if !isStaticCompositeLiteral(value) { + return false } } return true @@ -1031,7 +1034,7 @@ func anylit(ctxt int, n *Node, var_ *Node, init *Nodes) { t := n.Type switch n.Op { default: - Fatalf("anylit: not lit") + Fatalf("anylit: not lit, op=%v node=%v", opnames[n.Op], n) case OPTRLIT: if !t.IsPtr() { diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 1a15bd93d0..e4d93339a9 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1531,7 +1531,7 @@ opswitch: n = r case OARRAYLIT, OMAPLIT, OSTRUCTLIT, OPTRLIT: - if (n.Op == OSTRUCTLIT || (n.Op == OARRAYLIT && !n.Type.IsSlice())) && isStaticCompositeLiteral(n) 
{ + if isStaticCompositeLiteral(n) { // n can be directly represented in the read-only data section. // Make direct reference to the static data. See issue 12841. vstat := staticname(n.Type, 0) -- cgit v1.3 From 55ab07c224a358cabe795fb1e52a627194d7daee Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Tue, 19 Apr 2016 12:26:28 -0700 Subject: cmd/compile: static composite literals are side-effect free MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This extends CL 22192. This removes the remaining performance disparity between non-SSA and SSA on the AppendInPlace benchmarks. Going from non-SSA to SSA: AppendInPlace/NoGrow/2Ptr-8 1.60µs ± 5% 1.53µs ± 5% -4.04% (p=0.000 n=15+14) AppendInPlace/NoGrow/3Ptr-8 2.04µs ± 3% 1.96µs ± 2% -3.90% (p=0.000 n=13+14) AppendInPlace/NoGrow/4Ptr-8 2.83µs ± 8% 2.62µs ± 4% -7.39% (p=0.000 n=13+15) Previously these were 20% regressions. Change-Id: Ie87810bffd598730658e07585f5e2ef979a12b8f Reviewed-on: https://go-review.googlesource.com/22248 Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/subr.go | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index f6af11adba..51a78317f2 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1328,6 +1328,11 @@ func safeexpr(n *Node, init *Nodes) *Node { a.Right = r a = walkexpr(a, init) return a + + case OSTRUCTLIT, OARRAYLIT: + if isStaticCompositeLiteral(n) { + return n + } } // make a copy; must not be used as an lvalue -- cgit v1.3 From 8b20fd000d7e894865442134f9d6d197ac5dabed Mon Sep 17 00:00:00 2001 From: Alexandru Moșoi Date: Tue, 12 Apr 2016 18:24:34 +0200 Subject: cmd/compile: transform some Phis into Or8. 
func f(a, b bool) bool { return a || b } is now a single instructions (excluding loading and unloading the arguments): v10 = ORB v11 v12 : AX Change-Id: Iff63399410cb46909f4318ea1c3f45a029f4aa5e Reviewed-on: https://go-review.googlesource.com/21872 TryBot-Result: Gobot Gobot Reviewed-by: Josh Bleecher Snyder --- src/cmd/compile/internal/ssa/compile.go | 3 +- src/cmd/compile/internal/ssa/phiopt.go | 57 ++++++++++++++++++--------------- test/phiopt.go | 40 ++++++++++++++++++++--- test/prove.go | 4 +++ 4 files changed, 74 insertions(+), 30 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go index a0b5ff71cf..bc9c830ee9 100644 --- a/src/cmd/compile/internal/ssa/compile.go +++ b/src/cmd/compile/internal/ssa/compile.go @@ -289,8 +289,9 @@ var passOrder = [...]constraint{ {"opt", "nilcheckelim"}, // tighten should happen before lowering to avoid splitting naturally paired instructions such as CMP/SET {"tighten", "lower"}, - // cse, nilcheckelim, prove and loopbce share idom. + // cse, phiopt, nilcheckelim, prove and loopbce share idom. {"generic domtree", "generic cse"}, + {"generic domtree", "phiopt"}, {"generic domtree", "nilcheckelim"}, {"generic domtree", "prove"}, {"generic domtree", "loopbce"}, diff --git a/src/cmd/compile/internal/ssa/phiopt.go b/src/cmd/compile/internal/ssa/phiopt.go index 2d0a45733a..4efd497bdb 100644 --- a/src/cmd/compile/internal/ssa/phiopt.go +++ b/src/cmd/compile/internal/ssa/phiopt.go @@ -45,44 +45,51 @@ func phiopt(f *Func) { } // b0 is the if block giving the boolean value. - var reverse bool + // reverse is the predecessor from which the truth value comes. 
+ var reverse int if b0.Succs[0] == pb0 && b0.Succs[1] == pb1 { - reverse = false + reverse = 0 } else if b0.Succs[0] == pb1 && b0.Succs[1] == pb0 { - reverse = true + reverse = 1 } else { b.Fatalf("invalid predecessors\n") } for _, v := range b.Values { - if v.Op != OpPhi || !v.Type.IsBoolean() || v.Args[0].Op != OpConstBool || v.Args[1].Op != OpConstBool { + if v.Op != OpPhi || !v.Type.IsBoolean() { continue } - ok, isCopy := false, false - if v.Args[0].AuxInt == 1 && v.Args[1].AuxInt == 0 { - ok, isCopy = true, !reverse - } else if v.Args[0].AuxInt == 0 && v.Args[1].AuxInt == 1 { - ok, isCopy = true, reverse - } - - // (Phi (ConstBool [x]) (ConstBool [x])) is already handled by opt / phielim. - - if ok && isCopy { - if f.pass.debug > 0 { - f.Config.Warnl(b.Line, "converted OpPhi to OpCopy") + // Replaces + // if a { x = true } else { x = false } with x = a + // and + // if a { x = false } else { x = true } with x = !a + if v.Args[0].Op == OpConstBool && v.Args[1].Op == OpConstBool { + if v.Args[reverse].AuxInt != v.Args[1-reverse].AuxInt { + ops := [2]Op{OpNot, OpCopy} + v.reset(ops[v.Args[reverse].AuxInt]) + v.AddArg(b0.Control) + if f.pass.debug > 0 { + f.Config.Warnl(b.Line, "converted OpPhi to %v", v.Op) + } + continue } - v.reset(OpCopy) - v.AddArg(b0.Control) - continue } - if ok && !isCopy { - if f.pass.debug > 0 { - f.Config.Warnl(b.Line, "converted OpPhi to OpNot") + + // Replaces + // if a { x = true } else { x = value } with x = a || value. + // Requires that value dominates x, meaning that regardless of a, + // value is always computed. This guarantees that the side effects + // of value are not seen if a is false. 
+ if v.Args[reverse].Op == OpConstBool && v.Args[reverse].AuxInt == 1 { + if tmp := v.Args[1-reverse]; f.sdom.isAncestorEq(tmp.Block, b) { + v.reset(OpOr8) + v.SetArgs2(b0.Control, tmp) + if f.pass.debug > 0 { + f.Config.Warnl(b.Line, "converted OpPhi to %v", v.Op) + } + continue } - v.reset(OpNot) - v.AddArg(b0.Control) - continue } } } diff --git a/test/phiopt.go b/test/phiopt.go index 9b9b701124..37caab0b51 100644 --- a/test/phiopt.go +++ b/test/phiopt.go @@ -1,8 +1,13 @@ // +build amd64 // errorcheck -0 -d=ssa/phiopt/debug=3 +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package main +//go:noinline func f0(a bool) bool { x := false if a { @@ -10,9 +15,10 @@ func f0(a bool) bool { } else { x = false } - return x // ERROR "converted OpPhi to OpCopy$" + return x // ERROR "converted OpPhi to Copy$" } +//go:noinline func f1(a bool) bool { x := false if a { @@ -20,23 +26,49 @@ func f1(a bool) bool { } else { x = true } - return x // ERROR "converted OpPhi to OpNot$" + return x // ERROR "converted OpPhi to Not$" } +//go:noinline func f2(a, b int) bool { x := true if a == b { x = false } - return x // ERROR "converted OpPhi to OpNot$" + return x // ERROR "converted OpPhi to Not$" } +//go:noinline func f3(a, b int) bool { x := false if a == b { x = true } - return x // ERROR "converted OpPhi to OpCopy$" + return x // ERROR "converted OpPhi to Copy$" +} + +//go:noinline +func f4(a, b bool) bool { + return a || b // ERROR "converted OpPhi to Or8$" +} + +//go:noinline +func f5(a int, b bool) bool { + x := b + if a == 0 { + x = true + } + return x // ERROR "converted OpPhi to Or8$" +} + +//go:noinline +func f6(a int, b bool) bool { + x := b + if a == 0 { + // f6 has side effects so the OpPhi should not be converted. 
+ x = f6(a, b) + } + return x } func main() { diff --git a/test/prove.go b/test/prove.go index a78adf03dc..8bcc9ae614 100644 --- a/test/prove.go +++ b/test/prove.go @@ -1,6 +1,10 @@ // +build amd64 // errorcheck -0 -d=ssa/prove/debug=3 +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package main import "math" -- cgit v1.3 From 2244ae417312a59e722643f6ea2f1b8168c599c9 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 19 Apr 2016 14:15:37 -0700 Subject: cmd/compile/internal/gc: simplify typecheck's Efoo consts There's no need for Eiota, Eindir, Eaddr, or Eproc; the values are threaded through to denote various typechecking contexts, but they don't actually influence typechecking behavior at all. Also, while here, switch the Efoo const declarations to use iota. Passes toolstash -cmp. Change-Id: I5cea869ccd0755c481cf071978f863474bc9c1ed Reviewed-on: https://go-review.googlesource.com/22271 Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot Reviewed-by: Brad Fitzpatrick --- src/cmd/compile/internal/gc/typecheck.go | 48 +++++++++++++------------------- 1 file changed, 19 insertions(+), 29 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 328737ee14..bf85819bce 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -12,17 +12,13 @@ import ( ) const ( - Etop = 1 << 1 // evaluated at statement level - Erv = 1 << 2 // evaluated in value context - Etype = 1 << 3 - Ecall = 1 << 4 // call-only expressions are ok - Efnstruct = 1 << 5 // multivalue function returns are ok - Eiota = 1 << 6 // iota is ok - Easgn = 1 << 7 // assigning to expression - Eindir = 1 << 8 // indirecting through expression - Eaddr = 1 << 9 // taking address of expression - Eproc = 1 << 10 // inside a go statement - Ecomplit = 1 << 11 // 
type in composite literal + Etop = 1 << iota // evaluated at statement level + Erv // evaluated in value context + Etype // evaluated in type context + Ecall // call-only expressions are ok + Efnstruct // multivalue function returns are ok + Easgn // assigning to expression + Ecomplit // type in composite literal ) // type check the whole tree of an expression. @@ -476,13 +472,7 @@ OpSwitch: // type or expr case OIND: - ntop := Erv | Etype - - if top&Eaddr == 0 { // The *x in &*x is not an indirect. - ntop |= Eindir - } - ntop |= top & Ecomplit - n.Left = typecheck(n.Left, ntop) + n.Left = typecheck(n.Left, Erv|Etype|top&Ecomplit) l := n.Left t := l.Type if t == nil { @@ -556,8 +546,8 @@ OpSwitch: op = Op(n.Etype) } else { ok |= Erv - n.Left = typecheck(n.Left, Erv|top&Eiota) - n.Right = typecheck(n.Right, Erv|top&Eiota) + n.Left = typecheck(n.Left, Erv) + n.Right = typecheck(n.Right, Erv) l = n.Left r = n.Right if l.Type == nil || r.Type == nil { @@ -775,7 +765,7 @@ OpSwitch: case OCOM, OMINUS, ONOT, OPLUS: ok |= Erv - n.Left = typecheck(n.Left, Erv|top&Eiota) + n.Left = typecheck(n.Left, Erv) l := n.Left t := l.Type if t == nil { @@ -795,7 +785,7 @@ OpSwitch: case OADDR: ok |= Erv - n.Left = typecheck(n.Left, Erv|Eaddr) + n.Left = typecheck(n.Left, Erv) if n.Left.Type == nil { n.Type = nil return n @@ -1262,7 +1252,7 @@ OpSwitch: } } - n.Left = typecheck(n.Left, Erv|Etype|Ecall|top&Eproc) + n.Left = typecheck(n.Left, Erv|Etype|Ecall) n.Diag |= n.Left.Diag l = n.Left if l.Op == ONAME && l.Etype != 0 { @@ -1479,8 +1469,8 @@ OpSwitch: n.Type = nil return n } - n.Left = typecheck(n.Left, Erv|top&Eiota) - n.Right = typecheck(n.Right, Erv|top&Eiota) + n.Left = typecheck(n.Left, Erv) + n.Right = typecheck(n.Right, Erv) l = n.Left r = n.Right if l.Type == nil || r.Type == nil { @@ -1738,7 +1728,7 @@ OpSwitch: case OCONV: ok |= Erv saveorignode(n) - n.Left = typecheck(n.Left, Erv|top&(Eindir|Eiota)) + n.Left = typecheck(n.Left, Erv) n.Left = convlit1(n.Left, n.Type, true, 
noReuse) t := n.Left.Type if t == nil || n.Type == nil { @@ -1926,7 +1916,7 @@ OpSwitch: case OPRINT, OPRINTN: ok |= Etop - typecheckslice(n.List.Slice(), Erv|Eindir) // Eindir: address does not escape + typecheckslice(n.List.Slice(), Erv) ls := n.List.Slice() for i1, n1 := range ls { // Special case for print: int constant is int64, not int. @@ -2062,7 +2052,7 @@ OpSwitch: case OPROC: ok |= Etop - n.Left = typecheck(n.Left, Etop|Eproc|Erv) + n.Left = typecheck(n.Left, Etop|Erv) checkdefergo(n) break OpSwitch @@ -3707,7 +3697,7 @@ func typecheckdef(n *Node) *Node { Yyerror("xxx") } - e = typecheck(e, Erv|Eiota) + e = typecheck(e, Erv) if Isconst(e, CTNIL) { Yyerror("const initializer cannot be nil") goto ret -- cgit v1.3 From bfe0cbdc50cbc6a632d1e5ebbdcc625d69451935 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 19 Apr 2016 15:38:59 -0700 Subject: cmd/compile,runtime: pass elem type to {make,grow}slice No point in passing the slice type to these functions. All they need is the element type. One less indirection, maybe a few less []T type descriptors in the binary. 
Change-Id: Ib0b83b5f14ca21d995ecc199ce8ac00c4eb375e6 Reviewed-on: https://go-review.googlesource.com/22275 Reviewed-by: Josh Bleecher Snyder --- src/cmd/compile/internal/gc/cgen.go | 2 +- src/cmd/compile/internal/gc/ssa.go | 2 +- src/cmd/compile/internal/gc/walk.go | 8 ++++---- src/runtime/slice.go | 16 +++++++--------- 4 files changed, 13 insertions(+), 15 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go index 658cc8a50e..5c5bedaa31 100644 --- a/src/cmd/compile/internal/gc/cgen.go +++ b/src/cmd/compile/internal/gc/cgen.go @@ -2876,7 +2876,7 @@ func cgen_append(n, res *Node) { arg.Addable = true arg.Xoffset = Ctxt.FixedFrameSize() arg.Type = Ptrto(Types[TUINT8]) - Cgen(typename(res.Type), &arg) + Cgen(typename(res.Type.Elem()), &arg) arg.Xoffset += int64(Widthptr) arg.Type = Types[Tptr] diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index c4008c9ce1..11e362c116 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -2185,7 +2185,7 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { // Call growslice s.startBlock(grow) - taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Types[TUINTPTR], typenamesym(n.Type)}, s.sb) + taddr := s.newValue1A(ssa.OpAddr, Types[TUINTPTR], &ssa.ExternSymbol{Types[TUINTPTR], typenamesym(n.Type.Elem())}, s.sb) r := s.rtcall(growslice, true, []*Type{pt, Types[TINT], Types[TINT]}, taddr, p, l, c, nl) diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index e4d93339a9..82ac74ae33 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1420,11 +1420,11 @@ opswitch: r = walkexpr(r, init) n = r } else { - // makeslice(t *Type, nel int64, max int64) (ary []any) + // makeslice(et *Type, nel int64, max int64) (ary []any) fn := syslook("makeslice") fn = substArgTypes(fn, t.Elem()) // any-1 - n = 
mkcall1(fn, n.Type, init, typename(n.Type), conv(l, Types[TINT64]), conv(r, Types[TINT64])) + n = mkcall1(fn, n.Type, init, typename(t.Elem()), conv(l, Types[TINT64]), conv(r, Types[TINT64])) } case ORUNESTR: @@ -2799,7 +2799,7 @@ func appendslice(n *Node, init *Nodes) *Node { fn = substArgTypes(fn, s.Type.Elem(), s.Type.Elem()) // s = growslice(T, s, n) - nif.Nbody.Set1(Nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(s.Type), s, nn))) + nif.Nbody.Set1(Nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(s.Type.Elem()), s, nn))) l = append(l, nif) // s = s[:n] @@ -2929,7 +2929,7 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node { fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem()) nx.Nbody.Set1(Nod(OAS, ns, - mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type), ns, + mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns, Nod(OADD, Nod(OLEN, ns, nil), na)))) l = append(l, nx) diff --git a/src/runtime/slice.go b/src/runtime/slice.go index 873e97ebff..e86c1ce2c8 100644 --- a/src/runtime/slice.go +++ b/src/runtime/slice.go @@ -37,14 +37,14 @@ func maxSliceCap(elemsize uintptr) uintptr { } // TODO: take uintptrs instead of int64s? -func makeslice(t *slicetype, len64, cap64 int64) slice { +func makeslice(et *_type, len64, cap64 int64) slice { // NOTE: The len > maxElements check here is not strictly necessary, // but it produces a 'len out of range' error instead of a 'cap out of range' error // when someone does make([]T, bignumber). 'cap out of range' is true too, // but since the cap is only being supplied implicitly, saying len is clearer. // See issue 4085. 
- maxElements := maxSliceCap(t.elem.size) + maxElements := maxSliceCap(et.size) len := int(len64) if len64 < 0 || int64(len) != len64 || uintptr(len) > maxElements { panic(errorString("makeslice: len out of range")) @@ -55,7 +55,6 @@ func makeslice(t *slicetype, len64, cap64 int64) slice { panic(errorString("makeslice: cap out of range")) } - et := t.elem var flags uint32 if et.kind&kindNoPointers != 0 { flags = flagNoScan @@ -65,7 +64,7 @@ func makeslice(t *slicetype, len64, cap64 int64) slice { } // growslice handles slice growth during append. -// It is passed the slice type, the old slice, and the desired new minimum capacity, +// It is passed the slice element type, the old slice, and the desired new minimum capacity, // and it returns a new slice with at least that capacity, with the old data // copied into it. // The new slice's length is set to the old slice's length, @@ -74,16 +73,15 @@ func makeslice(t *slicetype, len64, cap64 int64) slice { // to calculate where to write new values during an append. // TODO: When the old backend is gone, reconsider this decision. // The SSA backend might prefer the new length or to return only ptr/cap and save stack space. 
-func growslice(t *slicetype, old slice, cap int) slice { +func growslice(et *_type, old slice, cap int) slice { if raceenabled { - callerpc := getcallerpc(unsafe.Pointer(&t)) - racereadrangepc(old.array, uintptr(old.len*int(t.elem.size)), callerpc, funcPC(growslice)) + callerpc := getcallerpc(unsafe.Pointer(&et)) + racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, funcPC(growslice)) } if msanenabled { - msanread(old.array, uintptr(old.len*int(t.elem.size))) + msanread(old.array, uintptr(old.len*int(et.size))) } - et := t.elem if et.size == 0 { if cap < old.cap { panic(errorString("growslice: cap out of range")) -- cgit v1.3 From b57ac333310e8b8ec01708dcca99430b641457c5 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Fri, 15 Apr 2016 12:49:30 -0700 Subject: cmd/compile: forward-looking desired register biasing Improve forward-looking desired register calculations. It is now inter-block and handles a bunch more cases. Fixes #14504 Fixes #14828 Fixes #15254 Change-Id: Ic240fa0ec6a779d80f577f55c8a6c4ac8c1a940a Reviewed-on: https://go-review.googlesource.com/22160 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/regalloc.go | 541 ++++++++++++++++++++++++------- 1 file changed, 422 insertions(+), 119 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 7be1cf593c..2ac684f121 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -189,11 +189,9 @@ type valState struct { uses *use // list of uses in this block spill *Value // spilled copy of the Value spillUsed bool - spillUsedShuffle bool // true if used in shuffling, after ordinary uses - needReg bool // cached value of !v.Type.IsMemory() && !v.Type.IsVoid() && !.v.Type.IsFlags() - rematerializeable bool // cached value of v.rematerializeable() - desired register // register we want value to be in, 
if any - avoid regMask // registers to avoid if we can + spillUsedShuffle bool // true if used in shuffling, after ordinary uses + needReg bool // cached value of !v.Type.IsMemory() && !v.Type.IsVoid() && !.v.Type.IsFlags() + rematerializeable bool // cached value of v.rematerializeable() } type regState struct { @@ -205,10 +203,11 @@ type regState struct { type regAllocState struct { f *Func - registers []Register - numRegs register - SPReg register - SBReg register + registers []Register + numRegs register + SPReg register + SBReg register + allocatable regMask // for each block, its primary predecessor. // A predecessor of b is primary if it is the closest @@ -220,6 +219,11 @@ type regAllocState struct { // which are live at the end of b, together with a count of how many instructions // forward to the next use. live [][]liveInfo + // desired register assignments at the end of each block. + // Note that this is a static map computed before allocation occurs. Dynamic + // register desires (from partially completed allocations) will trump + // this information. + desired []desiredState // current state of each (preregalloc) Value values []valState @@ -333,6 +337,7 @@ func (s *regAllocState) assignReg(r register, v *Value, c *Value) { // If there is no unused register, a Value will be kicked out of // a register to make room. func (s *regAllocState) allocReg(v *Value, mask regMask) register { + mask &= s.allocatable mask &^= s.nospill if mask == 0 { s.f.Fatalf("no register available") @@ -340,20 +345,7 @@ func (s *regAllocState) allocReg(v *Value, mask regMask) register { // Pick an unused register if one is available. if mask&^s.used != 0 { - mask &^= s.used - - // Use desired register if we can. - d := s.values[v.ID].desired - if d != noRegister && mask>>d&1 != 0 { - mask = regMask(1) << d - } - - // Avoid avoidable registers if we can. 
- if mask&^s.values[v.ID].avoid != 0 { - mask &^= s.values[v.ID].avoid - } - - return pickReg(mask) + return pickReg(mask &^ s.used) } // Pick a value to spill. Spill the value with the @@ -363,10 +355,6 @@ func (s *regAllocState) allocReg(v *Value, mask regMask) register { // TODO: if a single value is in multiple registers, spill one of them // before spilling a value in just a single register. - // SP and SB are allocated specially. No regular value should - // be allocated to them. - mask &^= 1< noRegister || s.numRegs > register(unsafe.Sizeof(regMask(0))*8) { @@ -471,7 +452,17 @@ func (s *regAllocState) init(f *Func) { } } - s.f = f + // Figure out which registers we're allowed to use. + s.allocatable = regMask(1)<= 0; i-- { v := oldSched[i] - liveSet.remove(v.ID) - - r := s.values[v.ID].desired - if r != noRegister { - m := regMask(1) << r - // All live values should avoid this register so - // it will be available at this point. - for _, w := range liveSet.contents() { - s.values[w].avoid |= m + prefs := desired.remove(v.ID) + desired.clobber(opcodeTable[v.Op].reg.clobbers) + for _, j := range opcodeTable[v.Op].reg.inputs { + if countRegs(j.regs) != 1 { + continue } + desired.clobber(j.regs) + desired.add(v.Args[j.idx].ID, pickReg(j.regs)) } - - for _, a := range v.Args { - if !s.values[a.ID].needReg { - continue + if opcodeTable[v.Op].resultInArg0 { + if opcodeTable[v.Op].commutative { + desired.addList(v.Args[1].ID, prefs) } - liveSet.add(a.ID) + desired.addList(v.Args[0].ID, prefs) + } + // Save desired registers for this value. + dinfo[i].out = prefs + for j, a := range v.Args { + if j >= len(dinfo[i].in) { + break + } + dinfo[i].in[j] = desired.get(a.ID) } } // Process all the non-phi values. 
- for _, v := range oldSched { + for idx, v := range oldSched { if s.f.pass.debug > regDebug { fmt.Printf(" processing %s\n", v.LongString()) } @@ -960,36 +958,132 @@ func (s *regAllocState) regalloc(f *Func) { continue } + if s.f.pass.debug > regDebug { + fmt.Printf("value %s\n", v.LongString()) + fmt.Printf(" out:") + for _, r := range dinfo[idx].out { + if r != noRegister { + fmt.Printf(" %s", s.registers[r].Name()) + } + } + fmt.Println() + for i := 0; i < len(v.Args) && i < 3; i++ { + fmt.Printf(" in%d:", i) + for _, r := range dinfo[idx].in[i] { + if r != noRegister { + fmt.Printf(" %s", s.registers[r].Name()) + } + } + fmt.Println() + } + } + // Move arguments to registers. Process in an ordering defined // by the register specification (most constrained first). args = append(args[:0], v.Args...) for _, i := range regspec.inputs { - if i.regs == flagRegMask { + mask := i.regs + if mask == flagRegMask { // TODO: remove flag input from regspec.inputs. continue } - args[i.idx] = s.allocValToReg(v.Args[i.idx], i.regs, true, v.Line) + if mask&s.values[args[i.idx].ID].regs == 0 { + // Need a new register for the input. + mask &= s.allocatable + mask &^= s.nospill + // Used desired register if available. + if i.idx < 3 { + for _, r := range dinfo[idx].in[i.idx] { + if r != noRegister && (mask&^s.used)>>r&1 != 0 { + // Desired register is allowed and unused. + mask = regMask(1) << r + break + } + } + } + // Avoid registers we're saving for other values. + if mask&^desired.avoid != 0 { + mask &^= desired.avoid + } + } + args[i.idx] = s.allocValToReg(args[i.idx], mask, true, v.Line) } - // If the output clobbers the input register, and the input register is - // live beyond the instruction, make another copy of the input register so - // we don't have to reload the value from the spill location. 
- if opcodeTable[v.Op].resultInArg0 && - s.liveAfterCurrentInstruction(v.Args[0]) && - countRegs(s.values[v.Args[0].ID].regs) == 1 { + // If the output clobbers the input register, make sure we have + // at least two copies of the input register so we don't + // have to reload the value from the spill location. + if opcodeTable[v.Op].resultInArg0 { + var m regMask + if !s.liveAfterCurrentInstruction(v.Args[0]) { + // arg0 is dead. We can clobber its register. + goto ok + } + if countRegs(s.values[v.Args[0].ID].regs) >= 2 { + // we have at least 2 copies of arg0. We can afford to clobber one. + goto ok + } + if opcodeTable[v.Op].commutative { + if !s.liveAfterCurrentInstruction(v.Args[1]) { + args[0], args[1] = args[1], args[0] + goto ok + } + if countRegs(s.values[v.Args[1].ID].regs) >= 2 { + args[0], args[1] = args[1], args[0] + goto ok + } + } - if opcodeTable[v.Op].commutative && - (!s.liveAfterCurrentInstruction(v.Args[1]) || - countRegs(s.values[v.Args[1].ID].regs) > 1) { - // Input #1 is dead after the instruction, or we have - // more than one copy of it in a register. Either way, - // use that input as the one that is clobbered. - args[0], args[1] = args[1], args[0] - } else { - m := s.compatRegs(v.Args[0].Type) - m &^= s.values[v.Args[0].ID].regs // a register not already holding v.Args[0] - s.allocValToReg(v.Args[0], m, true, v.Line) + // We can't overwrite arg0 (or arg1, if commutative). So we + // need to make a copy of an input so we have a register we can modify. + + // Possible new registers to copy into. + m = s.compatRegs(v.Args[0].Type) &^ s.used + if m == 0 { + // No free registers. In this case we'll just clobber + // an input and future uses of that input must use a restore. + // TODO(khr): We should really do this like allocReg does it, + // spilling the value with the most distant next use. + goto ok + } + + // Try to move an input to the desired output. 
+ for _, r := range dinfo[idx].out { + if r != noRegister && m>>r&1 != 0 { + m = regMask(1) << r + args[0] = s.allocValToReg(v.Args[0], m, true, v.Line) + // Note: we update args[0] so the instruction will + // use the register copy we just made. + goto ok + } + } + // Try to copy input to its desired location & use its old + // location as the result register. + for _, r := range dinfo[idx].in[0] { + if r != noRegister && m>>r&1 != 0 { + m = regMask(1) << r + s.allocValToReg(v.Args[0], m, true, v.Line) + // Note: no update to args[0] so the instruction will + // use the original copy. + goto ok + } } + if opcodeTable[v.Op].commutative { + for _, r := range dinfo[idx].in[1] { + if r != noRegister && m>>r&1 != 0 { + m = regMask(1) << r + s.allocValToReg(v.Args[1], m, true, v.Line) + args[0], args[1] = args[1], args[0] + goto ok + } + } + } + // Avoid future fixed uses if we can. + if m&^desired.avoid != 0 { + m &^= desired.avoid + } + // Save input 0 to a new register so we can clobber it. + s.allocValToReg(v.Args[0], m, true, v.Line) + ok: } // Now that all args are in regs, we're ready to issue the value itself. @@ -1004,14 +1098,44 @@ func (s *regAllocState) regalloc(f *Func) { // Pick register for output. if s.values[v.ID].needReg { - mask := regspec.outputs[0] &^ s.reserved() - if mask>>33&1 != 0 { - s.f.Fatalf("bad mask %s\n", v.LongString()) - } + mask := regspec.outputs[0] & s.allocatable if opcodeTable[v.Op].resultInArg0 { - // Output must use the same register as input 0. - r := register(s.f.getHome(args[0].ID).(*Register).Num) - mask = regMask(1) << r + if !opcodeTable[v.Op].commutative { + // Output must use the same register as input 0. + r := register(s.f.getHome(args[0].ID).(*Register).Num) + mask = regMask(1) << r + } else { + // Output must use the same register as input 0 or 1. + r0 := register(s.f.getHome(args[0].ID).(*Register).Num) + r1 := register(s.f.getHome(args[1].ID).(*Register).Num) + // Check r0 and r1 for desired output register. 
+ found := false + for _, r := range dinfo[idx].out { + if (r == r0 || r == r1) && (mask&^s.used)>>r&1 != 0 { + mask = regMask(1) << r + found = true + if r == r1 { + args[0], args[1] = args[1], args[0] + } + break + } + } + if !found { + // Neither are desired, pick r0. + mask = regMask(1) << r0 + } + } + } + for _, r := range dinfo[idx].out { + if r != noRegister && (mask&^s.used)>>r&1 != 0 { + // Desired register is allowed and unused. + mask = regMask(1) << r + break + } + } + // Avoid registers we're saving for other values. + if mask&^desired.avoid != 0 { + mask &^= desired.avoid } r := s.allocReg(v, mask) s.assignReg(r, v, v) @@ -1089,6 +1213,9 @@ func (s *regAllocState) regalloc(f *Func) { } v := s.orig[vid] m := s.compatRegs(v.Type) &^ s.used + if m&^desired.avoid != 0 { + m &^= desired.avoid + } if m != 0 { s.allocValToReg(v, m, false, b.Line) } @@ -1875,24 +2002,36 @@ func (v *Value) rematerializeable() bool { } type liveInfo struct { - ID ID // ID of variable + ID ID // ID of value dist int32 // # of instructions before next use } +// dblock contains information about desired & avoid registers at the end of a block. +type dblock struct { + prefers []desiredStateEntry + avoid regMask +} + // computeLive computes a map from block ID to a list of value IDs live at the end // of that block. Together with the value ID is a count of how many instructions -// to the next use of that value. The resulting map is stored at s.live. +// to the next use of that value. The resulting map is stored in s.live. +// computeLive also computes the desired register information at the end of each block. +// This desired register information is stored in s.desired. // TODO: this could be quadratic if lots of variables are live across lots of // basic blocks. Figure out a way to make this function (or, more precisely, the user // of this function) require only linear size & time. 
func (s *regAllocState) computeLive() { f := s.f s.live = make([][]liveInfo, f.NumBlocks()) + s.desired = make([]desiredState, f.NumBlocks()) var phis []*Value live := newSparseMap(f.NumValues()) t := newSparseMap(f.NumValues()) + // Keep track of which value we want in each register. + var desired desiredState + // Instead of iterating over f.Blocks, iterate over their postordering. // Liveness information flows backward, so starting at the end // increases the probability that we will stabilize quickly. @@ -1915,7 +2054,7 @@ func (s *regAllocState) computeLive() { d := int32(len(b.Values)) if b.Kind == BlockCall || b.Kind == BlockDefer { // Because we keep no values in registers across a call, - // make every use past a call very far away. + // make every use past a call appear very far away. d += unlikelyDistance } for _, e := range s.live[b.ID] { @@ -1944,6 +2083,35 @@ func (s *regAllocState) computeLive() { } } } + // Propagate desired registers backwards. + desired.copy(&s.desired[b.ID]) + for i := len(b.Values) - 1; i >= 0; i-- { + v := b.Values[i] + prefs := desired.remove(v.ID) + if v.Op == OpPhi { + // TODO: if v is a phi, save desired register for phi inputs. + // For now, we just drop it and don't propagate + // desired registers back though phi nodes. + continue + } + // Cancel desired registers if they get clobbered. + desired.clobber(opcodeTable[v.Op].reg.clobbers) + // Update desired registers if there are any fixed register inputs. + for _, j := range opcodeTable[v.Op].reg.inputs { + if countRegs(j.regs) != 1 { + continue + } + desired.clobber(j.regs) + desired.add(v.Args[j.idx].ID, pickReg(j.regs)) + } + // Set desired register of input 0 if this is a 2-operand instruction. + if opcodeTable[v.Op].resultInArg0 { + if opcodeTable[v.Op].commutative { + desired.addList(v.Args[1].ID, prefs) + } + desired.addList(v.Args[0].ID, prefs) + } + } // For each predecessor of b, expand its list of live-at-end values. 
// invariant: live contains the values live at the start of b (excluding phi inputs) @@ -1963,6 +2131,9 @@ func (s *regAllocState) computeLive() { } } + // Update any desired registers at the end of p. + s.desired[p.ID].merge(&desired) + // Start t off with the previously known live values at the end of p. t.clear() for _, e := range s.live[p.ID] { @@ -1983,7 +2154,7 @@ func (s *regAllocState) computeLive() { // simultaneously happening at the start of the block). for _, v := range phis { id := v.Args[i].ID - if s.values[id].needReg && !t.contains(id) || delta < t.get(id) { + if s.values[id].needReg && (!t.contains(id) || delta < t.get(id)) { update = true t.set(id, delta) } @@ -2015,20 +2186,152 @@ func (s *regAllocState) computeLive() { fmt.Printf(" %s:", b) for _, x := range s.live[b.ID] { fmt.Printf(" v%d", x.ID) + for _, e := range s.desired[b.ID].entries { + if e.ID != x.ID { + continue + } + fmt.Printf("[") + first := true + for _, r := range e.regs { + if r == noRegister { + continue + } + if !first { + fmt.Printf(",") + } + fmt.Print(s.registers[r].Name()) + first = false + } + fmt.Printf("]") + } } + fmt.Printf(" avoid=%x", int64(s.desired[b.ID].avoid)) fmt.Println() } } } -// reserved returns a mask of reserved registers. -func (s *regAllocState) reserved() regMask { - var m regMask - if obj.Framepointer_enabled != 0 { - m |= 1 << 5 // BP +// A desiredState represents desired register assignments. +type desiredState struct { + // Desired assignments will be small, so we just use a list + // of valueID+registers entries. + entries []desiredStateEntry + // Registers that other values want to be in. This value will + // contain at least the union of the regs fields of entries, but + // may contain additional entries for values that were once in + // this data structure but are no longer. + avoid regMask +} +type desiredStateEntry struct { + // (pre-regalloc) value + ID ID + // Registers it would like to be in, in priority order. 
+ // Unused slots are filled with noRegister. + regs [4]register +} + +func (d *desiredState) clear() { + d.entries = d.entries[:0] + d.avoid = 0 +} + +// get returns a list of desired registers for value vid. +func (d *desiredState) get(vid ID) [4]register { + for _, e := range d.entries { + if e.ID == vid { + return e.regs + } } - if s.f.Config.ctxt.Flag_dynlink { - m |= 1 << 15 // R15 + return [4]register{noRegister, noRegister, noRegister, noRegister} +} + +// add records that we'd like value vid to be in register r. +func (d *desiredState) add(vid ID, r register) { + d.avoid |= regMask(1) << r + for i := range d.entries { + e := &d.entries[i] + if e.ID != vid { + continue + } + if e.regs[0] == r { + // Already known and highest priority + return + } + for j := 1; j < len(e.regs); j++ { + if e.regs[j] == r { + // Move from lower priority to top priority + copy(e.regs[1:], e.regs[:j]) + e.regs[0] = r + return + } + } + copy(e.regs[1:], e.regs[:]) + e.regs[0] = r + return + } + d.entries = append(d.entries, desiredStateEntry{vid, [4]register{r, noRegister, noRegister, noRegister}}) +} + +func (d *desiredState) addList(vid ID, regs [4]register) { + // regs is in priority order, so iterate in reverse order. + for i := len(regs) - 1; i >= 0; i-- { + r := regs[i] + if r != noRegister { + d.add(vid, r) + } + } +} + +// clobber erases any desired registers in the set m. +func (d *desiredState) clobber(m regMask) { + for i := 0; i < len(d.entries); { + e := &d.entries[i] + j := 0 + for _, r := range e.regs { + if r != noRegister && m>>r&1 == 0 { + e.regs[j] = r + j++ + } + } + if j == 0 { + // No more desired registers for this value. + d.entries[i] = d.entries[len(d.entries)-1] + d.entries = d.entries[:len(d.entries)-1] + continue + } + for ; j < len(e.regs); j++ { + e.regs[j] = noRegister + } + i++ + } + d.avoid &^= m +} + +// copy copies a desired state from another desiredState x. 
+func (d *desiredState) copy(x *desiredState) { + d.entries = append(d.entries[:0], x.entries...) + d.avoid = x.avoid +} + +// remove removes the desired registers for vid and returns them. +func (d *desiredState) remove(vid ID) [4]register { + for i := range d.entries { + if d.entries[i].ID == vid { + regs := d.entries[i].regs + d.entries[i] = d.entries[len(d.entries)-1] + d.entries = d.entries[:len(d.entries)-1] + return regs + } + } + return [4]register{noRegister, noRegister, noRegister, noRegister} +} + +// merge merges another desired state x into d. +func (d *desiredState) merge(x *desiredState) { + d.avoid |= x.avoid + // There should only be a few desired registers, so + // linear insert is ok. + for _, e := range x.entries { + d.addList(e.ID, e.regs) } - return m } -- cgit v1.3 From 60fd32a47fdffb95d3646c9fc75acc9beff67183 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 19 Apr 2016 08:31:04 -0700 Subject: cmd/compile: change the way we handle large map values mapaccess{1,2} returns a pointer to the value. When the key is not in the map, it returns a pointer to zeroed memory. Currently, for large map values we have a complicated scheme which dynamically allocates zeroed memory for this purpose. It is ugly code and requires an atomic.Load in a bunch of places we'd rather not have it. Switch to a scheme where callsites of mapaccess{1,2} which expect large return values pass in a pointer to zeroed memory that mapaccess can return if the key is not found. This avoids the atomic.Load on all map accesses with a few extra instructions only for the large value accesses, plus a bit of bss space. There was a time (1.4 & 1.5?) where we did something like this but all the tricks to make the right size zero value were done by the linker. That scheme broke in the presence of dynamic linking. The scheme in this CL works even when dynamic linking. 
Fixes #12337 Change-Id: Ic2d0319944af33bbb59785938d9ab80958d1b4b1 Reviewed-on: https://go-review.googlesource.com/22221 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Michael Hudson-Doyle --- src/cmd/compile/internal/gc/builtin.go | 2 + src/cmd/compile/internal/gc/builtin/runtime.go | 2 + src/cmd/compile/internal/gc/go.go | 3 ++ src/cmd/compile/internal/gc/main.go | 5 ++ src/cmd/compile/internal/gc/obj.go | 5 ++ src/cmd/compile/internal/gc/reflect.go | 24 ++++++++++ src/cmd/compile/internal/gc/walk.go | 18 +++++-- src/runtime/hashmap.go | 65 +++++++++----------------- src/runtime/hashmap_fast.go | 33 +++++++------ src/runtime/map_test.go | 16 +++++++ 10 files changed, 110 insertions(+), 63 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go index 411c7b8605..b593d11296 100644 --- a/src/cmd/compile/internal/gc/builtin.go +++ b/src/cmd/compile/internal/gc/builtin.go @@ -70,10 +70,12 @@ const runtimeimport = "" + "func @\"\".mapaccess1_fast32 (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" + "func @\"\".mapaccess1_fast64 (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" + "func @\"\".mapaccess1_faststr (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" + + "func @\"\".mapaccess1_fat (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 *any, @\"\".zero·5 *byte) (@\"\".val·1 *any)\n" + "func @\"\".mapaccess2 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 *any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + "func @\"\".mapaccess2_fast32 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + "func @\"\".mapaccess2_fast64 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + "func @\"\".mapaccess2_faststr (@\"\".mapType·3 
*byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + + "func @\"\".mapaccess2_fat (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 *any, @\"\".zero·6 *byte) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + "func @\"\".mapassign1 (@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".key·3 *any, @\"\".val·4 *any)\n" + "func @\"\".mapiterinit (@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".hiter·3 *any)\n" + "func @\"\".mapdelete (@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".key·3 *any)\n" + diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go index 584368a144..e9316cb313 100644 --- a/src/cmd/compile/internal/gc/builtin/runtime.go +++ b/src/cmd/compile/internal/gc/builtin/runtime.go @@ -89,10 +89,12 @@ func mapaccess1(mapType *byte, hmap map[any]any, key *any) (val *any) func mapaccess1_fast32(mapType *byte, hmap map[any]any, key any) (val *any) func mapaccess1_fast64(mapType *byte, hmap map[any]any, key any) (val *any) func mapaccess1_faststr(mapType *byte, hmap map[any]any, key any) (val *any) +func mapaccess1_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any) func mapaccess2(mapType *byte, hmap map[any]any, key *any) (val *any, pres bool) func mapaccess2_fast32(mapType *byte, hmap map[any]any, key any) (val *any, pres bool) func mapaccess2_fast64(mapType *byte, hmap map[any]any, key any) (val *any, pres bool) func mapaccess2_faststr(mapType *byte, hmap map[any]any, key any) (val *any, pres bool) +func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool) func mapassign1(mapType *byte, hmap map[any]any, key *any, val *any) func mapiterinit(mapType *byte, hmap map[any]any, hiter *any) func mapdelete(mapType *byte, hmap map[any]any, key *any) diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index af9aaf0dae..87b6121c8e 100644 --- 
a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -175,6 +175,9 @@ var unsafepkg *Pkg // package unsafe var trackpkg *Pkg // fake package for field tracking +var mappkg *Pkg // fake package for map zero value +var zerosize int64 + var Tptr EType // either TPTR32 or TPTR64 var myimportpath string diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 37e8a17886..2afd262fed 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -137,6 +137,11 @@ func Main() { typepkg = mkpkg("type") typepkg.Name = "type" + // pseudo-package used for map zero values + mappkg = mkpkg("go.map") + mappkg.Name = "go.map" + mappkg.Prefix = "go.map" + goroot = obj.Getgoroot() goos = obj.Getgoos() diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index b60f78f638..fab611fdb5 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -87,6 +87,11 @@ func dumpobj() { dumpglobls() externdcl = tmp + if zerosize > 0 { + zero := Pkglookup("zero", mappkg) + ggloblsym(zero, int32(zerosize), obj.DUPOK|obj.RODATA) + } + dumpdata() obj.Writeobjdirect(Ctxt, bout.Writer) diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 5031045c64..4792f88abe 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -1689,3 +1689,27 @@ func (p *GCProg) emit(t *Type, offset int64) { } } } + +// zeroaddr returns the address of a symbol with at least +// size bytes of zeros. 
+func zeroaddr(size int64) *Node { + if size >= 1<<31 { + Fatalf("map value too big %d", size) + } + if zerosize < size { + zerosize = size + } + s := Pkglookup("zero", mappkg) + if s.Def == nil { + x := newname(s) + x.Type = Types[TUINT8] + x.Class = PEXTERN + x.Typecheck = 1 + s.Def = x + } + z := Nod(OADDR, s.Def, nil) + z.Type = Ptrto(Types[TUINT8]) + z.Addable = true + z.Typecheck = 1 + return z +} diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 82ac74ae33..8cce85de9a 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -864,8 +864,14 @@ opswitch: // a = *var a := n.List.First() - fn := mapfn(p, t) - r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key) + if w := t.Val().Width; w <= 1024 { // 1024 must match ../../../../runtime/hashmap.go:maxZero + fn := mapfn(p, t) + r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key) + } else { + fn := mapfn("mapaccess2_fat", t) + z := zeroaddr(w) + r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key, z) + } // mapaccess2* returns a typed bool, but due to spec changes, // the boolean result of i.(T) is now untyped so we make it the @@ -1222,7 +1228,13 @@ opswitch: p = "mapaccess1" } - n = mkcall1(mapfn(p, t), Ptrto(t.Val()), init, typename(t), n.Left, key) + if w := t.Val().Width; w <= 1024 { // 1024 must match ../../../../runtime/hashmap.go:maxZero + n = mkcall1(mapfn(p, t), Ptrto(t.Val()), init, typename(t), n.Left, key) + } else { + p = "mapaccess1_fat" + z := zeroaddr(w) + n = mkcall1(mapfn(p, t), Ptrto(t.Val()), init, typename(t), n.Left, key, z) + } n = Nod(OIND, n, nil) n.Type = t.Val() n.Typecheck = 1 diff --git a/src/runtime/hashmap.go b/src/runtime/hashmap.go index 4f5d03d983..ff59faab5d 100644 --- a/src/runtime/hashmap.go +++ b/src/runtime/hashmap.go @@ -236,9 +236,6 @@ func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap { throw("need padding in bucket (value)") } - 
// make sure zeroptr is large enough - mapzero(t.elem) - // find size parameter which will hold the requested # of elements B := uint8(0) for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<2GB zero on 32-bit machine - throw("map element too large") - } - } - atomic.StorepNoWB(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys)) - atomic.StorepNoWB(unsafe.Pointer(&zerosize), unsafe.Pointer(zerosize)) - } - unlock(&zerolock) -} +const maxZero = 1024 // must match value in ../cmd/compile/internal/gc/walk.go +var zeroVal [maxZero]byte diff --git a/src/runtime/hashmap_fast.go b/src/runtime/hashmap_fast.go index 6a5484edee..8f9bb5a6fc 100644 --- a/src/runtime/hashmap_fast.go +++ b/src/runtime/hashmap_fast.go @@ -5,7 +5,6 @@ package runtime import ( - "runtime/internal/atomic" "runtime/internal/sys" "unsafe" ) @@ -16,7 +15,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32)) } if h == nil || h.count == 0 { - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") @@ -50,7 +49,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { } b = b.overflow(t) if b == nil { - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } } } @@ -61,7 +60,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32)) } if h == nil || h.count == 0 { - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") @@ -95,7 +94,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { } b = b.overflow(t) if b == nil { - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return 
unsafe.Pointer(&zeroVal[0]), false } } } @@ -106,7 +105,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64)) } if h == nil || h.count == 0 { - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") @@ -140,7 +139,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { } b = b.overflow(t) if b == nil { - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } } } @@ -151,7 +150,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) { racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64)) } if h == nil || h.count == 0 { - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") @@ -185,7 +184,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) { } b = b.overflow(t) if b == nil { - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } } } @@ -196,7 +195,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr)) } if h == nil || h.count == 0 { - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") @@ -220,7 +219,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)) } } - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } // long key, try not to do more comparisons than necessary keymaybe := uintptr(bucketCnt) @@ -258,7 +257,7 @@ func mapaccess1_faststr(t *maptype, h 
*hmap, ky string) unsafe.Pointer { return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)) } } - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } dohash: hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) @@ -290,7 +289,7 @@ dohash: } b = b.overflow(t) if b == nil { - return atomic.Loadp(unsafe.Pointer(&zeroptr)) + return unsafe.Pointer(&zeroVal[0]) } } } @@ -301,7 +300,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr)) } if h == nil || h.count == 0 { - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } if h.flags&hashWriting != 0 { throw("concurrent map read and map write") @@ -325,7 +324,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true } } - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } // long key, try not to do more comparisons than necessary keymaybe := uintptr(bucketCnt) @@ -361,7 +360,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true } } - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } dohash: hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) @@ -393,7 +392,7 @@ dohash: } b = b.overflow(t) if b == nil { - return atomic.Loadp(unsafe.Pointer(&zeroptr)), false + return unsafe.Pointer(&zeroVal[0]), false } } } diff --git a/src/runtime/map_test.go b/src/runtime/map_test.go index 9d2894cb6f..496f8e8868 100644 --- a/src/runtime/map_test.go +++ b/src/runtime/map_test.go @@ -317,6 +317,22 @@ func TestBigItems(t *testing.T) { } } +func 
TestMapHugeZero(t *testing.T) { + type T [4000]byte + m := map[int]T{} + x := m[0] + if x != (T{}) { + t.Errorf("map value not zero") + } + y, ok := m[0] + if ok { + t.Errorf("map value should be missing") + } + if y != (T{}) { + t.Errorf("map value not zero") + } +} + type empty struct { } -- cgit v1.3 From f4f1b30749be167b7c5ecb7c775c2acd8d32ae9e Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Wed, 20 Apr 2016 16:41:43 -0700 Subject: cmd/compile: accept old and new import format for builtin declarations Test with forceNewExport set to true (but continues to be disabled by default for now). Fixes #15322. Change-Id: I3b893db2206cbb79e66339284f22f4a0b20bf137 Reviewed-on: https://go-review.googlesource.com/22328 Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/main.go | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 2afd262fed..f6de58462e 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -643,11 +643,24 @@ func loadsys() { iota_ = -1000000 incannedimport = 1 - importpkg = Runtimepkg - parse_import(bufio.NewReader(strings.NewReader(runtimeimport)), nil) - - importpkg = unsafepkg - parse_import(bufio.NewReader(strings.NewReader(unsafeimport)), nil) + // The first byte in the binary export format is a 'c' or 'd' + // specifying the encoding format. We could just check that + // byte, but this is a perhaps more robust. Also, it is not + // speed-critical. 
+ // TODO(gri) simplify once textual export format has gone + if strings.HasPrefix(runtimeimport, "package") { + // textual export format + importpkg = Runtimepkg + parse_import(bufio.NewReader(strings.NewReader(runtimeimport)), nil) + importpkg = unsafepkg + parse_import(bufio.NewReader(strings.NewReader(unsafeimport)), nil) + } else { + // binary export format + importpkg = Runtimepkg + Import(bufio.NewReader(strings.NewReader(runtimeimport))) + importpkg = unsafepkg + Import(bufio.NewReader(strings.NewReader(unsafeimport))) + } importpkg = nil incannedimport = 0 -- cgit v1.3 From 75b886ab790782f34945c0e1b0dee4189399ac9e Mon Sep 17 00:00:00 2001 From: Tal Shprecher Date: Wed, 20 Apr 2016 14:05:48 -0700 Subject: cmd/compile: reject embedded unsafe.Pointer values Fixes #14729 Change-Id: Ied819aa7b23e25de30aa8cde049c97297b4cab11 Reviewed-on: https://go-review.googlesource.com/22325 Reviewed-by: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/dcl.go | 2 +- src/cmd/compile/internal/gc/type.go | 5 +++++ src/cmd/compile/internal/gc/typecheck.go | 2 +- test/fixedbugs/issue14729.go | 14 ++++++++++++++ 4 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 test/fixedbugs/issue14729.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 0e4b5f6051..e303f11c09 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -719,7 +719,7 @@ func checkembeddedtype(t *Type) { } } - if t.IsPtr() { + if t.IsPtr() || t.IsUnsafePtr() { Yyerror("embedded type cannot be a pointer") } else if t.Etype == TFORW && t.ForwardType().Embedlineno == 0 { t.ForwardType().Embedlineno = lineno diff --git a/src/cmd/compile/internal/gc/type.go b/src/cmd/compile/internal/gc/type.go index 855b070af6..16399547c7 100644 --- a/src/cmd/compile/internal/gc/type.go +++ b/src/cmd/compile/internal/gc/type.go @@ -1111,6 +1111,11 @@ func (t 
*Type) IsPtr() bool { return t.Etype == TPTR32 || t.Etype == TPTR64 } +// IsUnsafePtr reports whether t is an unsafe pointer. +func (t *Type) IsUnsafePtr() bool { + return t.Etype == TUNSAFEPTR +} + // IsPtrShaped reports whether t is represented by a single machine pointer. // In addition to regular Go pointer types, this includes map, channel, and // function types and unsafe.Pointer. It does not include array or struct types diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index bf85819bce..e158c87611 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3557,7 +3557,7 @@ func copytype(n *Node, t *Type) { if embedlineno != 0 { lineno = embedlineno - if t.IsPtr() { + if t.IsPtr() || t.IsUnsafePtr() { Yyerror("embedded type cannot be a pointer") } } diff --git a/test/fixedbugs/issue14729.go b/test/fixedbugs/issue14729.go new file mode 100644 index 0000000000..88e01f9e16 --- /dev/null +++ b/test/fixedbugs/issue14729.go @@ -0,0 +1,14 @@ +// errorcheck + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 14729: structs cannot embed unsafe.Pointer per the spec. + +package main + +import "unsafe" + +type s struct { unsafe.Pointer } // ERROR "embedded type cannot be a pointer" +type s1 struct { p unsafe.Pointer } -- cgit v1.3 From 4938d7b5fc06bbd137619eddd494a8cca288eb25 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Wed, 20 Apr 2016 17:29:50 -0700 Subject: cmd/compile: fix dominator check in check() Ancestor comparison was the wrong way around, effectively disabling the def-must-dominate-use check. 
Update #15084 Change-Id: Ic56d674c5000569d2cc855bbb000a60eae517c7c Reviewed-on: https://go-review.googlesource.com/22330 Run-TryBot: Keith Randall Reviewed-by: Josh Bleecher Snyder --- src/cmd/compile/internal/ssa/check.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go index e4b8cb05f4..f1d3857f88 100644 --- a/src/cmd/compile/internal/ssa/check.go +++ b/src/cmd/compile/internal/ssa/check.go @@ -338,7 +338,7 @@ func checkFunc(f *Func) { // domCheck reports whether x dominates y (including x==y). func domCheck(f *Func, sdom sparseTree, x, y *Block) bool { - if !sdom.isAncestorEq(y, f.Entry) { + if !sdom.isAncestorEq(f.Entry, y) { // unreachable - ignore return true } -- cgit v1.3 From e48434887e568fa96800a0dff36ab45bc844ea04 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Wed, 20 Apr 2016 21:46:39 -0700 Subject: cmd/compile: fix ssa/check/on build Disable phielimValue from rewrite pass for now. Change-Id: I9f3bb1f527b50bc7a21cc6b7cb89f6136efd81e8 Reviewed-on: https://go-review.googlesource.com/22335 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/rewrite.go | 2 -- 1 file changed, 2 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index c2f8ceadaf..9c625825b9 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -40,8 +40,6 @@ func applyRewrite(f *Func, rb func(*Block) bool, rv func(*Value, *Config) bool) } curb = nil for _, v := range b.Values { - change = phielimValue(v) || change - // Eliminate copy inputs. // If any copy input becomes unused, mark it // as invalid and discard its argument. 
Repeat -- cgit v1.3 From 7c6b48ffba9e0ea8ed846d194fe30189863f17f0 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Mon, 18 Apr 2016 12:21:51 -0400 Subject: cmd/compile/internal/arm: fix comparison & conditional branch for SSA on ARM Progress on SSA for ARM. Still not complete. Now Fibonacci function compiles and runs correctly. The old backend swaps the operands for CMP instruction. This CL does the same on SSA backend, and uses conditional branch accordingly. Updates #15365. Change-Id: I117e17feb22f03d936608bd232f76970e4bbe21a Reviewed-on: https://go-review.googlesource.com/22187 Reviewed-by: Keith Randall --- src/cmd/compile/internal/arm/ssa.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index e6211d00b7..ca10f1c508 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -91,8 +91,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpARMCMP: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG - p.From.Reg = gc.SSARegNum(v.Args[0]) - p.Reg = gc.SSARegNum(v.Args[1]) + // Special layout in ARM assembly + // Comparing to x86, the operands of ARM's CMP are reversed. + p.From.Reg = gc.SSARegNum(v.Args[1]) + p.Reg = gc.SSARegNum(v.Args[0]) case ssa.OpARMMOVWload: p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM @@ -142,7 +144,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { case ssa.BlockRet: gc.Prog(obj.ARET) case ssa.BlockARMLT: - p := gc.Prog(arm.ABGE) + p := gc.Prog(arm.ABLT) p.To.Type = obj.TYPE_BRANCH s.Branches = append(s.Branches, gc.Branch{p, b.Succs[0]}) p = gc.Prog(obj.AJMP) -- cgit v1.3 From 508a424eedccfe77f64d50c9870988a8c15b46b1 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Mon, 18 Apr 2016 10:30:20 -0400 Subject: cmd/compile/internal/gc: fix return value offset for SSA backend on ARM Progress on SSA backend for ARM. Still not complete. 
It compiles a Fibonacci function, but the caller picked the return value from an incorrect offset. This CL adjusts it to match the stack frame layout for architectures with link register. Updates #15365. Change-Id: I01e03c3e95f5503a185e8ac2b6d9caf4faf3d014 Reviewed-on: https://go-review.googlesource.com/22186 Reviewed-by: Keith Randall Run-TryBot: Brad Fitzpatrick TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/ssa.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 11e362c116..4a33a3808e 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -2668,7 +2668,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { return nil } fp := res.Field(0) - return s.entryNewValue1I(ssa.OpOffPtr, Ptrto(fp.Type), fp.Offset, s.sp) + return s.entryNewValue1I(ssa.OpOffPtr, Ptrto(fp.Type), fp.Offset+Ctxt.FixedFrameSize(), s.sp) } // etypesign returns the signed-ness of e, for integer/pointer etypes. -- cgit v1.3 From f8fc3710fd4c596adac57048f705a994f199df8c Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 21 Apr 2016 10:02:36 -0700 Subject: cmd/compile: handle mem copies in amd64 backend Fixes noopt builder. Change-Id: If13373b2597f0fcc9b1b2f9c860f2bd043e43c6c Reviewed-on: https://go-review.googlesource.com/22338 Reviewed-by: Keith Randall --- src/cmd/compile/internal/amd64/ssa.go | 3 +++ 1 file changed, 3 insertions(+) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 723a2ddec5..21dbc6238c 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -667,6 +667,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Offset = v.AuxInt case ssa.OpCopy, ssa.OpAMD64MOVQconvert: // TODO: use MOVQreg for reg->reg copies instead of OpCopy? 
+ if v.Type.IsMemory() { + return + } x := gc.SSARegNum(v.Args[0]) y := gc.SSARegNum(v) if x != y { -- cgit v1.3 From 40f1d0ca9f978376f7db24de3737b58589c8542b Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 18 Apr 2016 14:02:08 -0700 Subject: cmd/compile: split TSLICE into separate Type kind Instead of using TARRAY for both arrays and slices, create a new TSLICE kind to handle slices. Also, get rid of the "DDDArray" distinction. While kinda ugly, it seems likely we'll need to defer evaluating the constant bounds expressions for golang.org/issue/13890. Passes toolstash/buildall. Change-Id: I8e45d4900e7df3a04cce59428ec8b38035d3cc3a Reviewed-on: https://go-review.googlesource.com/22329 Reviewed-by: Josh Bleecher Snyder Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/alg.go | 15 +--- src/cmd/compile/internal/gc/align.go | 38 ++++----- src/cmd/compile/internal/gc/bexport.go | 12 +-- src/cmd/compile/internal/gc/bimport.go | 18 ++--- src/cmd/compile/internal/gc/const.go | 5 +- src/cmd/compile/internal/gc/export.go | 4 +- src/cmd/compile/internal/gc/fmt.go | 7 +- src/cmd/compile/internal/gc/gen.go | 13 ++-- src/cmd/compile/internal/gc/gsubr.go | 2 +- src/cmd/compile/internal/gc/order.go | 2 +- src/cmd/compile/internal/gc/plive.go | 21 +++-- src/cmd/compile/internal/gc/range.go | 4 +- src/cmd/compile/internal/gc/reflect.go | 65 +++++++--------- src/cmd/compile/internal/gc/sinit.go | 8 +- src/cmd/compile/internal/gc/sizeof_test.go | 1 - src/cmd/compile/internal/gc/ssa.go | 7 +- src/cmd/compile/internal/gc/subr.go | 12 +-- src/cmd/compile/internal/gc/type.go | 121 ++++++++++------------------- src/cmd/compile/internal/gc/typecheck.go | 50 +++++------- src/cmd/compile/internal/gc/universe.go | 7 +- src/cmd/compile/internal/gc/walk.go | 7 +- 21 files changed, 168 insertions(+), 251 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 
6e85438610..136612d56f 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -127,11 +127,10 @@ func algtype1(t *Type) (AlgKind, *Type) { } return AINTER, nil - case TARRAY: - if t.IsSlice() { - return ANOEQ, t - } + case TSLICE: + return ANOEQ, t + case TARRAY: a, bad := algtype1(t.Elem()) switch a { case AMEM: @@ -219,10 +218,6 @@ func genhash(sym *Sym, t *Type) { Fatalf("genhash %v", t) case TARRAY: - if t.IsSlice() { - Fatalf("genhash %v", t) - } - // An array of pure memory would be handled by the // standard algorithm, so the element type must not be // pure memory. @@ -399,10 +394,6 @@ func geneq(sym *Sym, t *Type) { Fatalf("geneq %v", t) case TARRAY: - if t.IsSlice() { - Fatalf("geneq %v", t) - } - // An array of pure memory would be handled by the // standard memequal, so the element type must not be // pure memory. Even if we unrolled the range loop, diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index e43ed7b225..8123041318 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -238,29 +238,31 @@ func dowidth(t *Type) { if t.Elem() == nil { break } - if t.IsArray() { - dowidth(t.Elem()) - if t.Elem().Width != 0 { - cap := (uint64(Thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width) - if uint64(t.NumElem()) > cap { - Yyerror("type %v larger than address space", Tconv(t, FmtLong)) - } - } - - w = t.NumElem() * t.Elem().Width - t.Align = t.Elem().Align - } else if t.IsSlice() { - w = int64(sizeof_Array) - checkwidth(t.Elem()) - t.Align = uint8(Widthptr) - } else if t.isDDDArray() { + if t.isDDDArray() { if !t.Broke { Yyerror("use of [...] 
array outside of array literal") t.Broke = true } - } else { - Fatalf("dowidth %v", t) // probably [...]T + break + } + + dowidth(t.Elem()) + if t.Elem().Width != 0 { + cap := (uint64(Thearch.MAXWIDTH) - 1) / uint64(t.Elem().Width) + if uint64(t.NumElem()) > cap { + Yyerror("type %v larger than address space", Tconv(t, FmtLong)) + } } + w = t.NumElem() * t.Elem().Width + t.Align = t.Elem().Align + + case TSLICE: + if t.Elem() == nil { + break + } + w = int64(sizeof_Array) + checkwidth(t.Elem()) + t.Align = uint8(Widthptr) case TSTRUCT: if t.IsFuncArgStruct() { diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 90b4edff18..7aa6c9ce6f 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -629,12 +629,12 @@ func (p *exporter) typ(t *Type) { if t.isDDDArray() { Fatalf("array bounds should be known at export time: %v", t) } - if t.IsArray() { - p.tag(arrayTag) - p.int64(t.NumElem()) - } else { - p.tag(sliceTag) - } + p.tag(arrayTag) + p.int64(t.NumElem()) + p.typ(t.Elem()) + + case TSLICE: + p.tag(sliceTag) p.typ(t.Elem()) case TDDDFIELD: diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index 6654345ead..ef89f9ad0a 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -369,18 +369,16 @@ func (p *importer) typ() *Type { dclcontext = savedContext - case arrayTag, sliceTag: + case arrayTag: t = p.newtyp(TARRAY) - var bound int64 - if i == arrayTag { - bound = p.int64() - } + bound := p.int64() elem := p.typ() - if i == arrayTag { - t.Extra = &ArrayType{Elem: elem, Bound: bound} - } else { - t.Extra = SliceType{Elem: elem} - } + t.Extra = &ArrayType{Elem: elem, Bound: bound} + + case sliceTag: + t = p.newtyp(TSLICE) + elem := p.typ() + t.Extra = SliceType{Elem: elem} case dddTag: t = p.newtyp(TDDDFIELD) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go 
index c7fb4d97e5..c2ed0d31d8 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -284,9 +284,7 @@ func convlit1(n *Node, t *Type, explicit bool, reuse canReuseNode) *Node { return n case TARRAY: - if !t.IsSlice() { - goto bad - } + goto bad case TPTR32, TPTR64, @@ -294,6 +292,7 @@ func convlit1(n *Node, t *Type, explicit bool, reuse canReuseNode) *Node { TMAP, TCHAN, TFUNC, + TSLICE, TUNSAFEPTR: break diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index cfe192f3ba..1dd02aef1f 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -203,7 +203,7 @@ func reexportdep(n *Node) { t := n.Type switch t.Etype { - case TARRAY, TCHAN, TPTR32, TPTR64: + case TARRAY, TCHAN, TPTR32, TPTR64, TSLICE: if t.Sym == nil { t = t.Elem() } @@ -303,7 +303,7 @@ func dumpexporttype(t *Type) { case TMAP: dumpexporttype(t.Val()) dumpexporttype(t.Key()) - case TARRAY, TCHAN, TPTR32, TPTR64: + case TARRAY, TCHAN, TPTR32, TPTR64, TSLICE: dumpexporttype(t.Elem()) } diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index 41d696574c..bfb031aac5 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -416,6 +416,7 @@ var etnames = []string{ TPTR64: "PTR64", TFUNC: "FUNC", TARRAY: "ARRAY", + TSLICE: "SLICE", TSTRUCT: "STRUCT", TCHAN: "CHAN", TMAP: "MAP", @@ -587,12 +588,12 @@ func typefmt(t *Type, flag FmtFlag) string { return "*" + t.Elem().String() case TARRAY: - if t.IsArray() { - return fmt.Sprintf("[%d]%v", t.NumElem(), t.Elem()) - } if t.isDDDArray() { return "[...]" + t.Elem().String() } + return fmt.Sprintf("[%d]%v", t.NumElem(), t.Elem()) + + case TSLICE: return "[]" + t.Elem().String() case TCHAN: diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index cc624cce7a..d16c4fa992 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ 
b/src/cmd/compile/internal/gc/gen.go @@ -1031,7 +1031,7 @@ func componentgen_wb(nr, nl *Node, wb bool) bool { // Emit vardef if needed. if nl.Op == ONAME { switch nl.Type.Etype { - case TARRAY, TSTRING, TINTER, TSTRUCT: + case TARRAY, TSLICE, TSTRING, TINTER, TSTRUCT: Gvardef(nl) } } @@ -1204,13 +1204,12 @@ func visitComponents(t *Type, startOffset int64, f func(elem *Type, elemOffset i return f(Ptrto(Types[TUINT8]), startOffset) && f(Types[Simtype[TUINT]], startOffset+int64(Widthptr)) - case TARRAY: - if t.IsSlice() { - return f(Ptrto(t.Elem()), startOffset+int64(Array_array)) && - f(Types[Simtype[TUINT]], startOffset+int64(Array_nel)) && - f(Types[Simtype[TUINT]], startOffset+int64(Array_cap)) - } + case TSLICE: + return f(Ptrto(t.Elem()), startOffset+int64(Array_array)) && + f(Types[Simtype[TUINT]], startOffset+int64(Array_nel)) && + f(Types[Simtype[TUINT]], startOffset+int64(Array_cap)) + case TARRAY: // Short-circuit [1e6]struct{}. if t.Elem().Width == 0 { return true diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index f1316db8d8..bcfd3439a0 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -277,7 +277,7 @@ func gused(n *Node) { func Isfat(t *Type) bool { if t != nil { switch t.Etype { - case TSTRUCT, TARRAY, TSTRING, + case TSTRUCT, TARRAY, TSLICE, TSTRING, TINTER: // maybe remove later return true } diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 3b83e3bcc0..2b9546f4f5 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -731,7 +731,7 @@ func orderstmt(n *Node, order *Order) { default: Fatalf("orderstmt range %v", n.Type) - case TARRAY: + case TARRAY, TSLICE: if n.List.Len() < 2 || isblank(n.List.Second()) { // for i := range x will only use x once, to compute len(x). // No need to copy it. 
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index 6e43d3133f..e04c8563b1 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -918,18 +918,17 @@ func onebitwalktype1(t *Type, xoffset *int64, bv Bvec) { bvset(bv, int32(*xoffset/int64(Widthptr)+1)) // pointer in second slot *xoffset += t.Width + case TSLICE: + // struct { byte *array; uintgo len; uintgo cap; } + if *xoffset&int64(Widthptr-1) != 0 { + Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t) + } + bvset(bv, int32(*xoffset/int64(Widthptr))) // pointer in first slot (BitsPointer) + *xoffset += t.Width + case TARRAY: - if t.IsSlice() { - // struct { byte *array; uintgo len; uintgo cap; } - if *xoffset&int64(Widthptr-1) != 0 { - Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t) - } - bvset(bv, int32(*xoffset/int64(Widthptr))) // pointer in first slot (BitsPointer) - *xoffset += t.Width - } else { - for i := int64(0); i < t.NumElem(); i++ { - onebitwalktype1(t.Elem(), xoffset, bv) - } + for i := int64(0); i < t.NumElem(); i++ { + onebitwalktype1(t.Elem(), xoffset, bv) } case TSTRUCT: diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 96d7a82972..9d3f79cdce 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -49,7 +49,7 @@ func typecheckrange(n *Node) { Yyerror("cannot range over %v", Nconv(n.Right, FmtLong)) goto out - case TARRAY: + case TARRAY, TSLICE: t1 = Types[TINT] t2 = t.Elem() @@ -164,7 +164,7 @@ func walkrange(n *Node) { default: Fatalf("walkrange") - case TARRAY: + case TARRAY, TSLICE: if memclrrange(n, v1, v2, a) { lineno = lno return diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 4792f88abe..ac36f912b6 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -623,7 +623,7 @@ func typePkg(t *Type) *Pkg { tsym := t.Sym 
if tsym == nil { switch t.Etype { - case TARRAY, TPTR32, TPTR64, TCHAN: + case TARRAY, TSLICE, TPTR32, TPTR64, TCHAN: if t.Elem() != nil { tsym = t.Elem().Sym } @@ -689,6 +689,7 @@ var kinds = []int{ TCHAN: obj.KindChan, TMAP: obj.KindMap, TARRAY: obj.KindArray, + TSLICE: obj.KindArray, TFUNC: obj.KindFunc, TCOMPLEX64: obj.KindComplex64, TCOMPLEX128: obj.KindComplex128, @@ -701,11 +702,10 @@ func haspointers(t *Type) bool { TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL: return false - case TARRAY: - if t.IsSlice() { - return true - } + case TSLICE: + return true + case TARRAY: at := t.Extra.(*ArrayType) if at.Haspointers != 0 { return at.Haspointers-1 != 0 @@ -764,11 +764,11 @@ func typeptrdata(t *Type) int64 { // struct { Type *type; void *data; } return 2 * int64(Widthptr) + case TSLICE: + // struct { byte *array; uintgo len; uintgo cap; } + return int64(Widthptr) + case TARRAY: - if t.IsSlice() { - // struct { byte *array; uintgo len; uintgo cap; } - return int64(Widthptr) - } // haspointers already eliminated t.NumElem() == 0. 
return (t.NumElem()-1)*t.Elem().Width + typeptrdata(t.Elem()) @@ -1007,9 +1007,6 @@ func isreflexive(t *Type) bool { return false case TARRAY: - if t.IsSlice() { - Fatalf("slice can't be a map key: %v", t) - } return isreflexive(t.Elem()) case TSTRUCT: @@ -1057,9 +1054,6 @@ func needkeyupdate(t *Type) bool { return true case TARRAY: - if t.IsSlice() { - Fatalf("slice can't be a map key: %v", t) - } return needkeyupdate(t.Elem()) case TSTRUCT: @@ -1127,28 +1121,26 @@ ok: ot = dextratype(s, ot, t, 0) case TARRAY: - if t.IsArray() { - // ../../../../runtime/type.go:/arrayType - s1 := dtypesym(t.Elem()) - t2 := typSlice(t.Elem()) - s2 := dtypesym(t2) - ot = dcommontype(s, ot, t) - ot = dsymptr(s, ot, s1, 0) - ot = dsymptr(s, ot, s2, 0) - ot = duintptr(s, ot, uint64(t.NumElem())) - } else { - // ../../../../runtime/type.go:/sliceType - s1 := dtypesym(t.Elem()) + // ../../../../runtime/type.go:/arrayType + s1 := dtypesym(t.Elem()) + t2 := typSlice(t.Elem()) + s2 := dtypesym(t2) + ot = dcommontype(s, ot, t) + ot = dsymptr(s, ot, s1, 0) + ot = dsymptr(s, ot, s2, 0) + ot = duintptr(s, ot, uint64(t.NumElem())) + ot = dextratype(s, ot, t, 0) - ot = dcommontype(s, ot, t) - ot = dsymptr(s, ot, s1, 0) - } + case TSLICE: + // ../../../../runtime/type.go:/sliceType + s1 := dtypesym(t.Elem()) + ot = dcommontype(s, ot, t) + ot = dsymptr(s, ot, s1, 0) ot = dextratype(s, ot, t, 0) - // ../../../../runtime/type.go:/chanType case TCHAN: + // ../../../../runtime/type.go:/chanType s1 := dtypesym(t.Elem()) - ot = dcommontype(s, ot, t) ot = dsymptr(s, ot, s1, 0) ot = duintptr(s, ot, uint64(t.ChanDir())) @@ -1326,7 +1318,7 @@ ok: // functions must return the existing type structure rather // than creating a new one. 
switch t.Etype { - case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSTRUCT: + case TPTR32, TPTR64, TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRUCT: keep = true } } @@ -1654,11 +1646,10 @@ func (p *GCProg) emit(t *Type, offset int64) { p.w.Ptr(offset / int64(Widthptr)) p.w.Ptr(offset/int64(Widthptr) + 1) + case TSLICE: + p.w.Ptr(offset / int64(Widthptr)) + case TARRAY: - if t.IsSlice() { - p.w.Ptr(offset / int64(Widthptr)) - return - } if t.NumElem() == 0 { // should have been handled by haspointers check above Fatalf("GCProg.emit: empty array") diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 5a3a4dbe7f..71c06eb0a0 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -1103,13 +1103,13 @@ func anylit(ctxt int, n *Node, var_ *Node, init *Nodes) { structlit(ctxt, 3, n, var_, init) case OARRAYLIT: - if t.Etype != TARRAY { - Fatalf("anylit: not array") - } if t.IsSlice() { slicelit(ctxt, n, var_, init) break } + if !t.IsArray() { + Fatalf("anylit: not array") + } if var_.isSimpleName() && n.List.Len() > 4 { if ctxt == 0 { @@ -1414,7 +1414,7 @@ func genAsInitNoCheck(n *Node, reportOnly bool) bool { } // nr is the array being converted to a slice - if nr.Type == nil || nr.Type.Etype != TARRAY || nr.Type.IsSlice() { + if nr.Type == nil || !nr.Type.IsArray() { return false } diff --git a/src/cmd/compile/internal/gc/sizeof_test.go b/src/cmd/compile/internal/gc/sizeof_test.go index f2b1461bc8..a01da13883 100644 --- a/src/cmd/compile/internal/gc/sizeof_test.go +++ b/src/cmd/compile/internal/gc/sizeof_test.go @@ -41,7 +41,6 @@ func TestSizeof(t *testing.T) { {ChanArgsType{}, 4, 8}, {PtrType{}, 4, 8}, {SliceType{}, 4, 8}, - {DDDArrayType{}, 4, 8}, } for _, tt := range tests { diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 4a33a3808e..ad665fbfbc 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1138,7 +1138,7 
@@ var opToSSA = map[opAndType]ssa.Op{ opAndType{OEQ, TINT64}: ssa.OpEq64, opAndType{OEQ, TUINT64}: ssa.OpEq64, opAndType{OEQ, TINTER}: ssa.OpEqInter, - opAndType{OEQ, TARRAY}: ssa.OpEqSlice, + opAndType{OEQ, TSLICE}: ssa.OpEqSlice, opAndType{OEQ, TFUNC}: ssa.OpEqPtr, opAndType{OEQ, TMAP}: ssa.OpEqPtr, opAndType{OEQ, TCHAN}: ssa.OpEqPtr, @@ -1158,7 +1158,7 @@ var opToSSA = map[opAndType]ssa.Op{ opAndType{ONE, TINT64}: ssa.OpNeq64, opAndType{ONE, TUINT64}: ssa.OpNeq64, opAndType{ONE, TINTER}: ssa.OpNeqInter, - opAndType{ONE, TARRAY}: ssa.OpNeqSlice, + opAndType{ONE, TSLICE}: ssa.OpNeqSlice, opAndType{ONE, TFUNC}: ssa.OpNeqPtr, opAndType{ONE, TMAP}: ssa.OpNeqPtr, opAndType{ONE, TCHAN}: ssa.OpNeqPtr, @@ -2871,9 +2871,6 @@ func canSSAType(t *Type) bool { } switch t.Etype { case TARRAY: - if t.IsSlice() { - return true - } // We can't do arrays because dynamic indexing is // not supported on SSA variables. // TODO: maybe allow if length is <=1? All indexes diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 51a78317f2..cb0c86ee81 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -594,6 +594,7 @@ func methtype(t *Type, mustname int) *Type { case TSTRUCT, TARRAY, + TSLICE, TMAP, TCHAN, TSTRING, @@ -641,7 +642,7 @@ func eqtype1(t1, t2 *Type, assumedEqual map[typePair]struct{}) bool { if t1 == t2 { return true } - if t1 == nil || t2 == nil || t1.Etype != t2.Etype { + if t1 == nil || t2 == nil || t1.Etype != t2.Etype || t1.Broke || t2.Broke { return false } if t1.Sym != nil || t2.Sym != nil { @@ -836,18 +837,13 @@ func assignop(src *Type, dst *Type, why *string) Op { // 5. src is the predeclared identifier nil and dst is a nillable type. 
if src.Etype == TNIL { switch dst.Etype { - case TARRAY: - if !dst.IsSlice() { - break - } - fallthrough - case TPTR32, TPTR64, TFUNC, TMAP, TCHAN, - TINTER: + TINTER, + TSLICE: return OCONVNOP } } diff --git a/src/cmd/compile/internal/gc/type.go b/src/cmd/compile/internal/gc/type.go index 16399547c7..baac282c0a 100644 --- a/src/cmd/compile/internal/gc/type.go +++ b/src/cmd/compile/internal/gc/type.go @@ -44,6 +44,7 @@ const ( TPTR64 TFUNC + TSLICE TARRAY TSTRUCT TCHAN @@ -70,11 +71,6 @@ const ( NTYPE ) -const ( - sliceBound = -1 // slices have Bound=sliceBound - dddBound = -100 // arrays declared as [...]T start life with Bound=dddBound -) - // ChanDir is whether a channel can send, receive, or both. type ChanDir uint8 @@ -137,7 +133,8 @@ type Type struct { // TCHANARGS: ChanArgsType // TCHAN: *ChanType // TPTR32, TPTR64: PtrType - // TARRAY: *ArrayType, SliceType, or DDDArrayType + // TARRAY: *ArrayType + // TSLICE: SliceType Extra interface{} // Width is the width of this Type in bytes. @@ -273,10 +270,10 @@ func (t *Type) ChanType() *ChanType { return t.Extra.(*ChanType) } -// ArrayType contains Type fields specific to array types with known lengths. +// ArrayType contains Type fields specific to array types. type ArrayType struct { Elem *Type // element type - Bound int64 // number of elements; always >= 0; do not use with sliceBound or dddBound + Bound int64 // number of elements; <0 if unknown yet Haspointers uint8 // 0 unknown, 1 no, 2 yes } @@ -285,11 +282,6 @@ type SliceType struct { Elem *Type // element type } -// DDDArrayType contains Type fields specific to ddd array types. -type DDDArrayType struct { - Elem *Type // element type -} - // A Field represents a field in a struct or a method in an interface or // associated with a named type. type Field struct { @@ -399,6 +391,9 @@ func typ(et EType) *Type { // typArray returns a new fixed-length array Type. 
func typArray(elem *Type, bound int64) *Type { + if bound < 0 { + Fatalf("typArray: invalid bound %v", bound) + } t := typ(TARRAY) t.Extra = &ArrayType{Elem: elem, Bound: bound} return t @@ -406,7 +401,7 @@ func typArray(elem *Type, bound int64) *Type { // typSlice returns a new slice Type. func typSlice(elem *Type) *Type { - t := typ(TARRAY) + t := typ(TSLICE) t.Extra = SliceType{Elem: elem} return t } @@ -414,7 +409,7 @@ func typSlice(elem *Type) *Type { // typDDDArray returns a new [...]T array Type. func typDDDArray(elem *Type) *Type { t := typ(TARRAY) - t.Extra = DDDArrayType{Elem: elem} + t.Extra = &ArrayType{Elem: elem, Bound: -1} return t } @@ -519,16 +514,14 @@ func substAny(t *Type, types *[]*Type) *Type { elem := substAny(t.Elem(), types) if elem != t.Elem() { t = t.Copy() - switch x := t.Extra.(type) { - case *ArrayType: - x.Elem = elem - case SliceType: - t.Extra = SliceType{Elem: elem} - case DDDArrayType: - t.Extra = DDDArrayType{Elem: elem} - default: - Fatalf("substAny bad array elem type %T %v", x, t) - } + t.Extra.(*ArrayType).Elem = elem + } + + case TSLICE: + elem := substAny(t.Elem(), types) + if elem != t.Elem() { + t = t.Copy() + t.Extra = SliceType{Elem: elem} } case TCHAN: @@ -616,10 +609,8 @@ func (t *Type) Copy() *Type { x := *t.Extra.(*ChanType) nt.Extra = &x case TARRAY: - if arr, ok := t.Extra.(*ArrayType); ok { - x := *arr - nt.Extra = &x - } + x := *t.Extra.(*ArrayType) + nt.Extra = &x } // TODO(mdempsky): Find out why this is necessary and explain. 
if t.Orig == t { @@ -735,14 +726,9 @@ func (t *Type) Elem() *Type { case TPTR32, TPTR64: return t.Extra.(PtrType).Elem case TARRAY: - switch t := t.Extra.(type) { - case *ArrayType: - return t.Elem - case SliceType: - return t.Elem - case DDDArrayType: - return t.Elem - } + return t.Extra.(*ArrayType).Elem + case TSLICE: + return t.Extra.(SliceType).Elem case TCHAN: return t.Extra.(*ChanType).Elem } @@ -838,8 +824,7 @@ func (t *Type) isDDDArray() bool { if t.Etype != TARRAY { return false } - _, ok := t.Extra.(DDDArrayType) - return ok + return t.Extra.(*ArrayType).Bound < 0 } // ArgWidth returns the total aligned argument size for a function. @@ -982,8 +967,8 @@ func (t *Type) cmp(x *Type) ssa.Cmp { } return t.Val().cmp(x.Val()) - case TPTR32, TPTR64: - // No special cases for these two, they are handled + case TPTR32, TPTR64, TSLICE: + // No special cases for these, they are handled // by the general code after the switch. case TSTRUCT: @@ -1068,7 +1053,7 @@ func (t *Type) cmp(x *Type) ssa.Cmp { panic(e) } - // Common element type comparison for TARRAY, TCHAN, TPTR32, and TPTR64. + // Common element type comparison for TARRAY, TCHAN, TPTR32, TPTR64, and TSLICE. return t.Elem().cmp(x.Elem()) } @@ -1138,22 +1123,12 @@ func (t *Type) IsChan() bool { return t.Etype == TCHAN } -// TODO: Remove noinline when issue 15084 is resolved. 
-//go:noinline func (t *Type) IsSlice() bool { - if t.Etype != TARRAY { - return false - } - _, ok := t.Extra.(SliceType) - return ok + return t.Etype == TSLICE } func (t *Type) IsArray() bool { - if t.Etype != TARRAY { - return false - } - _, ok := t.Extra.(*ArrayType) - return ok + return t.Etype == TARRAY } func (t *Type) IsStruct() bool { @@ -1193,41 +1168,23 @@ func (t *Type) FieldName(i int) string { func (t *Type) NumElem() int64 { t.wantEtype(TARRAY) - switch t := t.Extra.(type) { - case *ArrayType: - return t.Bound - case SliceType: - return sliceBound - case DDDArrayType: - return dddBound + at := t.Extra.(*ArrayType) + if at.Bound < 0 { + Fatalf("NumElem array %v does not have bound yet", t) } - Fatalf("NumElem on non-array %T %v", t.Extra, t) - return 0 + return at.Bound } // SetNumElem sets the number of elements in an array type. -// It should not be used if at all possible. -// Create a new array/slice/dddArray with typX instead. -// The only allowed uses are: -// * array -> slice as a hack to suppress extra error output -// * ddd array -> array -// TODO(josharian): figure out how to get rid of this entirely. +// The only allowed use is on array types created with typDDDArray. +// For other uses, create a new array with typArray instead. func (t *Type) SetNumElem(n int64) { t.wantEtype(TARRAY) - switch { - case n >= 0: - if !t.isDDDArray() { - Fatalf("SetNumElem non-ddd -> array %v", t) - } - t.Extra = &ArrayType{Elem: t.Elem(), Bound: n} - case n == sliceBound: - if !t.IsArray() { - Fatalf("SetNumElem non-array -> slice %v", t) - } - t.Extra = SliceType{Elem: t.Elem()} - default: - Fatalf("SetNumElem %d %v", n, t) + at := t.Extra.(*ArrayType) + if at.Bound >= 0 { + Fatalf("SetNumElem array %v already has bound %d", t, at.Bound) } + at.Bound = n } // ChanDir returns the direction of a channel type t. 
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index e158c87611..7a8c65dc58 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -76,6 +76,7 @@ var _typekind = []string{ TCHAN: "chan", TMAP: "map", TARRAY: "array", + TSLICE: "slice", TFUNC: "func", TNIL: "nil", TIDEAL: "untyped number", @@ -997,7 +998,7 @@ OpSwitch: n.Type = nil return n - case TSTRING, TARRAY: + case TSTRING, TARRAY, TSLICE: n.Right = indexlit(n.Right) if t.IsString() { n.Type = bytetype @@ -1005,12 +1006,10 @@ OpSwitch: n.Type = t.Elem() } why := "string" - if t.Etype == TARRAY { - if t.IsArray() { - why = "array" - } else { - why = "slice" - } + if t.IsArray() { + why = "array" + } else if t.IsSlice() { + why = "slice" } if n.Right.Type != nil && !n.Right.Type.IsInteger() { @@ -1422,9 +1421,6 @@ OpSwitch: } case TARRAY: - if t.IsSlice() { - break - } if callrecv(l) { // has call or receive break } @@ -1795,13 +1791,7 @@ OpSwitch: n.Type = nil return n - case TARRAY: - if !t.IsSlice() { - Yyerror("cannot make type %v", t) - n.Type = nil - return n - } - + case TSLICE: if i >= len(args) { Yyerror("missing len argument to make(%v)", t) n.Type = nil @@ -2848,19 +2838,19 @@ func indexdup(n *Node, hash map[int64]*Node) { hash[v] = n } +// iscomptype reports whether type t is a composite literal type +// or a pointer to one. func iscomptype(t *Type) bool { + if t.IsPtr() { + t = t.Elem() + } + switch t.Etype { - case TARRAY, TSTRUCT, TMAP: + case TARRAY, TSLICE, TSTRUCT, TMAP: return true - - case TPTR32, TPTR64: - switch t.Elem().Etype { - case TARRAY, TSTRUCT, TMAP: - return true - } + default: + return false } - - return false } func pushtype(n *Node, t *Type) { @@ -2943,7 +2933,7 @@ func typecheckcomplit(n *Node) *Node { Yyerror("invalid type for composite literal: %v", t) n.Type = nil - case TARRAY: + case TARRAY, TSLICE: // Only allocate hash if there are some key/value pairs. 
var hash map[int64]*Node for _, n1 := range n.List.Slice() { @@ -2954,6 +2944,7 @@ func typecheckcomplit(n *Node) *Node { } length := int64(0) i := 0 + checkBounds := t.IsArray() && !t.isDDDArray() for i2, n2 := range n.List.Slice() { l := n2 setlineno(l) @@ -2979,11 +2970,10 @@ func typecheckcomplit(n *Node) *Node { i++ if int64(i) > length { length = int64(i) - if t.IsArray() && length > t.NumElem() { + if checkBounds && length > t.NumElem() { setlineno(l) Yyerror("array index %d out of bounds [0:%d]", length-1, t.NumElem()) - // suppress any further errors out of bounds errors for the same type by pretending it is a slice - t.SetNumElem(sliceBound) + checkBounds = false } } diff --git a/src/cmd/compile/internal/gc/universe.go b/src/cmd/compile/internal/gc/universe.go index 3330fbbab2..84df22502f 100644 --- a/src/cmd/compile/internal/gc/universe.go +++ b/src/cmd/compile/internal/gc/universe.go @@ -228,6 +228,7 @@ func typeinit() { okforcap[TARRAY] = true okforcap[TCHAN] = true + okforcap[TSLICE] = true okforconst[TBOOL] = true okforconst[TSTRING] = true @@ -235,6 +236,7 @@ func typeinit() { okforlen[TARRAY] = true okforlen[TCHAN] = true okforlen[TMAP] = true + okforlen[TSLICE] = true okforlen[TSTRING] = true okforeq[TPTR32] = true @@ -246,8 +248,9 @@ func typeinit() { okforeq[TBOOL] = true okforeq[TMAP] = true // nil only; refined in typecheck okforeq[TFUNC] = true // nil only; refined in typecheck - okforeq[TARRAY] = true // nil slice only; refined in typecheck - okforeq[TSTRUCT] = true // it's complicated; refined in typecheck + okforeq[TSLICE] = true // nil only; refined in typecheck + okforeq[TARRAY] = true // only if element type is comparable; refined in typecheck + okforeq[TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck okforcmp[TSTRING] = true diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 8cce85de9a..0e74365c76 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ 
b/src/cmd/compile/internal/gc/walk.go @@ -3125,12 +3125,7 @@ func walkcompare(n *Node, init *Nodes) *Node { default: return n - case TARRAY: - if t.IsSlice() { - return n - } - - case TSTRUCT: + case TARRAY, TSTRUCT: break } -- cgit v1.3 From 8ad8d7d87edf0aec3b56c2e2d0139bc12531d359 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 21 Apr 2016 13:58:22 -0700 Subject: cmd/compile: Use pre-regalloc value ID in lateSpillUse The cached copy's ID is sometimes outside the bounds of the orig array. There's no reason to start at the cached copy and work backwards to the original value. We already have the original value ID at all the callsites. Fixes noopt build Change-Id: I313508a1917e838a87e8cc83b2ef3c2e4a8db304 Reviewed-on: https://go-review.googlesource.com/22355 Run-TryBot: Keith Randall Reviewed-by: David Chase TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/ssa/regalloc.go | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 2ac684f121..65c25dfc5a 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -1540,9 +1540,9 @@ func (s *regAllocState) isLoopSpillCandidate(loop *loop, v *Value) bool { return s.values[v.ID].needReg && !s.values[v.ID].spillUsed && s.loopnest.b2l[v.Block.ID] == loop } -// lateSpillUse notes a late (after stack allocation) use of spill c +// lateSpillUse notes a late (after stack allocation) use of the spill of value with ID vid. // This will inhibit spill sinking. -func (s *regAllocState) lateSpillUse(c *Value) { +func (s *regAllocState) lateSpillUse(vid ID) { // TODO investigate why this is necessary. // It appears that an outside-the-loop use of // an otherwise sinkable spill makes the spill @@ -1551,10 +1551,7 @@ func (s *regAllocState) lateSpillUse(c *Value) { // true when isLoopSpillCandidate was called, yet // it was shuffled). 
Such shuffling cuts the amount // of spill sinking by more than half (in make.bash) - v := s.orig[c.ID] - if v != nil { - s.values[v.ID].spillUsedShuffle = true - } + s.values[vid].spillUsedShuffle = true } // shuffle fixes up all the merge edges (those going into blocks of indegree > 1). @@ -1729,7 +1726,7 @@ func (e *edgeState) process() { if _, isReg := loc.(*Register); isReg { c = e.p.NewValue1(c.Line, OpCopy, c.Type, c) } else { - e.s.lateSpillUse(c) + e.s.lateSpillUse(vid) c = e.p.NewValue1(c.Line, OpLoadReg, c.Type, c) } e.set(r, vid, c, false) @@ -1818,7 +1815,7 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool { } } else { if dstReg { - e.s.lateSpillUse(c) + e.s.lateSpillUse(vid) x = e.p.NewValue1(c.Line, OpLoadReg, c.Type, c) } else { // mem->mem. Use temp register. @@ -1836,7 +1833,7 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value) bool { e.erase(loc) r := e.findRegFor(c.Type) - e.s.lateSpillUse(c) + e.s.lateSpillUse(vid) t := e.p.NewValue1(c.Line, OpLoadReg, c.Type, c) e.set(r, vid, t, false) x = e.p.NewValue1(c.Line, OpStoreReg, loc.(LocalSlot).Type, t) -- cgit v1.3 From 1492e7db059ea7903110b0725d5ced3134558e73 Mon Sep 17 00:00:00 2001 From: David Crawshaw Date: Thu, 7 Apr 2016 16:29:16 -0400 Subject: cmd/compile, etc: use nameOff for rtype string linux/amd64: cmd/go: -8KB (basically nothing) linux/amd64 PIE: cmd/go: -191KB (1.6%) jujud: -1.5MB (1.9%) Updates #6853 Fixes #15064 Change-Id: I0adbb95685e28be92e8548741df0e11daa0a9b5f Reviewed-on: https://go-review.googlesource.com/21777 Reviewed-by: Ian Lance Taylor --- src/cmd/compile/internal/gc/reflect.go | 58 +++++++++------ src/cmd/link/internal/ld/data.go | 2 +- src/cmd/link/internal/ld/decodesym.go | 35 +++++---- src/reflect/all_test.go | 40 +++++++++- src/reflect/export_test.go | 8 +- src/reflect/type.go | 129 +++++++++++++++++---------------- src/runtime/alg.go | 8 +- src/runtime/error.go | 2 +- src/runtime/heapdump.go | 2 +- src/runtime/iface.go 
| 24 +++--- src/runtime/mbitmap.go | 10 +-- src/runtime/mfinal.go | 10 +-- src/runtime/mprof.go | 2 +- src/runtime/type.go | 46 ++++++++---- 14 files changed, 231 insertions(+), 145 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index ac36f912b6..1643c2ce4b 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -788,14 +788,21 @@ func typeptrdata(t *Type) int64 { } } -// tflag is documented in ../../../../reflect/type.go. -const tflagUncommon = 1 - -// commonType -// ../../../../runtime/type.go:/commonType +// tflag is documented in reflect/type.go. +// +// tflag values must be kept in sync with copies in: +// cmd/compile/internal/gc/reflect.go +// cmd/link/internal/ld/decodesym.go +// reflect/type.go +// runtime/type.go +const ( + tflagUncommon = 1 << 0 + tflagExtraStar = 1 << 1 +) var dcommontype_algarray *Sym +// dcommontype dumps the contents of a reflect.rtype (runtime._type). func dcommontype(s *Sym, ot int, t *Type) int { if ot != 0 { Fatalf("dcommontype %d", ot) @@ -836,7 +843,8 @@ func dcommontype(s *Sym, ot int, t *Type) int { // kind uint8 // alg *typeAlg // gcdata *byte - // string *string + // str nameOff + // _ int32 // } ot = duintptr(s, ot, uint64(t.Width)) ot = duintptr(s, ot, uint64(ptrdata)) @@ -847,6 +855,26 @@ func dcommontype(s *Sym, ot int, t *Type) int { if uncommonSize(t) != 0 { tflag |= tflagUncommon } + + exported := false + p := Tconv(t, FmtLeft|FmtUnsigned) + // If we're writing out type T, + // we are very likely to write out type *T as well. + // Use the string "*T"[1:] for "T", so that the two + // share storage. This is a cheap way to reduce the + // amount of space taken up by reflect strings. 
+ if !strings.HasPrefix(p, "*") { + p = "*" + p + tflag |= tflagExtraStar + if t.Sym != nil { + exported = exportname(t.Sym.Name) + } + } else { + if t.Elem() != nil && t.Elem().Sym != nil { + exported = exportname(t.Elem().Sym.Name) + } + } + ot = duint8(s, ot, tflag) // runtime (and common sense) expects alignment to be a power of two. @@ -882,21 +910,9 @@ func dcommontype(s *Sym, ot int, t *Type) int { } ot = dsymptr(s, ot, gcsym, 0) // gcdata - p := Tconv(t, FmtLeft|FmtUnsigned) - - // If we're writing out type T, - // we are very likely to write out type *T as well. - // Use the string "*T"[1:] for "T", so that the two - // share storage. This is a cheap way to reduce the - // amount of space taken up by reflect strings. - prefix := 0 - if !strings.HasPrefix(p, "*") { - p = "*" + p - prefix = 1 - } - _, symdata := stringsym(p) // string - ot = dsymptrLSym(Linksym(s), ot, symdata, prefix) - ot = duintxx(s, ot, uint64(len(p)-prefix), Widthint) + nsym := dname(p, "", nil, exported) + ot = dsymptrOffLSym(Linksym(s), ot, nsym, 0) + ot = duint32(s, ot, 0) return ot } diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index 63caf9cf79..dbd5ad0b75 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -1832,7 +1832,7 @@ func dodataSect(symn int, syms []*LSym) (result []*LSym, maxAlign int32) { case obj.STYPELINK: // Sort typelinks by the rtype.string field so the reflect // package can binary search type links. - symsSort[i].name = string(decodetype_string(s.R[0].Sym)) + symsSort[i].name = string(decodetype_str(s.R[0].Sym)) } } diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go index b1c55cf787..330aa6dc13 100644 --- a/src/cmd/link/internal/ld/decodesym.go +++ b/src/cmd/link/internal/ld/decodesym.go @@ -16,6 +16,18 @@ import ( // ../../runtime/type.go, or more specifically, with what // ../gc/reflect.c stuffs in these. +// tflag is documented in reflect/type.go. 
+// +// tflag values must be kept in sync with copies in: +// cmd/compile/internal/gc/reflect.go +// cmd/link/internal/ld/decodesym.go +// reflect/type.go +// runtime/type.go +const ( + tflagUncommon = 1 << 0 + tflagExtraStar = 1 << 1 +) + func decode_reloc(s *LSym, off int32) *Reloc { for i := range s.R { if s.R[i].Off == off { @@ -47,9 +59,9 @@ func decode_inuxi(p []byte, sz int) uint64 { } } -func commonsize() int { return 6*SysArch.PtrSize + 8 } // runtime._type -func structfieldSize() int { return 3 * SysArch.PtrSize } // runtime.structfield -func uncommonSize() int { return 2 * SysArch.PtrSize } // runtime.uncommontype +func commonsize() int { return 4*SysArch.PtrSize + 8 + 8 } // runtime._type +func structfieldSize() int { return 3 * SysArch.PtrSize } // runtime.structfield +func uncommonSize() int { return 2 * SysArch.PtrSize } // runtime.uncommontype // Type.commonType.kind func decodetype_kind(s *LSym) uint8 { @@ -73,7 +85,6 @@ func decodetype_ptrdata(s *LSym) int64 { // Type.commonType.tflag func decodetype_hasUncommon(s *LSym) bool { - const tflagUncommon = 1 // see ../../../../reflect/type.go:/^type.tflag return s.P[2*SysArch.PtrSize+4]&tflagUncommon != 0 } @@ -211,16 +222,13 @@ func decodetype_structfieldarrayoff(s *LSym, i int) int { return off } -// decodetype_string returns the contents of an rtype's string field. -func decodetype_string(s *LSym) []byte { - off := 4*SysArch.PtrSize + 8 - strlen := int64(decode_inuxi(s.P[off+SysArch.PtrSize:], SysArch.IntSize)) - - r := decode_reloc(s, int32(off)) - if r == nil { - return nil +// decodetype_str returns the contents of an rtype's str field (a nameOff). +func decodetype_str(s *LSym) string { + str := decodetype_name(s, 4*SysArch.PtrSize+8) + if s.P[2*SysArch.PtrSize+4]&tflagExtraStar != 0 { + return str[1:] } - return r.Sym.P[r.Add : r.Add+strlen] + return str } // decodetype_name decodes the name from a reflect.name. 
@@ -233,7 +241,6 @@ func decodetype_name(s *LSym, off int) string { data := r.Sym.P namelen := int(uint16(data[1]<<8) | uint16(data[2])) return string(data[3 : 3+namelen]) - } func decodetype_structfieldname(s *LSym, i int) string { diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go index 4dfae2743d..e88bc880e2 100644 --- a/src/reflect/all_test.go +++ b/src/reflect/all_test.go @@ -4175,12 +4175,12 @@ func TestStructOfExportRules(t *testing.T) { }, { field: StructField{Name: "", Type: TypeOf(ΦType{})}, - mustPanic: true, // TODO(sbinet): creating a struct with UTF-8 fields not supported + mustPanic: false, exported: true, }, { field: StructField{Name: "", Type: TypeOf(φType{})}, - mustPanic: true, // TODO(sbinet): creating a struct with UTF-8 fields not supported + mustPanic: false, exported: false, }, { @@ -5674,6 +5674,42 @@ func TestNames(t *testing.T) { } } +func TestExported(t *testing.T) { + type ΦExported struct{} + type φUnexported struct{} + type BigP *big + type P int + type p *P + type P2 p + type p3 p + + type exportTest struct { + v interface{} + want bool + } + exportTests := []exportTest{ + {D1{}, true}, + {(*D1)(nil), true}, + {big{}, false}, + {(*big)(nil), false}, + {(BigP)(nil), true}, + {(*BigP)(nil), true}, + {ΦExported{}, true}, + {φUnexported{}, false}, + {P(0), true}, + {(p)(nil), false}, + {(P2)(nil), true}, + {(p3)(nil), false}, + } + + for i, test := range exportTests { + typ := TypeOf(test.v) + if got := IsExported(typ); got != test.want { + t.Errorf("%d: %s exported=%v, want %v", i, typ.Name(), got, test.want) + } + } +} + type embed struct { EmbedWithUnexpMeth } diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go index f527434f0d..00189f3353 100644 --- a/src/reflect/export_test.go +++ b/src/reflect/export_test.go @@ -51,7 +51,7 @@ func TypeLinks() []string { rodata := sections[i] for _, off := range offs { typ := (*rtype)(resolveTypeOff(unsafe.Pointer(rodata), off)) - r = append(r, typ.string) + r = 
append(r, typ.String()) } } return r @@ -103,3 +103,9 @@ type OtherPkgFields struct { OtherExported int otherUnexported int } + +func IsExported(t Type) bool { + typ := t.(*rtype) + n := typ.nameOff(typ.str) + return n.isExported() +} diff --git a/src/reflect/type.go b/src/reflect/type.go index 0cae69a79c..b1758e6913 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -242,6 +242,11 @@ const ( // tflag is used by an rtype to signal what extra type information is // available in the memory directly following the rtype value. +// +// tflag values must be kept in sync with copies in: +// cmd/compile/internal/gc/reflect.go +// cmd/link/internal/ld/decodesym.go +// runtime/type.go type tflag uint8 const ( @@ -256,7 +261,13 @@ const ( // u uncommonType // } // u := &(*tUncommon)(unsafe.Pointer(t)).u - tflagUncommon tflag = 1 + tflagUncommon tflag = 1 << 0 + + // tflagExtraStar means the name in the str field has an + // extraneous '*' prefix. This is because for most types T in + // a program, the type *T also exists and reusing the str data + // saves binary size. + tflagExtraStar tflag = 1 << 1 ) // rtype is the common implementation of most values. @@ -273,7 +284,8 @@ type rtype struct { kind uint8 // enumeration for C alg *typeAlg // algorithm table gcdata *byte // garbage collection data - string string // string form; unnecessary but undeniably useful + str nameOff // string form + _ int32 // unused; keeps rtype always a multiple of ptrSize } // a copy of runtime.typeAlg @@ -420,6 +432,9 @@ type structType struct { // If the import path follows, then 4 bytes at the end of // the data form a nameOff. The import path is only set for concrete // methods that are defined in a different package than their type. +// +// If a name starts with "*", then the exported bit represents +// whether the pointed to type is exported. 
type name struct { bytes *byte } @@ -724,7 +739,13 @@ func (t *rtype) uncommon() *uncommonType { } } -func (t *rtype) String() string { return t.string } +func (t *rtype) String() string { + s := t.nameOff(t.str).name() + if t.tflag&tflagExtraStar != 0 { + return s[1:] + } + return s +} func (t *rtype) Size() uintptr { return t.size } @@ -833,33 +854,34 @@ func hasPrefix(s, prefix string) bool { } func (t *rtype) Name() string { - if hasPrefix(t.string, "map[") { + s := t.String() + if hasPrefix(s, "map[") { return "" } - if hasPrefix(t.string, "struct {") { + if hasPrefix(s, "struct {") { return "" } - if hasPrefix(t.string, "chan ") { + if hasPrefix(s, "chan ") { return "" } - if hasPrefix(t.string, "chan<-") { + if hasPrefix(s, "chan<-") { return "" } - if hasPrefix(t.string, "func(") { + if hasPrefix(s, "func(") { return "" } - switch t.string[0] { + switch s[0] { case '[', '*', '<': return "" } - i := len(t.string) - 1 + i := len(s) - 1 for i >= 0 { - if t.string[i] == '.' { + if s[i] == '.' { break } i-- } - return t.string[i+1:] + return s[i+1:] } func (t *rtype) ChanDir() ChanDir { @@ -1391,7 +1413,7 @@ func (t *rtype) ptrTo() *rtype { } // Look in known types. - s := "*" + t.string + s := "*" + t.String() for _, tt := range typesByString(s) { p = (*ptrType)(unsafe.Pointer(tt)) if p.elem == t { @@ -1408,7 +1430,7 @@ func (t *rtype) ptrTo() *rtype { prototype := *(**ptrType)(unsafe.Pointer(&iptr)) *p = *prototype - p.string = s + p.str = resolveReflectName(newName(s, "", "", false)) // For the type structures linked into the binary, the // compiler provides a good hash of the string. @@ -1645,7 +1667,7 @@ func haveIdenticalUnderlyingType(T, V *rtype) bool { // // and // -// t1.string < t2.string +// t1.String() < t2.String() // // Note that strings are not unique identifiers for types: // there can be more than one with a given string. 
@@ -1669,12 +1691,12 @@ func typesByString(s string) []*rtype { section := sections[offsI] // We are looking for the first index i where the string becomes >= s. - // This is a copy of sort.Search, with f(h) replaced by (*typ[h].string >= s). + // This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s). i, j := 0, len(offs) for i < j { h := i + (j-i)/2 // avoid overflow when computing h // i ≤ h < j - if !(rtypeOff(section, offs[h]).string >= s) { + if !(rtypeOff(section, offs[h]).String() >= s) { i = h + 1 // preserves f(i-1) == false } else { j = h // preserves f(j) == true @@ -1687,7 +1709,7 @@ func typesByString(s string) []*rtype { // to do a linear scan anyway. for j := i; j < len(offs); j++ { typ := rtypeOff(section, offs[j]) - if typ.string != s { + if typ.String() != s { break } ret = append(ret, typ) @@ -1783,11 +1805,11 @@ func ChanOf(dir ChanDir, t Type) Type { lookupCache.Unlock() panic("reflect.ChanOf: invalid dir") case SendDir: - s = "chan<- " + typ.string + s = "chan<- " + typ.String() case RecvDir: - s = "<-chan " + typ.string + s = "<-chan " + typ.String() case BothDir: - s = "chan " + typ.string + s = "chan " + typ.String() } for _, tt := range typesByString(s) { ch := (*chanType)(unsafe.Pointer(tt)) @@ -1802,7 +1824,7 @@ func ChanOf(dir ChanDir, t Type) Type { ch := new(chanType) *ch = *prototype ch.dir = uintptr(dir) - ch.string = s + ch.str = resolveReflectName(newName(s, "", "", false)) ch.hash = fnv1(typ.hash, 'c', byte(dir)) ch.elem = typ @@ -1832,7 +1854,7 @@ func MapOf(key, elem Type) Type { } // Look in known types. 
- s := "map[" + ktyp.string + "]" + etyp.string + s := "map[" + ktyp.String() + "]" + etyp.String() for _, tt := range typesByString(s) { mt := (*mapType)(unsafe.Pointer(tt)) if mt.key == ktyp && mt.elem == etyp { @@ -1844,7 +1866,7 @@ func MapOf(key, elem Type) Type { var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil) mt := new(mapType) *mt = **(**mapType)(unsafe.Pointer(&imap)) - mt.string = s + mt.str = resolveReflectName(newName(s, "", "", false)) mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash)) mt.key = ktyp mt.elem = etyp @@ -2002,7 +2024,7 @@ func FuncOf(in, out []Type, variadic bool) Type { } // Populate the remaining fields of ft and store in cache. - ft.string = str + ft.str = resolveReflectName(newName(str, "", "", false)) funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype) return &ft.rtype @@ -2018,9 +2040,9 @@ func funcStr(ft *funcType) string { } if ft.IsVariadic() && i == int(ft.inCount)-1 { repr = append(repr, "..."...) - repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.string...) + repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...) } else { - repr = append(repr, t.string...) + repr = append(repr, t.String()...) } } repr = append(repr, ')') @@ -2034,7 +2056,7 @@ func funcStr(ft *funcType) string { if i > 0 { repr = append(repr, ", "...) } - repr = append(repr, t.string...) + repr = append(repr, t.String()...) } if len(out) > 1 { repr = append(repr, ')') @@ -2199,8 +2221,8 @@ func bucketOf(ktyp, etyp *rtype) *rtype { b.ptrdata = ptrdata b.kind = kind b.gcdata = gcdata - s := "bucket(" + ktyp.string + "," + etyp.string + ")" - b.string = s + s := "bucket(" + ktyp.String() + "," + etyp.String() + ")" + b.str = resolveReflectName(newName(s, "", "", false)) return b } @@ -2216,7 +2238,7 @@ func SliceOf(t Type) Type { } // Look in known types. 
- s := "[]" + typ.string + s := "[]" + typ.String() for _, tt := range typesByString(s) { slice := (*sliceType)(unsafe.Pointer(tt)) if slice.elem == typ { @@ -2229,7 +2251,7 @@ func SliceOf(t Type) Type { prototype := *(**sliceType)(unsafe.Pointer(&islice)) slice := new(sliceType) *slice = *prototype - slice.string = s + slice.str = resolveReflectName(newName(s, "", "", false)) slice.hash = fnv1(typ.hash, '[') slice.elem = typ @@ -2337,11 +2359,11 @@ func StructOf(fields []StructField) Type { // Embedded ** and *interface{} are illegal elem := ft.Elem() if k := elem.Kind(); k == Ptr || k == Interface { - panic("reflect.StructOf: illegal anonymous field type " + ft.string) + panic("reflect.StructOf: illegal anonymous field type " + ft.String()) } name = elem.String() } else { - name = ft.string + name = ft.String() } // TODO(sbinet) check for syntactically impossible type names? @@ -2463,7 +2485,7 @@ func StructOf(fields []StructField) Type { hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash)) - repr = append(repr, (" " + ft.string)...) + repr = append(repr, (" " + ft.String())...) if f.name.tagLen() > 0 { hash = fnv1(hash, []byte(f.name.tag())...) repr = append(repr, (" " + strconv.Quote(f.name.tag()))...) 
@@ -2579,7 +2601,7 @@ func StructOf(fields []StructField) Type { } } - typ.string = str + typ.str = resolveReflectName(newName(str, "", "", false)) typ.hash = hash typ.size = size typ.align = typalign @@ -2691,11 +2713,11 @@ func StructOf(fields []StructField) Type { func runtimeStructField(field StructField) structField { exported := field.PkgPath == "" if field.Name == "" { - t := field.Type + t := field.Type.(*rtype) if t.Kind() == Ptr { - t = t.Elem() + t = t.Elem().(*rtype) } - exported = isExported(t.Name()) + exported = t.nameOff(t.str).isExported() } else if exported { b0 := field.Name[0] if ('a' <= b0 && b0 <= 'z') || b0 == '_' { @@ -2711,25 +2733,6 @@ func runtimeStructField(field StructField) structField { } } -func isExported(s string) bool { - if s == "" { - return false - } - // FIXME(sbinet): handle utf8/runes (see https://golang.org/issue/15064) - // TODO: turn rtype.string into a reflect.name type, and put the exported - // bit on there which can be checked here with field.Type.(*rtype).string.isExported() - // When done, remove the documented limitation of StructOf. - r := s[0] - switch { - case 'A' <= r && r <= 'Z': - return true - case r == '_' || 'a' <= r && r <= 'z': - return false - default: - panic("reflect.StructOf: creating a struct with UTF-8 fields is not supported yet") - } -} - // typeptrdata returns the length in bytes of the prefix of t // containing pointer data. Anything after this offset is scalar data. // keep in sync with ../cmd/compile/internal/gc/reflect.go @@ -2779,7 +2782,7 @@ func ArrayOf(count int, elem Type) Type { } // Look in known types. 
- s := "[" + strconv.Itoa(count) + "]" + typ.string + s := "[" + strconv.Itoa(count) + "]" + typ.String() for _, tt := range typesByString(s) { array := (*arrayType)(unsafe.Pointer(tt)) if array.elem == typ { @@ -2792,7 +2795,7 @@ func ArrayOf(count int, elem Type) Type { prototype := *(**arrayType)(unsafe.Pointer(&iarray)) array := new(arrayType) *array = *prototype - array.string = s + array.str = resolveReflectName(newName(s, "", "", false)) array.hash = fnv1(typ.hash, '[') for n := uint32(count); n > 0; n >>= 8 { array.hash = fnv1(array.hash, byte(n)) @@ -3046,11 +3049,11 @@ func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uin var s string if rcvr != nil { - s = "methodargs(" + rcvr.string + ")(" + t.string + ")" + s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")" } else { - s = "funcargs(" + t.string + ")" + s = "funcargs(" + t.String() + ")" } - x.string = s + x.str = resolveReflectName(newName(s, "", "", false)) // cache result for future callers if layoutCache.m == nil { diff --git a/src/runtime/alg.go b/src/runtime/alg.go index 7aacc8cf9b..66943495b5 100644 --- a/src/runtime/alg.go +++ b/src/runtime/alg.go @@ -146,7 +146,7 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr { t := tab._type fn := t.alg.hash if fn == nil { - panic(errorString("hash of unhashable type " + t._string)) + panic(errorString("hash of unhashable type " + t.string())) } if isDirectIface(t) { return c1 * fn(unsafe.Pointer(&a.data), h^c0) @@ -163,7 +163,7 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr { } fn := t.alg.hash if fn == nil { - panic(errorString("hash of unhashable type " + t._string)) + panic(errorString("hash of unhashable type " + t.string())) } if isDirectIface(t) { return c1 * fn(unsafe.Pointer(&a.data), h^c0) @@ -221,7 +221,7 @@ func efaceeq(x, y eface) bool { } eq := t.alg.equal if eq == nil { - panic(errorString("comparing uncomparable type " + t._string)) + panic(errorString("comparing uncomparable type " + 
t.string())) } if isDirectIface(t) { return eq(noescape(unsafe.Pointer(&x.data)), noescape(unsafe.Pointer(&y.data))) @@ -239,7 +239,7 @@ func ifaceeq(x, y iface) bool { t := xtab._type eq := t.alg.equal if eq == nil { - panic(errorString("comparing uncomparable type " + t._string)) + panic(errorString("comparing uncomparable type " + t.string())) } if isDirectIface(t) { return eq(noescape(unsafe.Pointer(&x.data)), noescape(unsafe.Pointer(&y.data))) diff --git a/src/runtime/error.go b/src/runtime/error.go index 15f6bdf014..0238c5e592 100644 --- a/src/runtime/error.go +++ b/src/runtime/error.go @@ -67,7 +67,7 @@ type stringer interface { func typestring(x interface{}) string { e := efaceOf(&x) - return e._type._string + return e._type.string() } // For calling from C. diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go index adfd660847..1db29d7cb4 100644 --- a/src/runtime/heapdump.go +++ b/src/runtime/heapdump.go @@ -184,7 +184,7 @@ func dumptype(t *_type) { dumpint(uint64(uintptr(unsafe.Pointer(t)))) dumpint(uint64(t.size)) if x := t.uncommon(); x == nil || x.pkgpath.name() == "" { - dumpstr(t._string) + dumpstr(t.string()) } else { pkgpathstr := x.pkgpath.name() pkgpath := stringStructOf(&pkgpathstr) diff --git a/src/runtime/iface.go b/src/runtime/iface.go index 352ff77465..007c1ed174 100644 --- a/src/runtime/iface.go +++ b/src/runtime/iface.go @@ -38,7 +38,7 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab { return nil } name := inter.typ.nameOff(inter.mhdr[0].name) - panic(&TypeAssertionError{"", typ._string, inter.typ._string, name.name()}) + panic(&TypeAssertionError{"", typ.string(), inter.typ.string(), name.name()}) } h := itabhash(inter, typ) @@ -128,7 +128,7 @@ func additab(m *itab, locked, canfail bool) { if locked { unlock(&ifaceLock) } - panic(&TypeAssertionError{"", typ._string, inter.typ._string, iname}) + panic(&TypeAssertionError{"", typ.string(), inter.typ.string(), iname}) } m.bad = 1 break @@ -196,18 +196,18 @@ func 
convT2I(tab *itab, elem unsafe.Pointer, x unsafe.Pointer) (i iface) { func panicdottype(have, want, iface *_type) { haveString := "" if have != nil { - haveString = have._string + haveString = have.string() } - panic(&TypeAssertionError{iface._string, haveString, want._string, ""}) + panic(&TypeAssertionError{iface.string(), haveString, want.string(), ""}) } func assertI2T(t *_type, i iface, r unsafe.Pointer) { tab := i.tab if tab == nil { - panic(&TypeAssertionError{"", "", t._string, ""}) + panic(&TypeAssertionError{"", "", t.string(), ""}) } if tab._type != t { - panic(&TypeAssertionError{tab.inter.typ._string, tab._type._string, t._string, ""}) + panic(&TypeAssertionError{tab.inter.typ.string(), tab._type.string(), t.string(), ""}) } if r != nil { if isDirectIface(t) { @@ -238,10 +238,10 @@ func assertI2T2(t *_type, i iface, r unsafe.Pointer) bool { func assertE2T(t *_type, e eface, r unsafe.Pointer) { if e._type == nil { - panic(&TypeAssertionError{"", "", t._string, ""}) + panic(&TypeAssertionError{"", "", t.string(), ""}) } if e._type != t { - panic(&TypeAssertionError{"", e._type._string, t._string, ""}) + panic(&TypeAssertionError{"", e._type.string(), t.string(), ""}) } if r != nil { if isDirectIface(t) { @@ -285,7 +285,7 @@ func assertI2E(inter *interfacetype, i iface, r *eface) { tab := i.tab if tab == nil { // explicit conversions require non-nil interface value. - panic(&TypeAssertionError{"", "", inter.typ._string, ""}) + panic(&TypeAssertionError{"", "", inter.typ.string(), ""}) } r._type = tab._type r.data = i.data @@ -322,7 +322,7 @@ func assertI2I(inter *interfacetype, i iface, r *iface) { tab := i.tab if tab == nil { // explicit conversions require non-nil interface value. 
- panic(&TypeAssertionError{"", "", inter.typ._string, ""}) + panic(&TypeAssertionError{"", "", inter.typ.string(), ""}) } if tab.inter == inter { r.tab = tab @@ -361,7 +361,7 @@ func assertE2I(inter *interfacetype, e eface, r *iface) { t := e._type if t == nil { // explicit conversions require non-nil interface value. - panic(&TypeAssertionError{"", "", inter.typ._string, ""}) + panic(&TypeAssertionError{"", "", inter.typ.string(), ""}) } r.tab = getitab(inter, t, false) r.data = e.data @@ -402,7 +402,7 @@ func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface) { func assertE2E(inter *interfacetype, e eface, r *eface) { if e._type == nil { // explicit conversions require non-nil interface value. - panic(&TypeAssertionError{"", "", inter.typ._string, ""}) + panic(&TypeAssertionError{"", "", inter.typ.string(), ""}) } *r = e } diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go index 685c29066b..f025ce1c68 100644 --- a/src/runtime/mbitmap.go +++ b/src/runtime/mbitmap.go @@ -461,11 +461,11 @@ func typeBitsBulkBarrier(typ *_type, p, size uintptr) { throw("runtime: typeBitsBulkBarrier without type") } if typ.size != size { - println("runtime: typeBitsBulkBarrier with type ", typ._string, " of size ", typ.size, " but memory size", size) + println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size) throw("runtime: invalid typeBitsBulkBarrier") } if typ.kind&kindGCProg != 0 { - println("runtime: typeBitsBulkBarrier with type ", typ._string, " with GC prog") + println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog") throw("runtime: invalid typeBitsBulkBarrier") } if !writeBarrier.needed { @@ -916,7 +916,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) { } if nw == 0 { // No pointers! Caller was supposed to check. 
- println("runtime: invalid type ", typ._string) + println("runtime: invalid type ", typ.string()) throw("heapBitsSetType: called with non-pointer type") return } @@ -1100,7 +1100,7 @@ Phase4: if doubleCheck { end := heapBitsForAddr(x + size) if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) { - println("ended at wrong bitmap byte for", typ._string, "x", dataSize/typ.size) + println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size) print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n") print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n") h0 := heapBitsForAddr(x) @@ -1136,7 +1136,7 @@ Phase4: } } if have != want { - println("mismatch writing bits for", typ._string, "x", dataSize/typ.size) + println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size) print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n") print("kindGCProg=", typ.kind&kindGCProg != 0, "\n") print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n") diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go index f698e72709..e81650d842 100644 --- a/src/runtime/mfinal.go +++ b/src/runtime/mfinal.go @@ -274,7 +274,7 @@ func SetFinalizer(obj interface{}, finalizer interface{}) { throw("runtime.SetFinalizer: first argument is nil") } if etyp.kind&kindMask != kindPtr { - throw("runtime.SetFinalizer: first argument is " + etyp._string + ", not pointer") + throw("runtime.SetFinalizer: first argument is " + etyp.string() + ", not pointer") } ot := (*ptrtype)(unsafe.Pointer(etyp)) if ot.elem == nil { @@ -328,14 +328,14 @@ func SetFinalizer(obj interface{}, finalizer interface{}) { } if ftyp.kind&kindMask != kindFunc { - throw("runtime.SetFinalizer: second argument is " + ftyp._string + ", not a function") + throw("runtime.SetFinalizer: second argument is " + ftyp.string() + ", not a function") } ft 
:= (*functype)(unsafe.Pointer(ftyp)) if ft.dotdotdot() { - throw("runtime.SetFinalizer: cannot pass " + etyp._string + " to finalizer " + ftyp._string + " because dotdotdot") + throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string() + " because dotdotdot") } if ft.dotdotdot() || ft.inCount != 1 { - throw("runtime.SetFinalizer: cannot pass " + etyp._string + " to finalizer " + ftyp._string) + throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string()) } fint := ft.in()[0] switch { @@ -358,7 +358,7 @@ func SetFinalizer(obj interface{}, finalizer interface{}) { goto okarg } } - throw("runtime.SetFinalizer: cannot pass " + etyp._string + " to finalizer " + ftyp._string) + throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string()) okarg: // compute size needed for return parameters nret := uintptr(0) diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index f3b9b4bc78..c3e4e2cb87 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -624,7 +624,7 @@ func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) { if typ == nil { print("tracealloc(", p, ", ", hex(size), ")\n") } else { - print("tracealloc(", p, ", ", hex(size), ", ", typ._string, ")\n") + print("tracealloc(", p, ", ", hex(size), ", ", typ.string(), ")\n") } if gp.m.curg == nil || gp == gp.m.curg { goroutineheader(gp) diff --git a/src/runtime/type.go b/src/runtime/type.go index 31f7ff81b8..0b28fa6d43 100644 --- a/src/runtime/type.go +++ b/src/runtime/type.go @@ -8,10 +8,18 @@ package runtime import "unsafe" -// tflag is documented in ../reflect/type.go. +// tflag is documented in reflect/type.go. 
+// +// tflag values must be kept in sync with copies in: +// cmd/compile/internal/gc/reflect.go +// cmd/link/internal/ld/decodesym.go +// reflect/type.go type tflag uint8 -const tflagUncommon tflag = 1 +const ( + tflagUncommon tflag = 1 << 0 + tflagExtraStar tflag = 1 << 1 +) // Needs to be in sync with ../cmd/compile/internal/ld/decodesym.go:/^func.commonsize, // ../cmd/compile/internal/gc/reflect.go:/^func.dcommontype and @@ -28,8 +36,17 @@ type _type struct { // gcdata stores the GC type data for the garbage collector. // If the KindGCProg bit is set in kind, gcdata is a GC program. // Otherwise it is a ptrmask bitmap. See mbitmap.go for details. - gcdata *byte - _string string + gcdata *byte + str nameOff + _ int32 +} + +func (t *_type) string() string { + s := t.nameOff(t.str).name() + if t.tflag&tflagExtraStar != 0 { + return s[1:] + } + return s } func (t *_type) uncommon() *uncommontype { @@ -99,33 +116,34 @@ func hasPrefix(s, prefix string) bool { } func (t *_type) name() string { - if hasPrefix(t._string, "map[") { + s := t.string() + if hasPrefix(s, "map[") { return "" } - if hasPrefix(t._string, "struct {") { + if hasPrefix(s, "struct {") { return "" } - if hasPrefix(t._string, "chan ") { + if hasPrefix(s, "chan ") { return "" } - if hasPrefix(t._string, "chan<-") { + if hasPrefix(s, "chan<-") { return "" } - if hasPrefix(t._string, "func(") { + if hasPrefix(s, "func(") { return "" } - switch t._string[0] { + switch s[0] { case '[', '*', '<': return "" } - i := len(t._string) - 1 + i := len(s) - 1 for i >= 0 { - if t._string[i] == '.' { + if s[i] == '.' { break } i-- } - return t._string[i+1:] + return s[i+1:] } // reflectOffs holds type offsets defined at run time by the reflect package. 
@@ -497,7 +515,7 @@ func typesEqual(t, v *_type) bool { if kind != v.kind&kindMask { return false } - if t._string != v._string { + if t.string() != v.string() { return false } ut := t.uncommon() -- cgit v1.3 From c165988360457553ccbfa4a09919de3262a4438a Mon Sep 17 00:00:00 2001 From: David Crawshaw Date: Thu, 7 Apr 2016 21:37:45 -0400 Subject: cmd/compile, etc: use nameOff in uncommonType linux/amd64 PIE: cmd/go: -62KB (0.5%) jujud: -550KB (0.7%) For #6853. Change-Id: Ieb67982abce5832e24b997506f0ae7108f747108 Reviewed-on: https://go-review.googlesource.com/22371 Run-TryBot: David Crawshaw TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/cmd/compile/internal/gc/reflect.go | 15 ++++++--------- src/cmd/link/internal/ld/decodesym.go | 6 +++--- src/cmd/link/internal/ld/symtab.go | 5 +++++ src/reflect/type.go | 21 +++++++++------------ src/runtime/heapdump.go | 4 ++-- src/runtime/iface.go | 2 +- src/runtime/type.go | 6 ++++-- 7 files changed, 30 insertions(+), 29 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 1643c2ce4b..3cd769fd2d 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -75,7 +75,7 @@ func uncommonSize(t *Type) int { // Sizeof(runtime.uncommontype{}) if t.Sym == nil && len(methods(t)) == 0 { return 0 } - return 2 * Widthptr + return 4 + 2 + 2 } func makefield(name string, t *Type) *Field { @@ -463,6 +463,9 @@ func dgopkgpathLSym(s *obj.LSym, ot int, pkg *Pkg) int { // dgopkgpathOffLSym writes an offset relocation in s at offset ot to the pkg path symbol. func dgopkgpathOffLSym(s *obj.LSym, ot int, pkg *Pkg) int { + if pkg == nil { + return duintxxLSym(s, ot, 0, 4) + } if pkg == localpkg && myimportpath == "" { // If we don't know the full import path of the package being compiled // (i.e. 
-p was not passed on the compiler command line), emit a reference to @@ -597,12 +600,9 @@ func dextratype(s *Sym, ot int, t *Type, dataAdd int) int { dtypesym(a.type_) } - ot = dgopkgpath(s, ot, typePkg(t)) + ot = dgopkgpathOffLSym(Linksym(s), ot, typePkg(t)) - dataAdd += Widthptr + 2 + 2 - if Widthptr == 8 { - dataAdd += 4 - } + dataAdd += 4 + 2 + 2 mcount := len(m) if mcount != int(uint16(mcount)) { Fatalf("too many methods on %s: %d", t, mcount) @@ -613,9 +613,6 @@ func dextratype(s *Sym, ot int, t *Type, dataAdd int) int { ot = duint16(s, ot, uint16(mcount)) ot = duint16(s, ot, uint16(dataAdd)) - if Widthptr == 8 { - ot = duint32(s, ot, 0) // align for following pointers - } return ot } diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go index 330aa6dc13..3ec488bbe8 100644 --- a/src/cmd/link/internal/ld/decodesym.go +++ b/src/cmd/link/internal/ld/decodesym.go @@ -61,7 +61,7 @@ func decode_inuxi(p []byte, sz int) uint64 { func commonsize() int { return 4*SysArch.PtrSize + 8 + 8 } // runtime._type func structfieldSize() int { return 3 * SysArch.PtrSize } // runtime.structfield -func uncommonSize() int { return 2 * SysArch.PtrSize } // runtime.uncommontype +func uncommonSize() int { return 4 + 2 + 2 } // runtime.uncommontype // Type.commonType.kind func decodetype_kind(s *LSym) uint8 { @@ -361,8 +361,8 @@ func decodetype_methods(s *LSym) []methodsig { // just Sizeof(rtype) } - mcount := int(decode_inuxi(s.P[off+SysArch.PtrSize:], 2)) - moff := int(decode_inuxi(s.P[off+SysArch.PtrSize+2:], 2)) + mcount := int(decode_inuxi(s.P[off+4:], 2)) + moff := int(decode_inuxi(s.P[off+4+2:], 2)) off += moff // offset to array of reflect.method values const sizeofMethod = 4 * 4 // sizeof reflect.method in program return decode_methodsig(s, off, sizeofMethod, mcount) diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index acc238f698..94a6d0ab29 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ 
b/src/cmd/link/internal/ld/symtab.go @@ -435,6 +435,11 @@ func symtab() { s.Outer = symtype } + case strings.HasPrefix(s.Name, "go.importpath.") && UseRelro(): + // Keep go.importpath symbols in the same section as types and + // names, as they can be referred to by a section offset. + s.Type = obj.STYPERELRO + case strings.HasPrefix(s.Name, "go.typelink."): ntypelinks++ s.Type = obj.STYPELINK diff --git a/src/reflect/type.go b/src/reflect/type.go index b1758e6913..ff6ff14c83 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -311,9 +311,9 @@ type method struct { // Using a pointer to this struct reduces the overall size required // to describe an unnamed type with no methods. type uncommonType struct { - pkgPath name // import path; empty for built-in types like int, string - mcount uint16 // number of methods - moff uint16 // offset from this uncommontype to [mcount]method + pkgPath nameOff // import path; empty for built-in types like int, string + mcount uint16 // number of methods + moff uint16 // offset from this uncommontype to [mcount]method } // ChanDir represents a channel type's direction. @@ -613,13 +613,6 @@ func (t *uncommonType) methods() []method { return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff)))[:t.mcount:t.mcount] } -func (t *uncommonType) PkgPath() string { - if t == nil { - return "" - } - return t.pkgPath.name() -} - // resolveNameOff resolves a name offset from a base pointer. // The (*rtype).nameOff method is a convenience wrapper for this function. // Implemented in the runtime package. 
@@ -799,7 +792,7 @@ func (t *rtype) Method(i int) (m Method) { if !pname.isExported() { m.PkgPath = pname.pkgPath() if m.PkgPath == "" { - m.PkgPath = ut.pkgPath.name() + m.PkgPath = t.nameOff(ut.pkgPath).name() } fl |= flagStickyRO } @@ -846,7 +839,11 @@ func (t *rtype) MethodByName(name string) (m Method, ok bool) { } func (t *rtype) PkgPath() string { - return t.uncommon().PkgPath() + ut := t.uncommon() + if ut == nil { + return "" + } + return t.nameOff(ut.pkgPath).name() } func hasPrefix(s, prefix string) bool { diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go index 1db29d7cb4..0afab09095 100644 --- a/src/runtime/heapdump.go +++ b/src/runtime/heapdump.go @@ -183,10 +183,10 @@ func dumptype(t *_type) { dumpint(tagType) dumpint(uint64(uintptr(unsafe.Pointer(t)))) dumpint(uint64(t.size)) - if x := t.uncommon(); x == nil || x.pkgpath.name() == "" { + if x := t.uncommon(); x == nil || t.nameOff(x.pkgpath).name() == "" { dumpstr(t.string()) } else { - pkgpathstr := x.pkgpath.name() + pkgpathstr := t.nameOff(x.pkgpath).name() pkgpath := stringStructOf(&pkgpathstr) namestr := t.name() name := stringStructOf(&namestr) diff --git a/src/runtime/iface.go b/src/runtime/iface.go index 007c1ed174..b57d1cc63c 100644 --- a/src/runtime/iface.go +++ b/src/runtime/iface.go @@ -112,7 +112,7 @@ func additab(m *itab, locked, canfail bool) { if typ.typeOff(t.mtyp) == itype && tname.name() == iname { pkgPath := tname.pkgPath() if pkgPath == "" { - pkgPath = x.pkgpath.name() + pkgPath = typ.nameOff(x.pkgpath).name() } if tname.isExported() || pkgPath == ipkg { if m != nil { diff --git a/src/runtime/type.go b/src/runtime/type.go index 0b28fa6d43..9e4c40553a 100644 --- a/src/runtime/type.go +++ b/src/runtime/type.go @@ -304,7 +304,7 @@ type method struct { } type uncommontype struct { - pkgpath name + pkgpath nameOff mcount uint16 // number of methods moff uint16 // offset from this uncommontype to [mcount]method } @@ -524,7 +524,9 @@ func typesEqual(t, v *_type) bool { if 
ut == nil || uv == nil { return false } - if ut.pkgpath.name() != uv.pkgpath.name() { + pkgpatht := t.nameOff(ut.pkgpath).name() + pkgpathv := v.nameOff(uv.pkgpath).name() + if pkgpatht != pkgpathv { return false } } -- cgit v1.3 From 3c1a4c1902711c16489ed0c3506df97439ffbd85 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 19 Apr 2016 21:06:53 -0700 Subject: cmd/compile: don't nilcheck newobject and return values from mapaccess{1,2} They are guaranteed to be non-nil, no point in inserting nil checks for them. Fixes #15390 Change-Id: I3b9a0f2319affc2139dcc446d0a56c6785ae5a86 Reviewed-on: https://go-review.googlesource.com/22291 Reviewed-by: Josh Bleecher Snyder --- src/cmd/compile/internal/gc/cgen.go | 24 ++++++++++++++++++++---- src/cmd/compile/internal/gc/fmt.go | 6 ++++++ src/cmd/compile/internal/gc/ssa.go | 30 +++++++++++++++++------------- src/cmd/compile/internal/gc/syntax.go | 1 + src/cmd/compile/internal/gc/walk.go | 9 +++++---- src/cmd/compile/internal/ssa/nilcheck.go | 2 -- test/nilptr3.go | 21 +++++++++++++++++++++ test/nilptr3_ssa.go | 21 +++++++++++++++++++++ 8 files changed, 91 insertions(+), 23 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go index 5c5bedaa31..a9393a6d9e 100644 --- a/src/cmd/compile/internal/gc/cgen.go +++ b/src/cmd/compile/internal/gc/cgen.go @@ -978,7 +978,11 @@ func Agenr(n *Node, a *Node, res *Node) { case OIND: Cgenr(n.Left, a, res) - Cgen_checknil(a) + if !n.Left.NonNil { + Cgen_checknil(a) + } else if Debug_checknil != 0 && n.Lineno > 1 { + Warnl(n.Lineno, "removed nil check") + } case OINDEX: if Ctxt.Arch.Family == sys.ARM { @@ -1587,7 +1591,11 @@ func Agen(n *Node, res *Node) { case OIND: Cgen(nl, res) - Cgen_checknil(res) + if !nl.NonNil { + Cgen_checknil(res) + } else if Debug_checknil != 0 && n.Lineno > 1 { + Warnl(n.Lineno, "removed nil check") + } case ODOT: Agen(nl, res) @@ -1597,7 +1605,11 @@ func Agen(n *Node, res *Node) { case 
ODOTPTR: Cgen(nl, res) - Cgen_checknil(res) + if !nl.NonNil { + Cgen_checknil(res) + } else if Debug_checknil != 0 && n.Lineno > 1 { + Warnl(n.Lineno, "removed nil check") + } if n.Xoffset != 0 { addOffset(res, n.Xoffset) } @@ -1658,7 +1670,11 @@ func Igen(n *Node, a *Node, res *Node) { case ODOTPTR: Cgenr(n.Left, a, res) - Cgen_checknil(a) + if !n.Left.NonNil { + Cgen_checknil(a) + } else if Debug_checknil != 0 && n.Lineno > 1 { + Warnl(n.Lineno, "removed nil check") + } a.Op = OINDREG a.Xoffset += n.Xoffset a.Type = n.Type diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index bfb031aac5..e5977c0905 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -319,6 +319,12 @@ func Jconv(n *Node, flag FmtFlag) string { if n.Assigned { buf.WriteString(" assigned") } + if n.Bounded { + buf.WriteString(" bounded") + } + if n.NonNil { + buf.WriteString(" nonnil") + } if c == 0 && n.Used { fmt.Fprintf(&buf, " used(%v)", n.Used) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index ad665fbfbc..218f720a61 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1938,8 +1938,7 @@ func (s *state) expr(n *Node) *ssa.Value { return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) case OIND: - p := s.expr(n.Left) - s.nilCheck(p) + p := s.exprPtr(n.Left, false, n.Lineno) return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) case ODOT: @@ -1952,8 +1951,7 @@ func (s *state) expr(n *Node) *ssa.Value { return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) case ODOTPTR: - p := s.expr(n.Left) - s.nilCheck(p) + p := s.exprPtr(n.Left, false, n.Lineno) p = s.newValue1I(ssa.OpOffPtr, p.Type, n.Xoffset, p) return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()) @@ -2778,19 +2776,12 @@ func (s *state) addr(n *Node, bounded bool) *ssa.Value { return s.newValue2(ssa.OpPtrIndex, Ptrto(n.Left.Type.Elem()), a, i) } case OIND: - p := s.expr(n.Left) - if !bounded { - 
s.nilCheck(p) - } - return p + return s.exprPtr(n.Left, bounded, n.Lineno) case ODOT: p := s.addr(n.Left, bounded) return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p) case ODOTPTR: - p := s.expr(n.Left) - if !bounded { - s.nilCheck(p) - } + p := s.exprPtr(n.Left, bounded, n.Lineno) return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p) case OCLOSUREVAR: return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, @@ -2892,6 +2883,19 @@ func canSSAType(t *Type) bool { } } +// exprPtr evaluates n to a pointer and nil-checks it. +func (s *state) exprPtr(n *Node, bounded bool, lineno int32) *ssa.Value { + p := s.expr(n) + if bounded || n.NonNil { + if s.f.Config.Debug_checknil() && lineno > 1 { + s.f.Config.Warnl(lineno, "removed nil check") + } + return p + } + s.nilCheck(p) + return p +} + // nilCheck generates nil pointer checking code. // Starts a new block on return, unless nil checks are disabled. // Used only for automatically inserted nil checks, diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index 2f3b98a8ef..8a675ac157 100644 --- a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -54,6 +54,7 @@ type Node struct { Addable bool // addressable Etype EType // op for OASOP, etype for OTYPE, exclam for export, 6g saved reg, ChanDir for OTCHAN Bounded bool // bounds check unnecessary + NonNil bool // guaranteed to be non-nil Class Class // PPARAM, PAUTO, PEXTERN, etc Embedded uint8 // ODCLFIELD embedded type Colas bool // OAS resulting from := diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 0e74365c76..27ff045028 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -886,6 +886,7 @@ opswitch: if !isblank(a) { var_ := temp(Ptrto(t.Val())) var_.Typecheck = 1 + var_.NonNil = true // mapaccess always returns a non-nil pointer n.List.SetIndex(0, var_) n = walkexpr(n, init) init.Append(n) @@ -895,8 +896,6 @@ opswitch: n = 
typecheck(n, Etop) n = walkexpr(n, init) - // TODO: ptr is always non-nil, so disable nil check for this OIND op. - case ODELETE: init.AppendNodes(&n.Ninit) map_ := n.List.First() @@ -1224,7 +1223,6 @@ opswitch: // standard version takes key by reference. // orderexpr made sure key is addressable. key = Nod(OADDR, n.Right, nil) - p = "mapaccess1" } @@ -1235,6 +1233,7 @@ opswitch: z := zeroaddr(w) n = mkcall1(mapfn(p, t), Ptrto(t.Val()), init, typename(t), n.Left, key, z) } + n.NonNil = true // mapaccess always returns a non-nil pointer n = Nod(OIND, n, nil) n.Type = t.Val() n.Typecheck = 1 @@ -2015,7 +2014,9 @@ func callnew(t *Type) *Node { dowidth(t) fn := syslook("newobject") fn = substArgTypes(fn, t) - return mkcall1(fn, Ptrto(t), nil, typename(t)) + v := mkcall1(fn, Ptrto(t), nil, typename(t)) + v.NonNil = true + return v } func iscallret(n *Node) bool { diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go index 753e48aad5..62eb0c8ea6 100644 --- a/src/cmd/compile/internal/ssa/nilcheck.go +++ b/src/cmd/compile/internal/ssa/nilcheck.go @@ -4,8 +4,6 @@ package ssa -// TODO: return value from newobject/newarray is non-nil. - // nilcheckelim eliminates unnecessary nil checks. 
func nilcheckelim(f *Func) { // A nil check is redundant if the same nil check was successful in a diff --git a/test/nilptr3.go b/test/nilptr3.go index 817d2aec74..1bec833fe3 100644 --- a/test/nilptr3.go +++ b/test/nilptr3.go @@ -193,3 +193,24 @@ func f4(x *[10]int) { x = y _ = &x[9] // ERROR "removed repeated nil check" } + +func m1(m map[int][80]byte) byte { + v := m[3] // ERROR "removed nil check" + return v[5] +} +func m2(m map[int][800]byte) byte { + v := m[3] // ERROR "removed nil check" + return v[5] +} +func m3(m map[int][80]byte) (byte, bool) { + v, ok := m[3] // ERROR "removed nil check" + return v[5], ok +} +func m4(m map[int][800]byte) (byte, bool) { + v, ok := m[3] // ERROR "removed nil check" + return v[5], ok +} +func p1() byte { + p := new([100]byte) + return p[5] // ERROR "removed nil check" +} diff --git a/test/nilptr3_ssa.go b/test/nilptr3_ssa.go index ba60a64602..6eefbac7d8 100644 --- a/test/nilptr3_ssa.go +++ b/test/nilptr3_ssa.go @@ -207,3 +207,24 @@ func f6(p, q *T) { x := *p // ERROR "removed nil check" *q = x // ERROR "removed nil check" } + +func m1(m map[int][80]byte) byte { + v := m[3] // ERROR "removed nil check" + return v[5] +} +func m2(m map[int][800]byte) byte { + v := m[3] // ERROR "removed nil check" + return v[5] +} +func m3(m map[int][80]byte) (byte, bool) { + v, ok := m[3] // ERROR "removed nil check" + return v[5], ok +} +func m4(m map[int][800]byte) (byte, bool) { + v, ok := m[3] // ERROR "removed nil check" + return v[5], ok +} +func p1() byte { + p := new([100]byte) + return p[5] // ERROR "removed nil check" +} -- cgit v1.3 From caef4496fcdaca8dc5b86f60b07760e5434ca1f3 Mon Sep 17 00:00:00 2001 From: Alexandru Moșoi Date: Fri, 22 Apr 2016 12:44:31 +0200 Subject: cmd/compile: convert some Phis into And8. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See discussion at [1]. True value must have a fixed non-zero representation meaning that a && b can be implemented as a & b. 
[1] https://groups.google.com/forum/#!topic/golang-dev/xV0vPuFP9Vg This change helps with m := a && b, but it's more common to see if a && b { do something } which is not handled. Change-Id: Ib6f9ff898a0a8c05d12466e2464e4fe781035394 Reviewed-on: https://go-review.googlesource.com/22313 Run-TryBot: Alexandru Moșoi TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/phiopt.go | 17 ++++++++++++++ test/phiopt.go | 43 ++++++++++++++++++++++++++++++---- 2 files changed, 55 insertions(+), 5 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/phiopt.go b/src/cmd/compile/internal/ssa/phiopt.go index 4efd497bdb..aae83bacf2 100644 --- a/src/cmd/compile/internal/ssa/phiopt.go +++ b/src/cmd/compile/internal/ssa/phiopt.go @@ -26,6 +26,7 @@ package ssa func phiopt(f *Func) { for _, b := range f.Blocks { if len(b.Preds) != 2 || len(b.Values) == 0 { + // TODO: handle more than 2 predecessors, e.g. a || b || c. continue } @@ -91,6 +92,22 @@ func phiopt(f *Func) { continue } } + + // Replaces + // if a { x = value } else { x = false } with x = a && value. + // Requires that value dominates x, meaning that regardless of a, + // value is always computed. This guarantees that the side effects + // of value are not seen if a is false. 
+ if v.Args[1-reverse].Op == OpConstBool && v.Args[1-reverse].AuxInt == 0 { + if tmp := v.Args[reverse]; f.sdom.isAncestorEq(tmp.Block, b) { + v.reset(OpAnd8) + v.SetArgs2(b0.Control, tmp) + if f.pass.debug > 0 { + f.Config.Warnl(b.Line, "converted OpPhi to %v", v.Op) + } + continue + } + } } } diff --git a/test/phiopt.go b/test/phiopt.go index 37caab0b51..4347909752 100644 --- a/test/phiopt.go +++ b/test/phiopt.go @@ -53,23 +53,56 @@ func f4(a, b bool) bool { } //go:noinline -func f5(a int, b bool) bool { - x := b +func f5or(a int, b bool) bool { + var x bool if a == 0 { x = true + } else { + x = b } return x // ERROR "converted OpPhi to Or8$" } //go:noinline -func f6(a int, b bool) bool { +func f5and(a int, b bool) bool { + var x bool + if a == 0 { + x = b + } else { + x = false + } + return x // ERROR "converted OpPhi to And8$" +} + +//go:noinline +func f6or(a int, b bool) bool { x := b if a == 0 { - // f6 has side effects so the OpPhi should not be converted. - x = f6(a, b) + // f6or has side effects so the OpPhi should not be converted. + x = f6or(a, b) } return x } +//go:noinline +func f6and(a int, b bool) bool { + x := b + if a == 0 { + // f6and has side effects so the OpPhi should not be converted. + x = f6and(a, b) + } + return x +} + +//go:noinline +func f7or(a bool, b bool) bool { + return a || b // ERROR "converted OpPhi to Or8$" +} + +//go:noinline +func f7and(a bool, b bool) bool { + return a && b // ERROR "converted OpPhi to And8$" +} + func main() { } -- cgit v1.3 From 7879e9193b39e6455ae03f2baace9c41f6393ee4 Mon Sep 17 00:00:00 2001 From: Alexandru Moșoi Date: Thu, 21 Apr 2016 10:11:33 +0200 Subject: cmd/compile: reenable phielim during rewrite MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove the "optimization" that was causing the issue. For the following code the "optimization" was converting v to (OpCopy x) which is wrong because x doesn't dominate v. b1: y = ... First .. b3 b2: x = ... 
Goto b3 b3: v = phi x y ... use v ... That "optimization" is likely no longer needed because we now have a second opt pass with a dce in between which removes blocks of type First. For pkg/tools/linux_amd64/* the binary size drops from 82142886 to 82060034. Change-Id: I10428abbd8b32c5ca66fec3da2e6f3686dddbe31 Reviewed-on: https://go-review.googlesource.com/22312 Run-TryBot: Alexandru Moșoi TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/phielim.go | 6 +----- src/cmd/compile/internal/ssa/rewrite.go | 2 ++ 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/phielim.go b/src/cmd/compile/internal/ssa/phielim.go index ce3b5a199a..77013c6481 100644 --- a/src/cmd/compile/internal/ssa/phielim.go +++ b/src/cmd/compile/internal/ssa/phielim.go @@ -40,11 +40,7 @@ func phielimValue(v *Value) bool { // are not v itself, then the phi must remain. // Otherwise, we can replace it with a copy. var w *Value - for i, x := range v.Args { - if b := v.Block.Preds[i]; b.Kind == BlockFirst && b.Succs[1] == v.Block { - // This branch is never taken so we can just eliminate it. - continue - } + for _, x := range v.Args { if x == v { continue } diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 9c625825b9..c2f8ceadaf 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -40,6 +40,8 @@ func applyRewrite(f *Func, rb func(*Block) bool, rv func(*Value, *Config) bool) } curb = nil for _, v := range b.Values { + change = phielimValue(v) || change + // Eliminate copy inputs. // If any copy input becomes unused, mark it // as invalid and discard its argument. 
Repeat -- cgit v1.3 From d32229b3b1edd3d3b1e2dbb61bd6ae7cd8400d56 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 22 Apr 2016 12:15:08 -0400 Subject: cmd/compile: in a Tarjan algorithm, DFS should really be DFS Replaced incorrect recursion-free rendering of DFS with something that was correct. Enhanced test with all permutations of IF successors to ensure that all possible DFS traversals are exercised. Test is improved version of https://go-review.googlesource.com/#/c/22334 Update 15084. Change-Id: I6e944c41244e47fe5f568dfc2b360ff93b94079e Reviewed-on: https://go-review.googlesource.com/22347 Reviewed-by: Keith Randall Run-TryBot: David Chase --- src/cmd/compile/internal/ssa/dom.go | 39 +++++++++++++++------------ src/cmd/compile/internal/ssa/dom_test.go | 45 ++++++++++++++++++++++++++++++++ src/cmd/compile/internal/ssa/id.go | 2 +- 3 files changed, 68 insertions(+), 18 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/dom.go b/src/cmd/compile/internal/ssa/dom.go index fedaf602e4..86b170080a 100644 --- a/src/cmd/compile/internal/ssa/dom.go +++ b/src/cmd/compile/internal/ssa/dom.go @@ -5,11 +5,13 @@ package ssa // mark values +type markKind uint8 + const ( - notFound = 0 // block has not been discovered yet - notExplored = 1 // discovered and in queue, outedges not processed yet - explored = 2 // discovered and in queue, outedges processed - done = 3 // all done, in output ordering + notFound markKind = 0 // block has not been discovered yet + notExplored markKind = 1 // discovered and in queue, outedges not processed yet + explored markKind = 2 // discovered and in queue, outedges processed + done markKind = 3 // all done, in output ordering ) // This file contains code to compute the dominator tree @@ -18,7 +20,7 @@ const ( // postorder computes a postorder traversal ordering for the // basic blocks in f. Unreachable blocks will not appear. 
func postorder(f *Func) []*Block { - mark := make([]byte, f.NumBlocks()) + mark := make([]markKind, f.NumBlocks()) // result ordering var order []*Block @@ -96,7 +98,7 @@ func (cfg *Config) scratchBlocksForDom(maxBlockID int) (a, b, c, d, e, f, g, h [ // dfs performs a depth first search over the blocks starting at the set of // blocks in the entries list (in arbitrary order). dfnum contains a mapping // from block id to an int indicating the order the block was reached or -// notFound if the block was not reached. order contains a mapping from dfnum +// 0 if the block was not reached. order contains a mapping from dfnum // to block. func (f *Func) dfs(entries []*Block, succFn linkedBlocks, dfnum, order, parent []ID) (fromID []*Block) { maxBlockID := entries[0].Func.NumBlocks() @@ -114,7 +116,7 @@ func (f *Func) dfs(entries []*Block, succFn linkedBlocks, dfnum, order, parent [ n := ID(0) s := make([]*Block, 0, 256) for _, entry := range entries { - if dfnum[entry.ID] != notFound { + if dfnum[entry.ID] != 0 { continue // already found from a previous entry } s = append(s, entry) @@ -122,18 +124,19 @@ func (f *Func) dfs(entries []*Block, succFn linkedBlocks, dfnum, order, parent [ for len(s) > 0 { node := s[len(s)-1] s = s[:len(s)-1] - + if dfnum[node.ID] != 0 { + continue // already found from a previous entry + } n++ + dfnum[node.ID] = n + order[n] = node.ID for _, w := range succFn(node) { // if it has a dfnum, we've already visited it - if dfnum[w.ID] == notFound { + if dfnum[w.ID] == 0 { s = append(s, w) - parent[w.ID] = node.ID - dfnum[w.ID] = notExplored + parent[w.ID] = node.ID // keep overwriting this till it is visited. } } - dfnum[node.ID] = n - order[n] = node.ID } } @@ -154,8 +157,6 @@ func dominators(f *Func) []*Block { // postDominators computes the post-dominator tree for f. 
func postDominators(f *Func) []*Block { - preds := func(b *Block) []*Block { return b.Preds } - succs := func(b *Block) []*Block { return b.Succs } if len(f.Blocks) == 0 { return nil @@ -170,6 +171,10 @@ func postDominators(f *Func) []*Block { } } + // TODO: postdominators is not really right, and it's not used yet + preds := func(b *Block) []*Block { return b.Preds } + succs := func(b *Block) []*Block { return b.Succs } + // infinite loop with no exit if exits == nil { return make([]*Block, f.NumBlocks()) @@ -214,7 +219,7 @@ func (f *Func) dominatorsLT(entries []*Block, predFn linkedBlocks, succFn linked continue } - if dfnum[w] == notFound { + if dfnum[w] == 0 { // skip unreachable node continue } @@ -236,7 +241,7 @@ func (f *Func) dominatorsLT(entries []*Block, predFn linkedBlocks, succFn linked var sp ID // calculate the semidominator of w for _, v := range predFn(fromID[w]) { - if dfnum[v.ID] == notFound { + if dfnum[v.ID] == 0 { // skip unreachable predecessor continue } diff --git a/src/cmd/compile/internal/ssa/dom_test.go b/src/cmd/compile/internal/ssa/dom_test.go index 9741edf331..19b898596c 100644 --- a/src/cmd/compile/internal/ssa/dom_test.go +++ b/src/cmd/compile/internal/ssa/dom_test.go @@ -420,3 +420,48 @@ func TestInfiniteLoop(t *testing.T) { postDoms := map[string]string{} verifyDominators(t, fun, postDominators, postDoms) } + +func TestDomTricky(t *testing.T) { + doms := map[string]string{ + "4": "1", + "2": "4", + "5": "4", + "11": "4", + "15": "4", // the incorrect answer is "5" + "10": "15", + "19": "15", + } + + if4 := [2]string{"2", "5"} + if5 := [2]string{"15", "11"} + if15 := [2]string{"19", "10"} + + for i := 0; i < 8; i++ { + a := 1 & i + b := 1 & i >> 1 + c := 1 & i >> 2 + + fun := Fun(testConfig(t), "1", + Bloc("1", + Valu("mem", OpInitMem, TypeMem, 0, nil), + Valu("p", OpConstBool, TypeBool, 1, nil), + Goto("4")), + Bloc("2", + Goto("11")), + Bloc("4", + If("p", if4[a], if4[1-a])), // 2, 5 + Bloc("5", + If("p", if5[b], if5[1-b])), //15, 
11 + Bloc("10", + Exit("mem")), + Bloc("11", + Goto("15")), + Bloc("15", + If("p", if15[c], if15[1-c])), //19, 10 + Bloc("19", + Goto("10"))) + CheckFunc(fun.f) + verifyDominators(t, fun, dominators, doms) + verifyDominators(t, fun, dominatorsSimple, doms) + } +} diff --git a/src/cmd/compile/internal/ssa/id.go b/src/cmd/compile/internal/ssa/id.go index 367e687abf..725279e9fd 100644 --- a/src/cmd/compile/internal/ssa/id.go +++ b/src/cmd/compile/internal/ssa/id.go @@ -11,7 +11,7 @@ type idAlloc struct { last ID } -// get allocates an ID and returns it. +// get allocates an ID and returns it. IDs are always > 0. func (a *idAlloc) get() ID { x := a.last x++ -- cgit v1.3 From e05b9746ddc6e53864d1ab26fc172b09ccbe321c Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Thu, 21 Apr 2016 20:58:35 -0700 Subject: cmd/compile: map TSLICE to obj.KindSlice directly Change-Id: Idab5f603c1743895b8f4edbcc55f7be83419a099 Reviewed-on: https://go-review.googlesource.com/22383 Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot Reviewed-by: Brad Fitzpatrick Reviewed-by: David Crawshaw --- src/cmd/compile/internal/gc/reflect.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 3cd769fd2d..49d55091ff 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -686,7 +686,7 @@ var kinds = []int{ TCHAN: obj.KindChan, TMAP: obj.KindMap, TARRAY: obj.KindArray, - TSLICE: obj.KindArray, + TSLICE: obj.KindSlice, TFUNC: obj.KindFunc, TCOMPLEX64: obj.KindComplex64, TCOMPLEX128: obj.KindComplex128, @@ -887,9 +887,6 @@ func dcommontype(s *Sym, ot int, t *Type) int { ot = duint8(s, ot, t.Align) // fieldAlign i = kinds[t.Etype] - if t.IsSlice() { - i = obj.KindSlice - } if !haspointers(t) { i |= obj.KindNoPointers } -- cgit v1.3 From 97360096e5e9fdea06be8c97f32bd83741f68adb Mon Sep 17 00:00:00 2001 From: Matthew Dempsky 
Date: Fri, 22 Apr 2016 12:27:29 -0700 Subject: cmd/compile: replace Ctype switches with type switches Instead of switching on Ctype (which internally uses a type switch) and then scattering lots of type assertions throughout the CTFOO case clauses, just use type switches directly on the underlying constant value. Passes toolstash/buildall. Change-Id: I9bc172cc67e5f391cddc15539907883b4010689e Reviewed-on: https://go-review.googlesource.com/22384 Run-TryBot: Matthew Dempsky Reviewed-by: Brad Fitzpatrick TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/const.go | 108 ++++++++++++++++------------------ src/cmd/compile/internal/gc/cplx.go | 5 +- src/cmd/compile/internal/gc/dcl.go | 14 ++--- src/cmd/compile/internal/gc/fmt.go | 45 +++++++------- src/cmd/compile/internal/gc/gsubr.go | 20 +++---- src/cmd/compile/internal/gc/obj.go | 21 ++++--- src/cmd/compile/internal/gc/parser.go | 19 +++--- src/cmd/compile/internal/gc/sinit.go | 30 ++++------ src/cmd/compile/internal/gc/ssa.go | 52 ++++++++-------- 9 files changed, 146 insertions(+), 168 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index c2ed0d31d8..68bcae3f4c 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -378,22 +378,22 @@ bad: } func copyval(v Val) Val { - switch v.Ctype() { - case CTINT, CTRUNE: + switch u := v.U.(type) { + case *Mpint: i := new(Mpint) - i.Set(v.U.(*Mpint)) - i.Rune = v.U.(*Mpint).Rune + i.Set(u) + i.Rune = u.Rune v.U = i - case CTFLT: + case *Mpflt: f := newMpflt() - f.Set(v.U.(*Mpflt)) + f.Set(u) v.U = f - case CTCPLX: + case *Mpcplx: c := new(Mpcplx) - c.Real.Set(&v.U.(*Mpcplx).Real) - c.Imag.Set(&v.U.(*Mpcplx).Imag) + c.Real.Set(&u.Real) + c.Imag.Set(&u.Imag) v.U = c } @@ -401,16 +401,16 @@ func copyval(v Val) Val { } func tocplx(v Val) Val { - switch v.Ctype() { - case CTINT, CTRUNE: + switch u := v.U.(type) { + case *Mpint: c := new(Mpcplx) - 
c.Real.SetInt(v.U.(*Mpint)) + c.Real.SetInt(u) c.Imag.SetFloat64(0.0) v.U = c - case CTFLT: + case *Mpflt: c := new(Mpcplx) - c.Real.Set(v.U.(*Mpflt)) + c.Real.Set(u) c.Imag.SetFloat64(0.0) v.U = c } @@ -419,17 +419,17 @@ func tocplx(v Val) Val { } func toflt(v Val) Val { - switch v.Ctype() { - case CTINT, CTRUNE: + switch u := v.U.(type) { + case *Mpint: f := newMpflt() - f.SetInt(v.U.(*Mpint)) + f.SetInt(u) v.U = f - case CTCPLX: + case *Mpcplx: f := newMpflt() - f.Set(&v.U.(*Mpcplx).Real) - if v.U.(*Mpcplx).Imag.CmpFloat64(0) != 0 { - Yyerror("constant %v%vi truncated to real", Fconv(&v.U.(*Mpcplx).Real, FmtSharp), Fconv(&v.U.(*Mpcplx).Imag, FmtSharp|FmtSign)) + f.Set(&u.Real) + if u.Imag.CmpFloat64(0) != 0 { + Yyerror("constant %v%vi truncated to real", Fconv(&u.Real, FmtSharp), Fconv(&u.Imag, FmtSharp|FmtSign)) } v.U = f } @@ -438,31 +438,33 @@ func toflt(v Val) Val { } func toint(v Val) Val { - switch v.Ctype() { - case CTRUNE: - i := new(Mpint) - i.Set(v.U.(*Mpint)) - v.U = i + switch u := v.U.(type) { + case *Mpint: + if u.Rune { + i := new(Mpint) + i.Set(u) + v.U = i + } - case CTFLT: + case *Mpflt: i := new(Mpint) - if f := v.U.(*Mpflt); i.SetFloat(f) < 0 { + if i.SetFloat(u) < 0 { msg := "constant %v truncated to integer" // provide better error message if SetFloat failed because f was too large - if f.Val.IsInt() { + if u.Val.IsInt() { msg = "constant %v overflows integer" } - Yyerror(msg, Fconv(f, FmtSharp)) + Yyerror(msg, Fconv(u, FmtSharp)) } v.U = i - case CTCPLX: + case *Mpcplx: i := new(Mpint) - if i.SetFloat(&v.U.(*Mpcplx).Real) < 0 { - Yyerror("constant %v%vi truncated to integer", Fconv(&v.U.(*Mpcplx).Real, FmtSharp), Fconv(&v.U.(*Mpcplx).Imag, FmtSharp|FmtSign)) + if i.SetFloat(&u.Real) < 0 { + Yyerror("constant %v%vi truncated to integer", Fconv(&u.Real, FmtSharp), Fconv(&u.Imag, FmtSharp|FmtSign)) } - if v.U.(*Mpcplx).Imag.CmpFloat64(0) != 0 { - Yyerror("constant %v%vi truncated to real", Fconv(&v.U.(*Mpcplx).Real, FmtSharp), 
Fconv(&v.U.(*Mpcplx).Imag, FmtSharp|FmtSign)) + if u.Imag.CmpFloat64(0) != 0 { + Yyerror("constant %v%vi truncated to real", Fconv(&u.Real, FmtSharp), Fconv(&u.Imag, FmtSharp|FmtSign)) } v.U = i } @@ -471,30 +473,25 @@ func toint(v Val) Val { } func doesoverflow(v Val, t *Type) bool { - switch v.Ctype() { - case CTINT, CTRUNE: + switch u := v.U.(type) { + case *Mpint: if !t.IsInteger() { Fatalf("overflow: %v integer constant", t) } - if v.U.(*Mpint).Cmp(Minintval[t.Etype]) < 0 || v.U.(*Mpint).Cmp(Maxintval[t.Etype]) > 0 { - return true - } + return u.Cmp(Minintval[t.Etype]) < 0 || u.Cmp(Maxintval[t.Etype]) > 0 - case CTFLT: + case *Mpflt: if !t.IsFloat() { Fatalf("overflow: %v floating-point constant", t) } - if v.U.(*Mpflt).Cmp(minfltval[t.Etype]) <= 0 || v.U.(*Mpflt).Cmp(maxfltval[t.Etype]) >= 0 { - return true - } + return u.Cmp(minfltval[t.Etype]) <= 0 || u.Cmp(maxfltval[t.Etype]) >= 0 - case CTCPLX: + case *Mpcplx: if !t.IsComplex() { Fatalf("overflow: %v complex constant", t) } - if v.U.(*Mpcplx).Real.Cmp(minfltval[t.Etype]) <= 0 || v.U.(*Mpcplx).Real.Cmp(maxfltval[t.Etype]) >= 0 || v.U.(*Mpcplx).Imag.Cmp(minfltval[t.Etype]) <= 0 || v.U.(*Mpcplx).Imag.Cmp(maxfltval[t.Etype]) >= 0 { - return true - } + return u.Real.Cmp(minfltval[t.Etype]) <= 0 || u.Real.Cmp(maxfltval[t.Etype]) >= 0 || + u.Imag.Cmp(minfltval[t.Etype]) <= 0 || u.Imag.Cmp(maxfltval[t.Etype]) >= 0 } return false @@ -518,21 +515,16 @@ func overflow(v Val, t *Type) { } func tostr(v Val) Val { - switch v.Ctype() { - case CTINT, CTRUNE: + switch u := v.U.(type) { + case *Mpint: var i int64 = 0xFFFD - if u := v.U.(*Mpint); u.Cmp(Minintval[TUINT32]) >= 0 && u.Cmp(Maxintval[TUINT32]) <= 0 { + if u.Cmp(Minintval[TUINT32]) >= 0 && u.Cmp(Maxintval[TUINT32]) <= 0 { i = u.Int64() } - v = Val{} v.U = string(i) - case CTFLT: - Yyerror("no float -> string") - fallthrough - - case CTNIL: - v = Val{} + case *NilVal: + // Can happen because of string([]byte(nil)). 
v.U = "" } diff --git a/src/cmd/compile/internal/gc/cplx.go b/src/cmd/compile/internal/gc/cplx.go index b0fa70b0ad..34fd0b96d9 100644 --- a/src/cmd/compile/internal/gc/cplx.go +++ b/src/cmd/compile/internal/gc/cplx.go @@ -89,8 +89,9 @@ func subnode(nr *Node, ni *Node, nc *Node) { t := Types[tc] if nc.Op == OLITERAL { - nodfconst(nr, t, &nc.Val().U.(*Mpcplx).Real) - nodfconst(ni, t, &nc.Val().U.(*Mpcplx).Imag) + u := nc.Val().U.(*Mpcplx) + nodfconst(nr, t, &u.Real) + nodfconst(ni, t, &u.Imag) return } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index e303f11c09..7f6e167488 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -755,17 +755,13 @@ func structfield(n *Node) *Field { f.Broke = true } - switch n.Val().Ctype() { - case CTSTR: - f.Note = new(string) - *f.Note = n.Val().U.(string) - + switch u := n.Val().U.(type) { + case string: + f.Note = &u default: Yyerror("field annotation must be string") - fallthrough - - case CTxxx: - f.Note = nil + case nil: + // noop } if n.Left != nil && n.Left.Op == ONAME { diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index e5977c0905..5f6edd1018 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -334,15 +334,16 @@ func Jconv(n *Node, flag FmtFlag) string { // Fmt "%V": Values func Vconv(v Val, flag FmtFlag) string { - switch v.Ctype() { - case CTINT: - if (flag&FmtSharp != 0) || fmtmode == FExp { - return Bconv(v.U.(*Mpint), FmtSharp) + switch u := v.U.(type) { + case *Mpint: + if !u.Rune { + if (flag&FmtSharp != 0) || fmtmode == FExp { + return Bconv(u, FmtSharp) + } + return Bconv(u, 0) } - return Bconv(v.U.(*Mpint), 0) - case CTRUNE: - x := v.U.(*Mpint).Int64() + x := u.Int64() if ' ' <= x && x < utf8.RuneSelf && x != '\\' && x != '\'' { return fmt.Sprintf("'%c'", int(x)) } @@ -352,39 +353,39 @@ func Vconv(v Val, flag FmtFlag) string { if 0 <= x && x <= utf8.MaxRune { 
return fmt.Sprintf("'\\U%08x'", uint64(x)) } - return fmt.Sprintf("('\\x00' + %v)", v.U.(*Mpint)) + return fmt.Sprintf("('\\x00' + %v)", u) - case CTFLT: + case *Mpflt: if (flag&FmtSharp != 0) || fmtmode == FExp { - return Fconv(v.U.(*Mpflt), 0) + return Fconv(u, 0) } - return Fconv(v.U.(*Mpflt), FmtSharp) + return Fconv(u, FmtSharp) - case CTCPLX: + case *Mpcplx: if (flag&FmtSharp != 0) || fmtmode == FExp { - return fmt.Sprintf("(%v+%vi)", &v.U.(*Mpcplx).Real, &v.U.(*Mpcplx).Imag) + return fmt.Sprintf("(%v+%vi)", &u.Real, &u.Imag) } if v.U.(*Mpcplx).Real.CmpFloat64(0) == 0 { - return fmt.Sprintf("%vi", Fconv(&v.U.(*Mpcplx).Imag, FmtSharp)) + return fmt.Sprintf("%vi", Fconv(&u.Imag, FmtSharp)) } if v.U.(*Mpcplx).Imag.CmpFloat64(0) == 0 { - return Fconv(&v.U.(*Mpcplx).Real, FmtSharp) + return Fconv(&u.Real, FmtSharp) } if v.U.(*Mpcplx).Imag.CmpFloat64(0) < 0 { - return fmt.Sprintf("(%v%vi)", Fconv(&v.U.(*Mpcplx).Real, FmtSharp), Fconv(&v.U.(*Mpcplx).Imag, FmtSharp)) + return fmt.Sprintf("(%v%vi)", Fconv(&u.Real, FmtSharp), Fconv(&u.Imag, FmtSharp)) } - return fmt.Sprintf("(%v+%vi)", Fconv(&v.U.(*Mpcplx).Real, FmtSharp), Fconv(&v.U.(*Mpcplx).Imag, FmtSharp)) + return fmt.Sprintf("(%v+%vi)", Fconv(&u.Real, FmtSharp), Fconv(&u.Imag, FmtSharp)) - case CTSTR: - return strconv.Quote(v.U.(string)) + case string: + return strconv.Quote(u) - case CTBOOL: - if v.U.(bool) { + case bool: + if u { return "true" } return "false" - case CTNIL: + case *NilVal: return "nil" } diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index bcfd3439a0..ff6fbe42fb 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -430,28 +430,28 @@ func Naddr(a *obj.Addr, n *Node) { if Thearch.LinkArch.Family == sys.I386 { a.Width = 0 } - switch n.Val().Ctype() { + switch u := n.Val().U.(type) { default: Fatalf("naddr: const %v", Tconv(n.Type, FmtLong)) - case CTFLT: + case *Mpflt: a.Type = obj.TYPE_FCONST - a.Val = 
n.Val().U.(*Mpflt).Float64() + a.Val = u.Float64() - case CTINT, CTRUNE: + case *Mpint: a.Sym = nil a.Type = obj.TYPE_CONST - a.Offset = n.Int64() + a.Offset = u.Int64() - case CTSTR: - datagostring(n.Val().U.(string), a) + case string: + datagostring(u, a) - case CTBOOL: + case bool: a.Sym = nil a.Type = obj.TYPE_CONST - a.Offset = int64(obj.Bool2int(n.Val().U.(bool))) + a.Offset = int64(obj.Bool2int(u)) - case CTNIL: + case *NilVal: a.Sym = nil a.Type = obj.TYPE_CONST a.Offset = 0 diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index fab611fdb5..378ac0d2c3 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -342,20 +342,23 @@ func gdata(nam *Node, nr *Node, wid int) { switch nr.Op { case OLITERAL: - switch nr.Val().Ctype() { - case CTCPLX: - gdatacomplex(nam, nr.Val().U.(*Mpcplx)) + switch u := nr.Val().U.(type) { + case *Mpcplx: + gdatacomplex(nam, u) - case CTSTR: - gdatastring(nam, nr.Val().U.(string)) + case string: + gdatastring(nam, u) - case CTINT, CTRUNE, CTBOOL: - i, _ := nr.IntLiteral() + case bool: + i := int64(obj.Bool2int(u)) Linksym(nam.Sym).WriteInt(Ctxt, nam.Xoffset, wid, i) - case CTFLT: + case *Mpint: + Linksym(nam.Sym).WriteInt(Ctxt, nam.Xoffset, wid, u.Int64()) + + case *Mpflt: s := Linksym(nam.Sym) - f := nr.Val().U.(*Mpflt).Float64() + f := u.Float64() switch nam.Type.Etype { case TFLOAT32: s.WriteFloat32(Ctxt, nam.Xoffset, float32(f)) diff --git a/src/cmd/compile/internal/gc/parser.go b/src/cmd/compile/internal/gc/parser.go index 6538877e68..ae4b497b7b 100644 --- a/src/cmd/compile/internal/gc/parser.go +++ b/src/cmd/compile/internal/gc/parser.go @@ -3246,17 +3246,14 @@ func (p *parser) hidden_literal() *Node { if p.tok == LLITERAL { ss := nodlit(p.val) p.next() - switch ss.Val().Ctype() { - case CTINT, CTRUNE: - ss.Val().U.(*Mpint).Neg() - break - case CTFLT: - ss.Val().U.(*Mpflt).Neg() - break - case CTCPLX: - ss.Val().U.(*Mpcplx).Real.Neg() - 
ss.Val().U.(*Mpcplx).Imag.Neg() - break + switch u := ss.Val().U.(type) { + case *Mpint: + u.Neg() + case *Mpflt: + u.Neg() + case *Mpcplx: + u.Real.Neg() + u.Imag.Neg() default: Yyerror("bad negated constant") } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 71c06eb0a0..5560415cab 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -1309,28 +1309,22 @@ func addvalue(p *InitPlan, xoffset int64, n *Node) { func iszero(n *Node) bool { switch n.Op { case OLITERAL: - switch n.Val().Ctype() { + switch u := n.Val().U.(type) { default: Dump("unexpected literal", n) Fatalf("iszero") - - case CTNIL: + case *NilVal: return true - - case CTSTR: - return n.Val().U.(string) == "" - - case CTBOOL: - return !n.Val().U.(bool) - - case CTINT, CTRUNE: - return n.Val().U.(*Mpint).CmpInt64(0) == 0 - - case CTFLT: - return n.Val().U.(*Mpflt).CmpFloat64(0) == 0 - - case CTCPLX: - return n.Val().U.(*Mpcplx).Real.CmpFloat64(0) == 0 && n.Val().U.(*Mpcplx).Imag.CmpFloat64(0) == 0 + case string: + return u == "" + case bool: + return !u + case *Mpint: + return u.CmpInt64(0) == 0 + case *Mpflt: + return u.CmpFloat64(0) == 0 + case *Mpcplx: + return u.Real.CmpFloat64(0) == 0 && u.Imag.CmpFloat64(0) == 0 } case OARRAYLIT: diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 218f720a61..f989ad0375 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1448,9 +1448,9 @@ func (s *state) expr(n *Node) *ssa.Value { addr := s.addr(n, false) return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem()) case OLITERAL: - switch n.Val().Ctype() { - case CTINT: - i := n.Int64() + switch u := n.Val().U.(type) { + case *Mpint: + i := u.Int64() switch n.Type.Size() { case 1: return s.constInt8(n.Type, int8(i)) @@ -1464,13 +1464,13 @@ func (s *state) expr(n *Node) *ssa.Value { s.Fatalf("bad integer size %d", n.Type.Size()) return nil } - case CTSTR: - 
if n.Val().U == "" { + case string: + if u == "" { return s.constEmptyString(n.Type) } - return s.entryNewValue0A(ssa.OpConstString, n.Type, n.Val().U) - case CTBOOL: - v := s.constBool(n.Val().U.(bool)) + return s.entryNewValue0A(ssa.OpConstString, n.Type, u) + case bool: + v := s.constBool(u) // For some reason the frontend gets the line numbers of // CTBOOL literals totally wrong. Fix it here by grabbing // the line number of the enclosing AST node. @@ -1478,7 +1478,7 @@ func (s *state) expr(n *Node) *ssa.Value { v.Line = s.line[len(s.line)-2] } return v - case CTNIL: + case *NilVal: t := n.Type switch { case t.IsSlice(): @@ -1488,36 +1488,30 @@ func (s *state) expr(n *Node) *ssa.Value { default: return s.constNil(t) } - case CTFLT: - f := n.Val().U.(*Mpflt) + case *Mpflt: switch n.Type.Size() { case 4: - return s.constFloat32(n.Type, f.Float32()) + return s.constFloat32(n.Type, u.Float32()) case 8: - return s.constFloat64(n.Type, f.Float64()) + return s.constFloat64(n.Type, u.Float64()) default: s.Fatalf("bad float size %d", n.Type.Size()) return nil } - case CTCPLX: - c := n.Val().U.(*Mpcplx) - r := &c.Real - i := &c.Imag + case *Mpcplx: + r := &u.Real + i := &u.Imag switch n.Type.Size() { case 8: - { - pt := Types[TFLOAT32] - return s.newValue2(ssa.OpComplexMake, n.Type, - s.constFloat32(pt, r.Float32()), - s.constFloat32(pt, i.Float32())) - } + pt := Types[TFLOAT32] + return s.newValue2(ssa.OpComplexMake, n.Type, + s.constFloat32(pt, r.Float32()), + s.constFloat32(pt, i.Float32())) case 16: - { - pt := Types[TFLOAT64] - return s.newValue2(ssa.OpComplexMake, n.Type, - s.constFloat64(pt, r.Float64()), - s.constFloat64(pt, i.Float64())) - } + pt := Types[TFLOAT64] + return s.newValue2(ssa.OpComplexMake, n.Type, + s.constFloat64(pt, r.Float64()), + s.constFloat64(pt, i.Float64())) default: s.Fatalf("bad float size %d", n.Type.Size()) return nil -- cgit v1.3 From 889c0a66fc7a43b23cc02ee42cfa17d221fce3c4 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Fri, 
22 Apr 2016 14:50:20 -0700 Subject: cmd/compile: don't export pos info in new export format for now Exporting filenames as part of the position information can lead to different object files which breaks tests. Change-Id: Ia678ab64293ebf04bf83601e6ba72919d05762a4 Reviewed-on: https://go-review.googlesource.com/22385 Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/bexport.go | 15 +++++++++++++++ src/cmd/compile/internal/gc/bimport.go | 11 +++++++++-- src/go/internal/gcimporter/bimport.go | 11 +++++++++-- 3 files changed, 33 insertions(+), 4 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 7aa6c9ce6f..496491131a 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -112,6 +112,14 @@ import ( // (suspected) format errors, and whenever a change is made to the format. const debugFormat = false // default: false +// If posInfoFormat is set, position information (file, lineno) is written +// for each exported object, including methods and struct fields. Currently +// disabled because it may lead to different object files depending on which +// directory they are built under, which causes tests checking for hermetic +// builds to fail (e.g. TestCgoConsistentResults for cmd/go). +// TODO(gri) determine what to do here. +const posInfoFormat = false + // TODO(gri) remove eventually const forceNewExport = false // force new export format - DO NOT SUBMIT with this flag set @@ -160,6 +168,9 @@ func export(out *bufio.Writer, trace bool) int { } p.rawByte(format) + // posInfo exported or not? 
+ p.bool(posInfoFormat) + // --- generic export data --- if p.trace { @@ -493,6 +504,10 @@ func (p *exporter) obj(sym *Sym) { } func (p *exporter) pos(n *Node) { + if !posInfoFormat { + return + } + var file string var line int if n != nil { diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index ef89f9ad0a..e05329bb12 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -30,8 +30,9 @@ type importer struct { funcList []*Node // nil entry means already declared // position encoding - prevFile string - prevLine int + posInfoFormat bool + prevFile string + prevLine int // debugging support debugFormat bool @@ -55,6 +56,8 @@ func Import(in *bufio.Reader) { Fatalf("importer: invalid encoding format in export data: got %q; want 'c' or 'd'", format) } + p.posInfoFormat = p.bool() + // --- generic export data --- if v := p.string(); v != exportVersion { @@ -279,6 +282,10 @@ func (p *importer) obj(tag int) { } func (p *importer) pos() { + if !p.posInfoFormat { + return + } + file := p.prevFile line := p.prevLine diff --git a/src/go/internal/gcimporter/bimport.go b/src/go/internal/gcimporter/bimport.go index d75e533e97..f2080ffe59 100644 --- a/src/go/internal/gcimporter/bimport.go +++ b/src/go/internal/gcimporter/bimport.go @@ -27,8 +27,9 @@ type importer struct { typList []types.Type // in order of appearance // position encoding - prevFile string - prevLine int + posInfoFormat bool + prevFile string + prevLine int // debugging support debugFormat bool @@ -57,6 +58,8 @@ func BImportData(imports map[string]*types.Package, data []byte, path string) (i return p.read, nil, fmt.Errorf("invalid encoding format in export data: got %q; want 'c' or 'd'", format) } + p.posInfoFormat = p.int() != 0 + // --- generic export data --- if v := p.string(); v != "v0" { @@ -194,6 +197,10 @@ func (p *importer) obj(tag int) { } func (p *importer) pos() { + if !p.posInfoFormat { + return + } + file := 
p.prevFile line := p.prevLine -- cgit v1.3 From f058ab09fb14afe3a51b880a6895b96aa3e07c85 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 22 Apr 2016 15:45:24 -0700 Subject: cmd/compile: remove redundant "// fallthrough" comments Change-Id: Ia3f262f06592b66447c213e2350402cd5e6e2ccd Reviewed-on: https://go-review.googlesource.com/22389 Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/cmd/compile/internal/gc/const.go | 9 ++------- src/cmd/compile/internal/gc/init.go | 2 -- src/cmd/compile/internal/gc/sinit.go | 4 ---- src/cmd/compile/internal/gc/typecheck.go | 5 ----- 4 files changed, 2 insertions(+), 18 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 68bcae3f4c..99b48f5ffe 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -226,16 +226,13 @@ func convlit1(n *Node, t *Type, explicit bool, reuse canReuseNode) *Node { case OCOMPLEX: if n.Type.Etype == TIDEAL { switch t.Etype { - // If trying to convert to non-complex type, - // leave as complex128 and let typechecker complain. default: + // If trying to convert to non-complex type, + // leave as complex128 and let typechecker complain. 
t = Types[TCOMPLEX128] fallthrough - - //fallthrough case TCOMPLEX128: n.Type = t - n.Left = convlit(n.Left, Types[TFLOAT64]) n.Right = convlit(n.Right, Types[TFLOAT64]) @@ -711,8 +708,6 @@ func evconst(n *Node) { break } fallthrough - - // fall through case OCONV_ | CTINT_, OCONV_ | CTRUNE_, OCONV_ | CTFLT_, diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index d355a46557..6c9223b57a 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -58,8 +58,6 @@ func anyinit(n []*Node) bool { break } fallthrough - - // fall through default: return true } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 5560415cab..2c2ade06f5 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -342,8 +342,6 @@ func staticcopy(l *Node, r *Node, out *[]*Node) bool { return true } fallthrough - - // fall through case OSTRUCTLIT: p := initplans[r] @@ -1332,8 +1330,6 @@ func iszero(n *Node) bool { break } fallthrough - - // fall through case OSTRUCTLIT: for _, n1 := range n.List.Slice() { if !iszero(n1.Right) { diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 7a8c65dc58..9bf4f58412 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3153,8 +3153,6 @@ func islvalue(n *Node) bool { return false } fallthrough - - // fall through case OIND, ODOTPTR, OCLOSUREVAR, OPARAM: return true @@ -3858,11 +3856,8 @@ func markbreak(n *Node, implicit *Node) { ORANGE: implicit = n fallthrough - - // fall through default: markbreak(n.Left, implicit) - markbreak(n.Right, implicit) markbreaklist(n.Ninit, implicit) markbreaklist(n.Nbody, implicit) -- cgit v1.3 From 217c284995400bb761e5718782c8a90748c75aef Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Wed, 20 Apr 2016 15:02:48 -0700 Subject: cmd/compile: combine stores into larger widths Combine 
stores into larger widths when it is safe to do so. Add clobber() function so stray dead uses do not impede the above rewrites. Fix bug in loads where all intermediate values depending on a small load (not just the load itself) must have no other uses. We really need the small load to be dead after the rewrite.. Fixes #14267 Change-Id: Ib25666cb19777f65082c76238fba51a76beb5d74 Reviewed-on: https://go-review.googlesource.com/22326 Run-TryBot: Keith Randall Reviewed-by: Todd Neal --- src/cmd/compile/internal/gc/testdata/dupLoad.go | 43 +- src/cmd/compile/internal/ssa/gen/AMD64.rules | 354 +++++- src/cmd/compile/internal/ssa/rewrite.go | 12 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 1390 ++++++++++++++++++++--- 4 files changed, 1582 insertions(+), 217 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/testdata/dupLoad.go b/src/cmd/compile/internal/gc/testdata/dupLoad.go index d12c26355a..d18dc733e1 100644 --- a/src/cmd/compile/internal/gc/testdata/dupLoad.go +++ b/src/cmd/compile/internal/gc/testdata/dupLoad.go @@ -12,7 +12,7 @@ package main import "fmt" //go:noinline -func read(b []byte) (uint16, uint16) { +func read1(b []byte) (uint16, uint16) { // There is only a single read of b[0]. The two // returned values must have the same low byte. v := b[0] @@ -21,7 +21,7 @@ func read(b []byte) (uint16, uint16) { const N = 100000 -func main() { +func main1() { done := make(chan struct{}) b := make([]byte, 2) go func() { @@ -33,7 +33,7 @@ func main() { }() go func() { for i := 0; i < N; i++ { - x, y := read(b) + x, y := read1(b) if byte(x) != byte(y) { fmt.Printf("x=%x y=%x\n", x, y) panic("bad") @@ -44,3 +44,40 @@ func main() { <-done <-done } + +//go:noinline +func read2(b []byte) (uint16, uint16) { + // There is only a single read of b[1]. The two + // returned values must have the same high byte. 
+ v := uint16(b[1]) << 8 + return v, uint16(b[0]) | v +} + +func main2() { + done := make(chan struct{}) + b := make([]byte, 2) + go func() { + for i := 0; i < N; i++ { + b[0] = byte(i) + b[1] = byte(i) + } + done <- struct{}{} + }() + go func() { + for i := 0; i < N; i++ { + x, y := read2(b) + if x&0xff00 != y&0xff00 { + fmt.Printf("x=%x y=%x\n", x, y) + panic("bad") + } + } + done <- struct{}{} + }() + <-done + <-done +} + +func main() { + main1() + main2() +} diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 21c74a9c1c..3cdac6f416 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -689,18 +689,18 @@ // Make sure we don't combine these ops if the load has another use. // This prevents a single load from being split into multiple loads // which then might return different values. See test/atomicload.go. -(MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 -> @x.Block (MOVBQSXload [off] {sym} ptr mem) -(MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 -> @x.Block (MOVBload [off] {sym} ptr mem) -(MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 -> @x.Block (MOVWQSXload [off] {sym} ptr mem) -(MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 -> @x.Block (MOVWload [off] {sym} ptr mem) -(MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 -> @x.Block (MOVLQSXload [off] {sym} ptr mem) -(MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 -> @x.Block (MOVLload [off] {sym} ptr mem) - -(MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 -> @x.Block (MOVBloadidx1 [off] {sym} ptr idx mem) -(MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 -> @x.Block (MOVWloadidx1 [off] {sym} ptr idx mem) -(MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) && x.Uses == 1 -> @x.Block (MOVWloadidx2 [off] {sym} ptr idx mem) -(MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 -> @x.Block 
(MOVLloadidx1 [off] {sym} ptr idx mem) -(MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) && x.Uses == 1 -> @x.Block (MOVLloadidx4 [off] {sym} ptr idx mem) +(MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBQSXload [off] {sym} ptr mem) +(MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload [off] {sym} ptr mem) +(MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWQSXload [off] {sym} ptr mem) +(MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload [off] {sym} ptr mem) +(MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLQSXload [off] {sym} ptr mem) +(MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload [off] {sym} ptr mem) + +(MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx1 [off] {sym} ptr idx mem) +(MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx1 [off] {sym} ptr idx mem) +(MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx2 [off] {sym} ptr idx mem) +(MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLloadidx1 [off] {sym} ptr idx mem) +(MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLloadidx4 [off] {sym} ptr idx mem) // replace load from same location as preceding store with copy (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x @@ -1368,40 +1368,296 @@ // Combining byte loads into larger (unaligned) loads. // There are many ways these combinations could occur. This is // designed to match the way encoding/binary.LittleEndian does it. 
-(ORW x0:(MOVBload [i] {s} p mem) - (SHLWconst [8] x1:(MOVBload [i+1] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && mergePoint(b,x0,x1) != nil -> @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) - -(ORL (ORL (ORL - x0:(MOVBload [i] {s} p mem) - (SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) - (SHLLconst [16] x2:(MOVBload [i+2] {s} p mem))) - (SHLLconst [24] x3:(MOVBload [i+3] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil -> @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem) - -(ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ - x0:(MOVBload [i] {s} p mem) - (SHLQconst [8] x1:(MOVBload [i+1] {s} p mem))) - (SHLQconst [16] x2:(MOVBload [i+2] {s} p mem))) - (SHLQconst [24] x3:(MOVBload [i+3] {s} p mem))) - (SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) - (SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) - (SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) - (SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) - -(ORW x0:(MOVBloadidx1 [i] {s} p idx mem) - (SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 && mergePoint(b,x0,x1) != nil -> @mergePoint(b,x0,x1) (MOVWloadidx1 [i] {s} p idx mem) - -(ORL (ORL (ORL - x0:(MOVBloadidx1 [i] {s} p idx mem) - (SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) - (SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) - (SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil -> @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 [i] {s} p idx mem) - -(ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ - x0:(MOVBloadidx1 [i] {s} p idx mem) - (SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) - (SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p 
idx mem))) - (SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) - (SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) - (SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) - (SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) - (SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 [i] {s} p idx mem) +(ORW x0:(MOVBload [i] {s} p mem) + s0:(SHLWconst [8] x1:(MOVBload [i+1] {s} p mem))) + && x0.Uses == 1 + && x1.Uses == 1 + && s0.Uses == 1 + && mergePoint(b,x0,x1) != nil + && clobber(x0) + && clobber(x1) + && clobber(s0) + -> @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) + +(ORL o0:(ORL o1:(ORL + x0:(MOVBload [i] {s} p mem) + s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) + s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem))) + s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem))) + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && s2.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && mergePoint(b,x0,x1,x2,x3) != nil + && clobber(x0) + && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(s0) + && clobber(s1) + && clobber(s2) + && clobber(o0) + && clobber(o1) + -> @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem) + +(ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ + x0:(MOVBload [i] {s} p mem) + s0:(SHLQconst [8] x1:(MOVBload [i+1] {s} p mem))) + s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem))) + s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem))) + s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) + s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) + s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) + s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && 
x5.Uses == 1 + && x6.Uses == 1 + && x7.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && s2.Uses == 1 + && s3.Uses == 1 + && s4.Uses == 1 + && s5.Uses == 1 + && s6.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && o2.Uses == 1 + && o3.Uses == 1 + && o4.Uses == 1 + && o5.Uses == 1 + && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil + && clobber(x0) + && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(x4) + && clobber(x5) + && clobber(x6) + && clobber(x7) + && clobber(s0) + && clobber(s1) + && clobber(s2) + && clobber(s3) + && clobber(s4) + && clobber(s5) + && clobber(s6) + && clobber(o0) + && clobber(o1) + && clobber(o2) + && clobber(o3) + && clobber(o4) + && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) + +(ORW x0:(MOVBloadidx1 [i] {s} p idx mem) + s0:(SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) + && x0.Uses == 1 + && x1.Uses == 1 + && s0.Uses == 1 + && mergePoint(b,x0,x1) != nil + && clobber(x0) + && clobber(x1) + && clobber(s0) + -> @mergePoint(b,x0,x1) (MOVWloadidx1 [i] {s} p idx mem) + +(ORL o0:(ORL o1:(ORL + x0:(MOVBloadidx1 [i] {s} p idx mem) + s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) + s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) + s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && s2.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && mergePoint(b,x0,x1,x2,x3) != nil + && clobber(x0) + && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(s0) + && clobber(s1) + && clobber(s2) + && clobber(o0) + && clobber(o1) + -> @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 [i] {s} p idx mem) + +(ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ + x0:(MOVBloadidx1 [i] {s} p idx mem) + s0:(SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) + s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) + s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) + 
s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) + s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) + s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) + s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) + && x0.Uses == 1 + && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && x7.Uses == 1 + && s0.Uses == 1 + && s1.Uses == 1 + && s2.Uses == 1 + && s3.Uses == 1 + && s4.Uses == 1 + && s5.Uses == 1 + && s6.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && o2.Uses == 1 + && o3.Uses == 1 + && o4.Uses == 1 + && o5.Uses == 1 + && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil + && clobber(x0) + && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(x4) + && clobber(x5) + && clobber(x6) + && clobber(x7) + && clobber(s0) + && clobber(s1) + && clobber(s2) + && clobber(s3) + && clobber(s4) + && clobber(s5) + && clobber(s6) + && clobber(o0) + && clobber(o1) + && clobber(o2) + && clobber(o3) + && clobber(o4) + && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 [i] {s} p idx mem) + +// Combine constant stores into larger (unaligned) stores. 
+(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) + && x.Uses == 1 + && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() + && clobber(x) + -> (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) +(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) + && x.Uses == 1 + && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() + && clobber(x) + -> (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) +(MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) + && x.Uses == 1 + && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() + && clobber(x) + -> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) + +(MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) + && x.Uses == 1 + && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() + && clobber(x) + -> (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) +(MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) + && x.Uses == 1 + && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() + && clobber(x) + -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) +(MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem)) + && x.Uses == 1 + && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() + && clobber(x) + -> (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) + +(MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) + && x.Uses == 1 + && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() + && clobber(x) + -> (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst [1] i) mem) +(MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i 
mem)) + && x.Uses == 1 + && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() + && clobber(x) + -> (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) + +// Combine stores into larger (unaligned) stores. +(MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVWstore [i-1] {s} p w mem) +(MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVWstore [i-1] {s} p w0 mem) +(MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVLstore [i-2] {s} p w mem) +(MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVLstore [i-2] {s} p w0 mem) +(MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVQstore [i-4] {s} p w mem) +(MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVQstore [i-4] {s} p w0 mem) + +(MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVWstoreidx1 [i-1] {s} p idx w mem) +(MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVWstoreidx1 [i-1] {s} p idx w0 mem) +(MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVLstoreidx1 [i-2] {s} p idx w mem) +(MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVLstoreidx1 [i-2] {s} p idx w0 mem) +(MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) + && 
x.Uses == 1 + && clobber(x) + -> (MOVQstoreidx1 [i-4] {s} p idx w mem) +(MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVQstoreidx1 [i-4] {s} p idx w0 mem) + +(MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVLstoreidx1 [i-2] {s} p (SHLQconst [1] idx) w mem) +(MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVLstoreidx1 [i-2] {s} p (SHLQconst [1] idx) w0 mem) +(MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVQstoreidx1 [i-4] {s} p (SHLQconst [2] idx) w mem) +(MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) + && x.Uses == 1 + && clobber(x) + -> (MOVQstoreidx1 [i-4] {s} p (SHLQconst [2] idx) w0 mem) diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index c2f8ceadaf..e9b408a86c 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -90,7 +90,7 @@ func applyRewrite(f *Func, rb func(*Block) bool, rv func(*Value, *Config) bool) break } } - // remove clobbered copies + // remove clobbered values for _, b := range f.Blocks { j := 0 for i, v := range b.Values { @@ -367,3 +367,13 @@ found: } return nil // too far away } + +// clobber invalidates v. Returns true. +// clobber is used by rewrite rules to: +// A) make sure v is really dead and never used again. +// B) decrement use counts of v's args. +func clobber(v *Value) bool { + v.reset(OpInvalid) + // Note: leave v.Block intact. The Block field is used after clobber. 
+ return true +} diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index d1793ad8c0..8507959f96 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -6354,7 +6354,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool { b := v.Block _ = b // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) - // cond: x.Uses == 1 + // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBQSXload [off] {sym} ptr mem) for { x := v.Args[0] @@ -6365,7 +6365,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool { sym := x.Aux ptr := x.Args[0] mem := x.Args[1] - if !(x.Uses == 1) { + if !(x.Uses == 1 && clobber(x)) { break } b = x.Block @@ -6431,7 +6431,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { b := v.Block _ = b // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem)) - // cond: x.Uses == 1 + // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBload [off] {sym} ptr mem) for { x := v.Args[0] @@ -6442,7 +6442,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { sym := x.Aux ptr := x.Args[0] mem := x.Args[1] - if !(x.Uses == 1) { + if !(x.Uses == 1 && clobber(x)) { break } b = x.Block @@ -6456,7 +6456,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { return true } // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 + // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBloadidx1 [off] {sym} ptr idx mem) for { x := v.Args[0] @@ -6468,7 +6468,7 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { ptr := x.Args[0] idx := x.Args[1] mem := x.Args[2] - if !(x.Uses == 1) { + if !(x.Uses == 1 && clobber(x)) { break } b = x.Block @@ -6846,6 +6846,97 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool { v.AddArg(mem) return true } + // match: (MOVBstore [i] {s} p (SHRQconst [8] w) 
x:(MOVBstore [i-1] {s} p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstore [i-1] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRQconst { + break + } + if v_1.AuxInt != 8 { + break + } + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpAMD64MOVBstore { + break + } + if x.AuxInt != i-1 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if w != x.Args[1] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVWstore) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstore [i-1] {s} p w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRQconst { + break + } + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpAMD64MOVBstore { + break + } + if x.AuxInt != i-1 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + w0 := x.Args[1] + if w0.Op != OpAMD64SHRQconst { + break + } + if w0.AuxInt != j-8 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVWstore) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool { @@ -6945,6 +7036,35 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool { v.AddArg(mem) return true } + // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) + // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem) + 
for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64MOVBstoreconst { + break + } + a := x.AuxInt + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + mem := x.Args[1] + if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool { @@ -6994,6 +7114,40 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool v.AddArg(mem) return true } + // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x) + // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + i := v.Args[1] + x := v.Args[2] + if x.Op != OpAMD64MOVBstoreconstidx1 { + break + } + a := x.AuxInt + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if i != x.Args[1] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(OpAMD64MOVWstoreconstidx1) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(i) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool { @@ -7047,13 +7201,114 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool { v.AddArg(mem) return true } + // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p 
idx w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { + break + } + if v_2.AuxInt != 8 { + break + } + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVBstoreidx1 { + break + } + if x.AuxInt != i-1 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + if w != x.Args[2] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { + break + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVBstoreidx1 { + break + } + if x.AuxInt != i-1 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + w0 := x.Args[2] + if w0.Op != OpAMD64SHRQconst { + break + } + if w0.AuxInt != j-8 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool { b := v.Block _ = b // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) - // cond: x.Uses == 1 + // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVLQSXload [off] {sym} ptr mem) for { x := v.Args[0] @@ -7064,7 +7319,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool { sym := x.Aux ptr := x.Args[0] mem := 
x.Args[1] - if !(x.Uses == 1) { + if !(x.Uses == 1 && clobber(x)) { break } b = x.Block @@ -7130,7 +7385,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { b := v.Block _ = b // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) - // cond: x.Uses == 1 + // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVLload [off] {sym} ptr mem) for { x := v.Args[0] @@ -7141,7 +7396,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { sym := x.Aux ptr := x.Args[0] mem := x.Args[1] - if !(x.Uses == 1) { + if !(x.Uses == 1 && clobber(x)) { break } b = x.Block @@ -7155,7 +7410,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { return true } // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 + // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVLloadidx1 [off] {sym} ptr idx mem) for { x := v.Args[0] @@ -7167,7 +7422,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { ptr := x.Args[0] idx := x.Args[1] mem := x.Args[2] - if !(x.Uses == 1) { + if !(x.Uses == 1 && clobber(x)) { break } b = x.Block @@ -7182,7 +7437,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { return true } // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 + // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVLloadidx4 [off] {sym} ptr idx mem) for { x := v.Args[0] @@ -7194,7 +7449,7 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { ptr := x.Args[0] idx := x.Args[1] mem := x.Args[2] - if !(x.Uses == 1) { + if !(x.Uses == 1 && clobber(x)) { break } b = x.Block @@ -7702,6 +7957,97 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool { v.AddArg(mem) return true } + // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVQstore [i-4] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + p := 
v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRQconst { + break + } + if v_1.AuxInt != 32 { + break + } + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpAMD64MOVLstore { + break + } + if x.AuxInt != i-4 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if w != x.Args[1] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVQstore [i-4] {s} p w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRQconst { + break + } + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpAMD64MOVLstore { + break + } + if x.AuxInt != i-4 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + w0 := x.Args[1] + if w0.Op != OpAMD64SHRQconst { + break + } + if w0.AuxInt != j-32 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool { @@ -7827,6 +8173,38 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool { v.AddArg(mem) return true } + // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) + // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64MOVLstoreconst { + break + } + a := x.AuxInt + if x.Aux != s { + 
break + } + if p != x.Args[0] { + break + } + mem := x.Args[1] + if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = ValAndOff(a).Off() + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) + v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 + v.AddArg(v0) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool { @@ -7900,6 +8278,43 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value, config *Config) bool v.AddArg(mem) return true } + // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) + // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + i := v.Args[1] + x := v.Args[2] + if x.Op != OpAMD64MOVLstoreconstidx1 { + break + } + a := x.AuxInt + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if i != x.Args[1] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = ValAndOff(a).Off() + v.Aux = s + v.AddArg(p) + v.AddArg(i) + v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) + v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 + v.AddArg(v0) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool { @@ -7949,6 +8364,46 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool v.AddArg(mem) return true } + // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == 
ValAndOff(c).Off() && clobber(x) + // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + i := v.Args[1] + x := v.Args[2] + if x.Op != OpAMD64MOVLstoreconstidx4 { + break + } + a := x.AuxInt + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if i != x.Args[1] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = ValAndOff(a).Off() + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type) + v0.AuxInt = 2 + v0.AddArg(i) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) + v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 + v.AddArg(v1) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool { @@ -8028,6 +8483,107 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value, config *Config) bool { v.AddArg(mem) return true } + // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { + break + } + if v_2.AuxInt != 32 { + break + } + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVLstoreidx1 { + break + } + if x.AuxInt != i-4 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + if w != x.Args[2] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx1 
[i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { + break + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVLstoreidx1 { + break + } + if x.AuxInt != i-4 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + w0 := x.Args[2] + if w0.Op != OpAMD64SHRQconst { + break + } + if w0.AuxInt != j-32 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool { @@ -8081,6 +8637,113 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool { v.AddArg(mem) return true } + // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst [2] idx) w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { + break + } + if v_2.AuxInt != 32 { + break + } + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVLstoreidx4 { + break + } + if x.AuxInt != i-4 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + if w != x.Args[2] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) + v0.AuxInt = 2 + v0.AddArg(idx) + 
v.AddArg(v0) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst [2] idx) w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { + break + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVLstoreidx4 { + break + } + if x.AuxInt != i-4 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + w0 := x.Args[2] + if w0.Op != OpAMD64SHRQconst { + break + } + if w0.AuxInt != j-32 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) + v0.AuxInt = 2 + v0.AddArg(idx) + v.AddArg(v0) + v.AddArg(w0) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool { @@ -9948,7 +10611,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool { b := v.Block _ = b // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) - // cond: x.Uses == 1 + // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWQSXload [off] {sym} ptr mem) for { x := v.Args[0] @@ -9959,7 +10622,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool { sym := x.Aux ptr := x.Args[0] mem := x.Args[1] - if !(x.Uses == 1) { + if !(x.Uses == 1 && clobber(x)) { break } b = x.Block @@ -10025,7 +10688,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool { b := v.Block _ = b // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) - // cond: x.Uses == 1 + // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWload [off] {sym} ptr mem) for { x := 
v.Args[0] @@ -10036,7 +10699,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool { sym := x.Aux ptr := x.Args[0] mem := x.Args[1] - if !(x.Uses == 1) { + if !(x.Uses == 1 && clobber(x)) { break } b = x.Block @@ -10050,7 +10713,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool { return true } // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 + // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWloadidx1 [off] {sym} ptr idx mem) for { x := v.Args[0] @@ -10062,7 +10725,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool { ptr := x.Args[0] idx := x.Args[1] mem := x.Args[2] - if !(x.Uses == 1) { + if !(x.Uses == 1 && clobber(x)) { break } b = x.Block @@ -10077,7 +10740,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool { return true } // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 + // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWloadidx2 [off] {sym} ptr idx mem) for { x := v.Args[0] @@ -10089,7 +10752,7 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool { ptr := x.Args[0] idx := x.Args[1] mem := x.Args[2] - if !(x.Uses == 1) { + if !(x.Uses == 1 && clobber(x)) { break } b = x.Block @@ -10594,6 +11257,97 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool { v.AddArg(mem) return true } + // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstore [i-2] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRQconst { + break + } + if v_1.AuxInt != 16 { + break + } + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpAMD64MOVWstore { + break + } + if x.AuxInt != i-2 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if w != x.Args[1] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && 
clobber(x)) { + break + } + v.reset(OpAMD64MOVLstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstore [i-2] {s} p w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRQconst { + break + } + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpAMD64MOVWstore { + break + } + if x.AuxInt != i-2 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + w0 := x.Args[1] + if w0.Op != OpAMD64SHRQconst { + break + } + if w0.AuxInt != j-16 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVLstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool { @@ -10719,6 +11473,35 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool { v.AddArg(mem) return true } + // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) + // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64MOVWstoreconst { + break + } + a := x.AuxInt + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + mem := x.Args[1] + if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true + } return false 
} func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool { @@ -10792,6 +11575,40 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value, config *Config) bool v.AddArg(mem) return true } + // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) + // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + i := v.Args[1] + x := v.Args[2] + if x.Op != OpAMD64MOVWstoreconstidx1 { + break + } + a := x.AuxInt + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if i != x.Args[1] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { + break + } + v.reset(OpAMD64MOVLstoreconstidx1) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(i) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool { @@ -10841,6 +11658,43 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool v.AddArg(mem) return true } + // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) + // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst [1] i) mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + i := v.Args[1] + x := v.Args[2] + if x.Op != OpAMD64MOVWstoreconstidx2 { + break + } + a := x.AuxInt + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if i != x.Args[1] { + break + } + mem := x.Args[2] + if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) 
{ + break + } + v.reset(OpAMD64MOVLstoreconstidx1) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, i.Type) + v0.AuxInt = 1 + v0.AddArg(i) + v.AddArg(v0) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool { @@ -10920,6 +11774,107 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value, config *Config) bool { v.AddArg(mem) return true } + // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { + break + } + if v_2.AuxInt != 16 { + break + } + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVWstoreidx1 { + break + } + if x.AuxInt != i-2 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + if w != x.Args[2] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { + break + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVWstoreidx1 { + break + } + if x.AuxInt != i-2 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + w0 := x.Args[2] + if w0.Op != OpAMD64SHRQconst { + break 
+ } + if w0.AuxInt != j-16 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool { @@ -10973,6 +11928,113 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool { v.AddArg(mem) return true } + // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst [1] idx) w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { + break + } + if v_2.AuxInt != 16 { + break + } + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVWstoreidx2 { + break + } + if x.AuxInt != i-2 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + } + if idx != x.Args[1] { + break + } + if w != x.Args[2] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) + v0.AuxInt = 1 + v0.AddArg(idx) + v.AddArg(v0) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst [1] idx) w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { + break + } + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVWstoreidx2 { + break + } + if x.AuxInt != i-2 { + break + } + if x.Aux != s { + break + } + if p != x.Args[0] { + break + 
} + if idx != x.Args[1] { + break + } + w0 := x.Args[2] + if w0.Op != OpAMD64SHRQconst { + break + } + if w0.AuxInt != j-16 { + break + } + if w != w0.Args[0] { + break + } + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Line, OpAMD64SHLQconst, idx.Type) + v0.AuxInt = 1 + v0.AddArg(idx) + v.AddArg(v0) + v.AddArg(w0) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MULB(v *Value, config *Config) bool { @@ -12665,19 +13727,19 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { v.AddArg(x) return true } - // match: (ORL (ORL (ORL x0:(MOVBload [i] {s} p mem) (SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) (SHLLconst [16] x2:(MOVBload [i+2] {s} p mem))) (SHLLconst [24] x3:(MOVBload [i+3] {s} p mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil + // match: (ORL o0:(ORL o1:(ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) // result: @mergePoint(b,x0,x1,x2,x3) (MOVLload [i] {s} p mem) for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64ORL { + o0 := v.Args[0] + if o0.Op != OpAMD64ORL { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64ORL { + o1 := o0.Args[0] + if o1.Op != OpAMD64ORL { break } - x0 := v_0_0.Args[0] + x0 := o1.Args[0] if x0.Op != OpAMD64MOVBload { break } @@ -12685,14 +13747,14 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { s := x0.Aux p := x0.Args[0] mem := 
x0.Args[1] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64SHLLconst { + s0 := o1.Args[1] + if s0.Op != OpAMD64SHLLconst { break } - if v_0_0_1.AuxInt != 8 { + if s0.AuxInt != 8 { break } - x1 := v_0_0_1.Args[0] + x1 := s0.Args[0] if x1.Op != OpAMD64MOVBload { break } @@ -12708,14 +13770,14 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { if mem != x1.Args[1] { break } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLLconst { + s1 := o0.Args[1] + if s1.Op != OpAMD64SHLLconst { break } - if v_0_1.AuxInt != 16 { + if s1.AuxInt != 16 { break } - x2 := v_0_1.Args[0] + x2 := s1.Args[0] if x2.Op != OpAMD64MOVBload { break } @@ -12731,14 +13793,14 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { if mem != x2.Args[1] { break } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst { + s2 := v.Args[1] + if s2.Op != OpAMD64SHLLconst { break } - if v_1.AuxInt != 24 { + if s2.AuxInt != 24 { break } - x3 := v_1.Args[0] + x3 := s2.Args[0] if x3.Op != OpAMD64MOVBload { break } @@ -12754,7 +13816,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { if mem != x3.Args[1] { break } - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { break } b = mergePoint(b, x0, x1, x2, x3) @@ -12767,19 +13829,19 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { v0.AddArg(mem) return true } - // match: (ORL (ORL (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) (SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) (SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) (SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) - // cond: x0.Uses == 1 
&& x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil + // match: (ORL o0:(ORL o1:(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) // result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 [i] {s} p idx mem) for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64ORL { + o0 := v.Args[0] + if o0.Op != OpAMD64ORL { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64ORL { + o1 := o0.Args[0] + if o1.Op != OpAMD64ORL { break } - x0 := v_0_0.Args[0] + x0 := o1.Args[0] if x0.Op != OpAMD64MOVBloadidx1 { break } @@ -12788,14 +13850,14 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { p := x0.Args[0] idx := x0.Args[1] mem := x0.Args[2] - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64SHLLconst { + s0 := o1.Args[1] + if s0.Op != OpAMD64SHLLconst { break } - if v_0_0_1.AuxInt != 8 { + if s0.AuxInt != 8 { break } - x1 := v_0_0_1.Args[0] + x1 := s0.Args[0] if x1.Op != OpAMD64MOVBloadidx1 { break } @@ -12814,14 +13876,14 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { if mem != x1.Args[2] { break } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLLconst { + s1 := o0.Args[1] + if s1.Op != OpAMD64SHLLconst { break } - if v_0_1.AuxInt != 16 { + if s1.AuxInt != 16 { break } - x2 := v_0_1.Args[0] + x2 := s1.Args[0] if x2.Op != OpAMD64MOVBloadidx1 { break } @@ -12840,14 +13902,14 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { if mem != x2.Args[2] { break } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst { + s2 := 
v.Args[1] + if s2.Op != OpAMD64SHLLconst { break } - if v_1.AuxInt != 24 { + if s2.AuxInt != 24 { break } - x3 := v_1.Args[0] + x3 := s2.Args[0] if x3.Op != OpAMD64MOVBloadidx1 { break } @@ -12866,7 +13928,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { if mem != x3.Args[2] { break } - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { break } b = mergePoint(b, x0, x1, x2, x3) @@ -12979,35 +14041,35 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { v.AddArg(x) return true } - // match: (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ x0:(MOVBload [i] {s} p mem) (SHLQconst [8] x1:(MOVBload [i+1] {s} p mem))) (SHLQconst [16] x2:(MOVBload [i+2] {s} p mem))) (SHLQconst [24] x3:(MOVBload [i+3] {s} p mem))) (SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) (SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) (SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) (SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil + // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBload [i] {s} p mem) s0:(SHLQconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem))) s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && 
x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64ORQ { + o0 := v.Args[0] + if o0.Op != OpAMD64ORQ { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64ORQ { + o1 := o0.Args[0] + if o1.Op != OpAMD64ORQ { break } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64ORQ { + o2 := o1.Args[0] + if o2.Op != OpAMD64ORQ { break } - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAMD64ORQ { + o3 := o2.Args[0] + if o3.Op != OpAMD64ORQ { break } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if v_0_0_0_0_0.Op != OpAMD64ORQ { + o4 := o3.Args[0] + if o4.Op != OpAMD64ORQ { break } - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpAMD64ORQ { + o5 := o4.Args[0] + if o5.Op != OpAMD64ORQ { break } - x0 := v_0_0_0_0_0_0.Args[0] + x0 := o5.Args[0] if x0.Op != OpAMD64MOVBload { break } @@ -13015,14 +14077,14 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { s := x0.Aux p := x0.Args[0] mem := x0.Args[1] - v_0_0_0_0_0_0_1 := v_0_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_0_1.Op != OpAMD64SHLQconst { + s0 := o5.Args[1] + if s0.Op != OpAMD64SHLQconst { break } - if v_0_0_0_0_0_0_1.AuxInt != 8 { + if s0.AuxInt != 8 { break } - x1 := v_0_0_0_0_0_0_1.Args[0] + x1 := s0.Args[0] if x1.Op != OpAMD64MOVBload { break } @@ -13038,14 +14100,14 @@ func 
rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x1.Args[1] { break } - v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_1.Op != OpAMD64SHLQconst { + s1 := o4.Args[1] + if s1.Op != OpAMD64SHLQconst { break } - if v_0_0_0_0_0_1.AuxInt != 16 { + if s1.AuxInt != 16 { break } - x2 := v_0_0_0_0_0_1.Args[0] + x2 := s1.Args[0] if x2.Op != OpAMD64MOVBload { break } @@ -13061,14 +14123,14 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x2.Args[1] { break } - v_0_0_0_0_1 := v_0_0_0_0.Args[1] - if v_0_0_0_0_1.Op != OpAMD64SHLQconst { + s2 := o3.Args[1] + if s2.Op != OpAMD64SHLQconst { break } - if v_0_0_0_0_1.AuxInt != 24 { + if s2.AuxInt != 24 { break } - x3 := v_0_0_0_0_1.Args[0] + x3 := s2.Args[0] if x3.Op != OpAMD64MOVBload { break } @@ -13084,14 +14146,14 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x3.Args[1] { break } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpAMD64SHLQconst { + s3 := o2.Args[1] + if s3.Op != OpAMD64SHLQconst { break } - if v_0_0_0_1.AuxInt != 32 { + if s3.AuxInt != 32 { break } - x4 := v_0_0_0_1.Args[0] + x4 := s3.Args[0] if x4.Op != OpAMD64MOVBload { break } @@ -13107,14 +14169,14 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x4.Args[1] { break } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64SHLQconst { + s4 := o1.Args[1] + if s4.Op != OpAMD64SHLQconst { break } - if v_0_0_1.AuxInt != 40 { + if s4.AuxInt != 40 { break } - x5 := v_0_0_1.Args[0] + x5 := s4.Args[0] if x5.Op != OpAMD64MOVBload { break } @@ -13130,14 +14192,14 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x5.Args[1] { break } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLQconst { + s5 := o0.Args[1] + if s5.Op != OpAMD64SHLQconst { break } - if v_0_1.AuxInt != 48 { + if s5.AuxInt != 48 { break } - x6 := v_0_1.Args[0] + x6 := s5.Args[0] if x6.Op != OpAMD64MOVBload { break } @@ -13153,14 +14215,14 @@ func 
rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x6.Args[1] { break } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { + s6 := v.Args[1] + if s6.Op != OpAMD64SHLQconst { break } - if v_1.AuxInt != 56 { + if s6.AuxInt != 56 { break } - x7 := v_1.Args[0] + x7 := s6.Args[0] if x7.Op != OpAMD64MOVBload { break } @@ -13176,7 +14238,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x7.Args[1] { break } - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { break } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) @@ -13189,35 +14251,35 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { v0.AddArg(mem) return true } - // match: (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) (SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) (SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) (SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) (SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) (SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) (SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) (SHLQconst [56] x7:(MOVBloadidx1 
[i+7] {s} p idx mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil + // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 [i] {s} p idx mem) for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64ORQ { + o0 := v.Args[0] + if o0.Op != OpAMD64ORQ { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64ORQ { + o1 := o0.Args[0] + if o1.Op != OpAMD64ORQ { break } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64ORQ { + o2 := o1.Args[0] + if o2.Op != OpAMD64ORQ { break } - v_0_0_0_0 := v_0_0_0.Args[0] - if v_0_0_0_0.Op != OpAMD64ORQ { + o3 := o2.Args[0] + if o3.Op != OpAMD64ORQ { break } - v_0_0_0_0_0 := v_0_0_0_0.Args[0] - if 
v_0_0_0_0_0.Op != OpAMD64ORQ { + o4 := o3.Args[0] + if o4.Op != OpAMD64ORQ { break } - v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0] - if v_0_0_0_0_0_0.Op != OpAMD64ORQ { + o5 := o4.Args[0] + if o5.Op != OpAMD64ORQ { break } - x0 := v_0_0_0_0_0_0.Args[0] + x0 := o5.Args[0] if x0.Op != OpAMD64MOVBloadidx1 { break } @@ -13226,14 +14288,14 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { p := x0.Args[0] idx := x0.Args[1] mem := x0.Args[2] - v_0_0_0_0_0_0_1 := v_0_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_0_1.Op != OpAMD64SHLQconst { + s0 := o5.Args[1] + if s0.Op != OpAMD64SHLQconst { break } - if v_0_0_0_0_0_0_1.AuxInt != 8 { + if s0.AuxInt != 8 { break } - x1 := v_0_0_0_0_0_0_1.Args[0] + x1 := s0.Args[0] if x1.Op != OpAMD64MOVBloadidx1 { break } @@ -13252,14 +14314,14 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x1.Args[2] { break } - v_0_0_0_0_0_1 := v_0_0_0_0_0.Args[1] - if v_0_0_0_0_0_1.Op != OpAMD64SHLQconst { + s1 := o4.Args[1] + if s1.Op != OpAMD64SHLQconst { break } - if v_0_0_0_0_0_1.AuxInt != 16 { + if s1.AuxInt != 16 { break } - x2 := v_0_0_0_0_0_1.Args[0] + x2 := s1.Args[0] if x2.Op != OpAMD64MOVBloadidx1 { break } @@ -13278,14 +14340,14 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x2.Args[2] { break } - v_0_0_0_0_1 := v_0_0_0_0.Args[1] - if v_0_0_0_0_1.Op != OpAMD64SHLQconst { + s2 := o3.Args[1] + if s2.Op != OpAMD64SHLQconst { break } - if v_0_0_0_0_1.AuxInt != 24 { + if s2.AuxInt != 24 { break } - x3 := v_0_0_0_0_1.Args[0] + x3 := s2.Args[0] if x3.Op != OpAMD64MOVBloadidx1 { break } @@ -13304,14 +14366,14 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x3.Args[2] { break } - v_0_0_0_1 := v_0_0_0.Args[1] - if v_0_0_0_1.Op != OpAMD64SHLQconst { + s3 := o2.Args[1] + if s3.Op != OpAMD64SHLQconst { break } - if v_0_0_0_1.AuxInt != 32 { + if s3.AuxInt != 32 { break } - x4 := v_0_0_0_1.Args[0] + x4 := s3.Args[0] if x4.Op != OpAMD64MOVBloadidx1 { break } 
@@ -13330,14 +14392,14 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x4.Args[2] { break } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpAMD64SHLQconst { + s4 := o1.Args[1] + if s4.Op != OpAMD64SHLQconst { break } - if v_0_0_1.AuxInt != 40 { + if s4.AuxInt != 40 { break } - x5 := v_0_0_1.Args[0] + x5 := s4.Args[0] if x5.Op != OpAMD64MOVBloadidx1 { break } @@ -13356,14 +14418,14 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x5.Args[2] { break } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLQconst { + s5 := o0.Args[1] + if s5.Op != OpAMD64SHLQconst { break } - if v_0_1.AuxInt != 48 { + if s5.AuxInt != 48 { break } - x6 := v_0_1.Args[0] + x6 := s5.Args[0] if x6.Op != OpAMD64MOVBloadidx1 { break } @@ -13382,14 +14444,14 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x6.Args[2] { break } - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { + s6 := v.Args[1] + if s6.Op != OpAMD64SHLQconst { break } - if v_1.AuxInt != 56 { + if s6.AuxInt != 56 { break } - x7 := v_1.Args[0] + x7 := s6.Args[0] if x7.Op != OpAMD64MOVBloadidx1 { break } @@ -13408,7 +14470,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { if mem != x7.Args[2] { break } - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && 
clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { break } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) @@ -13513,8 +14575,8 @@ func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool { v.AddArg(x) return true } - // match: (ORW x0:(MOVBload [i] {s} p mem) (SHLWconst [8] x1:(MOVBload [i+1] {s} p mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && mergePoint(b,x0,x1) != nil + // match: (ORW x0:(MOVBload [i] {s} p mem) s0:(SHLWconst [8] x1:(MOVBload [i+1] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) for { x0 := v.Args[0] @@ -13525,14 +14587,14 @@ func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool { s := x0.Aux p := x0.Args[0] mem := x0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLWconst { + s0 := v.Args[1] + if s0.Op != OpAMD64SHLWconst { break } - if v_1.AuxInt != 8 { + if s0.AuxInt != 8 { break } - x1 := v_1.Args[0] + x1 := s0.Args[0] if x1.Op != OpAMD64MOVBload { break } @@ -13548,7 +14610,7 @@ func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool { if mem != x1.Args[1] { break } - if !(x0.Uses == 1 && x1.Uses == 1 && mergePoint(b, x0, x1) != nil) { + if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { break } b = mergePoint(b, x0, x1) @@ -13561,8 +14623,8 @@ func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool { v0.AddArg(mem) return true } - // match: (ORW x0:(MOVBloadidx1 [i] {s} p idx mem) (SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && mergePoint(b,x0,x1) != nil + // match: (ORW x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) + // cond: x0.Uses == 1 && 
x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i] {s} p idx mem) for { x0 := v.Args[0] @@ -13574,14 +14636,14 @@ func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool { p := x0.Args[0] idx := x0.Args[1] mem := x0.Args[2] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLWconst { + s0 := v.Args[1] + if s0.Op != OpAMD64SHLWconst { break } - if v_1.AuxInt != 8 { + if s0.AuxInt != 8 { break } - x1 := v_1.Args[0] + x1 := s0.Args[0] if x1.Op != OpAMD64MOVBloadidx1 { break } @@ -13600,7 +14662,7 @@ func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool { if mem != x1.Args[2] { break } - if !(x0.Uses == 1 && x1.Uses == 1 && mergePoint(b, x0, x1) != nil) { + if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { break } b = mergePoint(b, x0, x1) -- cgit v1.3 From 9e3c68f1e02021a845c452ae347d06332e4ed79d Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Fri, 22 Apr 2016 13:09:18 -0700 Subject: cmd/compile: get rid of most byte and word insns for amd64 Now that we're using 32-bit ops for 8/16-bit logical operations (to avoid partial register stalls), there's really no need to keep track of the 8/16-bit ops at all. Convert everything we can to 32-bit ops. This CL is the obvious stuff. I might think a bit more about whether we can get rid of weirder stuff like HMULWU. The only downside to this CL is that we lose some information about constants. If we had source like: var a byte = ... a += 128 a += 128 We will convert that to a += 256, when we could get rid of the add altogether. This seems like a fairly unusual scenario and I'm happy with forgoing that optimization. 
Change-Id: Ia7c1e5203d0d110807da69ed646535194a3efba1 Reviewed-on: https://go-review.googlesource.com/22382 Reviewed-by: Todd Neal --- src/cmd/compile/internal/amd64/ssa.go | 36 +- src/cmd/compile/internal/ssa/gen/AMD64.rules | 391 ++-- src/cmd/compile/internal/ssa/gen/AMD64Ops.go | 34 - src/cmd/compile/internal/ssa/opGen.go | 572 ----- src/cmd/compile/internal/ssa/regalloc_test.go | 4 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 2767 +++++-------------------- 6 files changed, 635 insertions(+), 3169 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 21dbc6238c..6557287caa 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -62,7 +62,7 @@ func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) { } for i := len(b.Values) - 1; i >= 0; i-- { v := b.Values[i] - if flive && (v.Op == ssa.OpAMD64MOVBconst || v.Op == ssa.OpAMD64MOVWconst || v.Op == ssa.OpAMD64MOVLconst || v.Op == ssa.OpAMD64MOVQconst) { + if flive && (v.Op == ssa.OpAMD64MOVLconst || v.Op == ssa.OpAMD64MOVQconst) { // The "mark" is any non-nil Aux value. 
v.Aux = v } @@ -160,7 +160,7 @@ func opregreg(op obj.As, dest, src int16) *obj.Prog { func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { s.SetLineno(v.Line) switch v.Op { - case ssa.OpAMD64ADDQ, ssa.OpAMD64ADDL, ssa.OpAMD64ADDW, ssa.OpAMD64ADDB: + case ssa.OpAMD64ADDQ, ssa.OpAMD64ADDL: r := gc.SSARegNum(v) r1 := gc.SSARegNum(v.Args[0]) r2 := gc.SSARegNum(v.Args[1]) @@ -193,12 +193,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Reg = r } // 2-address opcode arithmetic - case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL, ssa.OpAMD64SUBW, ssa.OpAMD64SUBB, - ssa.OpAMD64MULQ, ssa.OpAMD64MULL, ssa.OpAMD64MULW, ssa.OpAMD64MULB, - ssa.OpAMD64ANDQ, ssa.OpAMD64ANDL, ssa.OpAMD64ANDW, ssa.OpAMD64ANDB, - ssa.OpAMD64ORQ, ssa.OpAMD64ORL, ssa.OpAMD64ORW, ssa.OpAMD64ORB, - ssa.OpAMD64XORQ, ssa.OpAMD64XORL, ssa.OpAMD64XORW, ssa.OpAMD64XORB, - ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL, ssa.OpAMD64SHLW, ssa.OpAMD64SHLB, + case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL, + ssa.OpAMD64MULQ, ssa.OpAMD64MULL, + ssa.OpAMD64ANDQ, ssa.OpAMD64ANDL, + ssa.OpAMD64ORQ, ssa.OpAMD64ORL, + ssa.OpAMD64XORQ, ssa.OpAMD64XORL, + ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL, ssa.OpAMD64SHRQ, ssa.OpAMD64SHRL, ssa.OpAMD64SHRW, ssa.OpAMD64SHRB, ssa.OpAMD64SARQ, ssa.OpAMD64SARL, ssa.OpAMD64SARW, ssa.OpAMD64SARB, ssa.OpAMD64ADDSS, ssa.OpAMD64ADDSD, ssa.OpAMD64SUBSS, ssa.OpAMD64SUBSD, @@ -335,7 +335,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = r - case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst, ssa.OpAMD64ADDWconst, ssa.OpAMD64ADDBconst: + case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst: r := gc.SSARegNum(v) a := gc.SSARegNum(v.Args[0]) if r == a { @@ -408,7 +408,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = r - case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst, ssa.OpAMD64MULWconst, ssa.OpAMD64MULBconst: + case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst: r := gc.SSARegNum(v) if r != gc.SSARegNum(v.Args[0]) { v.Fatalf("input[0] and output not 
in same register %s", v.LongString()) @@ -424,11 +424,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { //p.From3.Type = obj.TYPE_REG //p.From3.Reg = gc.SSARegNum(v.Args[0]) - case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst, ssa.OpAMD64SUBWconst, ssa.OpAMD64SUBBconst, - ssa.OpAMD64ANDQconst, ssa.OpAMD64ANDLconst, ssa.OpAMD64ANDWconst, ssa.OpAMD64ANDBconst, - ssa.OpAMD64ORQconst, ssa.OpAMD64ORLconst, ssa.OpAMD64ORWconst, ssa.OpAMD64ORBconst, - ssa.OpAMD64XORQconst, ssa.OpAMD64XORLconst, ssa.OpAMD64XORWconst, ssa.OpAMD64XORBconst, - ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst, ssa.OpAMD64SHLWconst, ssa.OpAMD64SHLBconst, + case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst, + ssa.OpAMD64ANDQconst, ssa.OpAMD64ANDLconst, + ssa.OpAMD64ORQconst, ssa.OpAMD64ORLconst, + ssa.OpAMD64XORQconst, ssa.OpAMD64XORLconst, + ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst, ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst, ssa.OpAMD64SHRWconst, ssa.OpAMD64SHRBconst, ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst, ssa.OpAMD64SARWconst, ssa.OpAMD64SARBconst, ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst, ssa.OpAMD64ROLBconst: @@ -497,7 +497,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = gc.SSARegNum(v.Args[0]) - case ssa.OpAMD64MOVBconst, ssa.OpAMD64MOVWconst, ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst: + case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst: x := gc.SSARegNum(v) p := gc.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST @@ -812,9 +812,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { if gc.Maxarg < v.AuxInt { gc.Maxarg = v.AuxInt } - case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL, ssa.OpAMD64NEGW, ssa.OpAMD64NEGB, + case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL, ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL, - ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL, ssa.OpAMD64NOTW, ssa.OpAMD64NOTB: + ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL: r := gc.SSARegNum(v) if r != gc.SSARegNum(v.Args[0]) { v.Fatalf("input[0] and output not in same register %s", 
v.LongString()) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 3cdac6f416..c0e83d7adc 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -6,23 +6,23 @@ (Add64 x y) -> (ADDQ x y) (AddPtr x y) -> (ADDQ x y) (Add32 x y) -> (ADDL x y) -(Add16 x y) -> (ADDW x y) -(Add8 x y) -> (ADDB x y) +(Add16 x y) -> (ADDL x y) +(Add8 x y) -> (ADDL x y) (Add32F x y) -> (ADDSS x y) (Add64F x y) -> (ADDSD x y) (Sub64 x y) -> (SUBQ x y) (SubPtr x y) -> (SUBQ x y) (Sub32 x y) -> (SUBL x y) -(Sub16 x y) -> (SUBW x y) -(Sub8 x y) -> (SUBB x y) +(Sub16 x y) -> (SUBL x y) +(Sub8 x y) -> (SUBL x y) (Sub32F x y) -> (SUBSS x y) (Sub64F x y) -> (SUBSD x y) (Mul64 x y) -> (MULQ x y) (Mul32 x y) -> (MULL x y) -(Mul16 x y) -> (MULW x y) -(Mul8 x y) -> (MULB x y) +(Mul16 x y) -> (MULL x y) +(Mul8 x y) -> (MULL x y) (Mul32F x y) -> (MULSS x y) (Mul64F x y) -> (MULSD x y) @@ -60,30 +60,30 @@ (And64 x y) -> (ANDQ x y) (And32 x y) -> (ANDL x y) -(And16 x y) -> (ANDW x y) -(And8 x y) -> (ANDB x y) +(And16 x y) -> (ANDL x y) +(And8 x y) -> (ANDL x y) (Or64 x y) -> (ORQ x y) (Or32 x y) -> (ORL x y) -(Or16 x y) -> (ORW x y) -(Or8 x y) -> (ORB x y) +(Or16 x y) -> (ORL x y) +(Or8 x y) -> (ORL x y) (Xor64 x y) -> (XORQ x y) (Xor32 x y) -> (XORL x y) -(Xor16 x y) -> (XORW x y) -(Xor8 x y) -> (XORB x y) +(Xor16 x y) -> (XORL x y) +(Xor8 x y) -> (XORL x y) (Neg64 x) -> (NEGQ x) (Neg32 x) -> (NEGL x) -(Neg16 x) -> (NEGW x) -(Neg8 x) -> (NEGB x) +(Neg16 x) -> (NEGL x) +(Neg8 x) -> (NEGL x) (Neg32F x) -> (PXOR x (MOVSSconst [f2i(math.Copysign(0, -1))])) (Neg64F x) -> (PXOR x (MOVSDconst [f2i(math.Copysign(0, -1))])) (Com64 x) -> (NOTQ x) (Com32 x) -> (NOTL x) -(Com16 x) -> (NOTW x) -(Com8 x) -> (NOTB x) +(Com16 x) -> (NOTL x) +(Com8 x) -> (NOTL x) // CMPQconst 0 below is redundant because BSF sets Z but how to remove? 
(Ctz64 x) -> (CMOVQEQconst (BSFQ x) (CMPQconst x [0]) [64]) @@ -169,15 +169,15 @@ (Lsh32x16 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) (Lsh32x8 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) -(Lsh16x64 x y) -> (ANDW (SHLW x y) (SBBLcarrymask (CMPQconst y [16]))) -(Lsh16x32 x y) -> (ANDW (SHLW x y) (SBBLcarrymask (CMPLconst y [16]))) -(Lsh16x16 x y) -> (ANDW (SHLW x y) (SBBLcarrymask (CMPWconst y [16]))) -(Lsh16x8 x y) -> (ANDW (SHLW x y) (SBBLcarrymask (CMPBconst y [16]))) +(Lsh16x64 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) +(Lsh16x32 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) +(Lsh16x16 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) +(Lsh16x8 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) -(Lsh8x64 x y) -> (ANDB (SHLB x y) (SBBLcarrymask (CMPQconst y [8]))) -(Lsh8x32 x y) -> (ANDB (SHLB x y) (SBBLcarrymask (CMPLconst y [8]))) -(Lsh8x16 x y) -> (ANDB (SHLB x y) (SBBLcarrymask (CMPWconst y [8]))) -(Lsh8x8 x y) -> (ANDB (SHLB x y) (SBBLcarrymask (CMPBconst y [8]))) +(Lsh8x64 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) +(Lsh8x32 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) +(Lsh8x16 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) +(Lsh8x8 x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) (Lrot64 x [c]) -> (ROLQconst [c&63] x) (Lrot32 x [c]) -> (ROLLconst [c&31] x) @@ -194,38 +194,38 @@ (Rsh32Ux16 x y) -> (ANDL (SHRL x y) (SBBLcarrymask (CMPWconst y [32]))) (Rsh32Ux8 x y) -> (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32]))) -(Rsh16Ux64 x y) -> (ANDW (SHRW x y) (SBBLcarrymask (CMPQconst y [16]))) -(Rsh16Ux32 x y) -> (ANDW (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) -(Rsh16Ux16 x y) -> (ANDW (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) -(Rsh16Ux8 x y) -> (ANDW (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) +(Rsh16Ux64 x y) -> (ANDL (SHRW x y) (SBBLcarrymask (CMPQconst y [16]))) +(Rsh16Ux32 x y) -> (ANDL (SHRW x 
y) (SBBLcarrymask (CMPLconst y [16]))) +(Rsh16Ux16 x y) -> (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) +(Rsh16Ux8 x y) -> (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) -(Rsh8Ux64 x y) -> (ANDB (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) -(Rsh8Ux32 x y) -> (ANDB (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) -(Rsh8Ux16 x y) -> (ANDB (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) -(Rsh8Ux8 x y) -> (ANDB (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) +(Rsh8Ux64 x y) -> (ANDL (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) +(Rsh8Ux32 x y) -> (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) +(Rsh8Ux16 x y) -> (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) +(Rsh8Ux8 x y) -> (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) // Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value. // We implement this by setting the shift value to -1 (all ones) if the shift value is >= width. // Note: for small shift widths we generate 32 bits of mask even when we don't need it all. 
(Rsh64x64 x y) -> (SARQ x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [64]))))) (Rsh64x32 x y) -> (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [64]))))) -(Rsh64x16 x y) -> (SARQ x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [64]))))) -(Rsh64x8 x y) -> (SARQ x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) +(Rsh64x16 x y) -> (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [64]))))) +(Rsh64x8 x y) -> (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) (Rsh32x64 x y) -> (SARL x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [32]))))) (Rsh32x32 x y) -> (SARL x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [32]))))) -(Rsh32x16 x y) -> (SARL x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) -(Rsh32x8 x y) -> (SARL x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) +(Rsh32x16 x y) -> (SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) +(Rsh32x8 x y) -> (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) (Rsh16x64 x y) -> (SARW x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [16]))))) (Rsh16x32 x y) -> (SARW x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [16]))))) -(Rsh16x16 x y) -> (SARW x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) -(Rsh16x8 x y) -> (SARW x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) +(Rsh16x16 x y) -> (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) +(Rsh16x8 x y) -> (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) (Rsh8x64 x y) -> (SARB x (ORQ y (NOTQ (SBBQcarrymask (CMPQconst y [8]))))) (Rsh8x32 x y) -> (SARB x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [8]))))) -(Rsh8x16 x y) -> (SARB x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) -(Rsh8x8 x y) -> (SARB x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) +(Rsh8x16 x y) -> (SARB x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) +(Rsh8x8 x y) -> (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) (Less64 x y) -> (SETL (CMPQ x y)) (Less32 x y) -> (SETL (CMPL x y)) @@ -366,19 +366,19 @@ (Move [size] dst src mem) && (size > 16*64 || 
config.noDuffDevice) && size%8 == 0 -> (REPMOVSQ dst src (MOVQconst [size/8]) mem) -(Not x) -> (XORBconst [1] x) +(Not x) -> (XORLconst [1] x) (OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr) (OffPtr [off] ptr) -> (ADDQ (MOVQconst [off]) ptr) -(Const8 [val]) -> (MOVBconst [val]) -(Const16 [val]) -> (MOVWconst [val]) +(Const8 [val]) -> (MOVLconst [val]) +(Const16 [val]) -> (MOVLconst [val]) (Const32 [val]) -> (MOVLconst [val]) (Const64 [val]) -> (MOVQconst [val]) (Const32F [val]) -> (MOVSSconst [val]) (Const64F [val]) -> (MOVSDconst [val]) (ConstNil) -> (MOVQconst [0]) -(ConstBool [b]) -> (MOVBconst [b]) +(ConstBool [b]) -> (MOVLconst [b]) (Addr {sym} base) -> (LEAQ {sym} base) @@ -439,44 +439,22 @@ (ADDQ (MOVQconst [c]) x) && is32Bit(c) -> (ADDQconst [c] x) (ADDL x (MOVLconst [c])) -> (ADDLconst [c] x) (ADDL (MOVLconst [c]) x) -> (ADDLconst [c] x) -(ADDW x (MOVWconst [c])) -> (ADDWconst [c] x) -(ADDW (MOVWconst [c]) x) -> (ADDWconst [c] x) -(ADDB x (MOVBconst [c])) -> (ADDBconst [c] x) -(ADDB (MOVBconst [c]) x) -> (ADDBconst [c] x) (SUBQ x (MOVQconst [c])) && is32Bit(c) -> (SUBQconst x [c]) (SUBQ (MOVQconst [c]) x) && is32Bit(c) -> (NEGQ (SUBQconst x [c])) (SUBL x (MOVLconst [c])) -> (SUBLconst x [c]) (SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst x [c])) -(SUBW x (MOVWconst [c])) -> (SUBWconst x [c]) -(SUBW (MOVWconst [c]) x) -> (NEGW (SUBWconst x [c])) -(SUBB x (MOVBconst [c])) -> (SUBBconst x [c]) -(SUBB (MOVBconst [c]) x) -> (NEGB (SUBBconst x [c])) (MULQ x (MOVQconst [c])) && is32Bit(c) -> (MULQconst [c] x) (MULQ (MOVQconst [c]) x) && is32Bit(c) -> (MULQconst [c] x) (MULL x (MOVLconst [c])) -> (MULLconst [c] x) (MULL (MOVLconst [c]) x) -> (MULLconst [c] x) -(MULW x (MOVWconst [c])) -> (MULWconst [c] x) -(MULW (MOVWconst [c]) x) -> (MULWconst [c] x) -(MULB x (MOVBconst [c])) -> (MULBconst [c] x) -(MULB (MOVBconst [c]) x) -> (MULBconst [c] x) (ANDQ x (MOVQconst [c])) && is32Bit(c) -> (ANDQconst [c] x) (ANDQ (MOVQconst [c]) x) && is32Bit(c) -> (ANDQconst 
[c] x) (ANDL x (MOVLconst [c])) -> (ANDLconst [c] x) (ANDL (MOVLconst [c]) x) -> (ANDLconst [c] x) -(ANDW x (MOVLconst [c])) -> (ANDWconst [c] x) -(ANDW (MOVLconst [c]) x) -> (ANDWconst [c] x) -(ANDW x (MOVWconst [c])) -> (ANDWconst [c] x) -(ANDW (MOVWconst [c]) x) -> (ANDWconst [c] x) -(ANDB x (MOVLconst [c])) -> (ANDBconst [c] x) -(ANDB (MOVLconst [c]) x) -> (ANDBconst [c] x) -(ANDB x (MOVBconst [c])) -> (ANDBconst [c] x) -(ANDB (MOVBconst [c]) x) -> (ANDBconst [c] x) - -(ANDBconst [c] (ANDBconst [d] x)) -> (ANDBconst [c & d] x) -(ANDWconst [c] (ANDWconst [d] x)) -> (ANDWconst [c & d] x) + (ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x) (ANDQconst [c] (ANDQconst [d] x)) -> (ANDQconst [c & d] x) @@ -484,108 +462,64 @@ (ORQ (MOVQconst [c]) x) && is32Bit(c) -> (ORQconst [c] x) (ORL x (MOVLconst [c])) -> (ORLconst [c] x) (ORL (MOVLconst [c]) x) -> (ORLconst [c] x) -(ORW x (MOVWconst [c])) -> (ORWconst [c] x) -(ORW (MOVWconst [c]) x) -> (ORWconst [c] x) -(ORB x (MOVBconst [c])) -> (ORBconst [c] x) -(ORB (MOVBconst [c]) x) -> (ORBconst [c] x) (XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x) (XORQ (MOVQconst [c]) x) && is32Bit(c) -> (XORQconst [c] x) (XORL x (MOVLconst [c])) -> (XORLconst [c] x) (XORL (MOVLconst [c]) x) -> (XORLconst [c] x) -(XORW x (MOVWconst [c])) -> (XORWconst [c] x) -(XORW (MOVWconst [c]) x) -> (XORWconst [c] x) -(XORB x (MOVBconst [c])) -> (XORBconst [c] x) -(XORB (MOVBconst [c]) x) -> (XORBconst [c] x) (SHLQ x (MOVQconst [c])) -> (SHLQconst [c&63] x) (SHLQ x (MOVLconst [c])) -> (SHLQconst [c&63] x) -(SHLQ x (MOVWconst [c])) -> (SHLQconst [c&63] x) -(SHLQ x (MOVBconst [c])) -> (SHLQconst [c&63] x) (SHLL x (MOVQconst [c])) -> (SHLLconst [c&31] x) (SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x) -(SHLL x (MOVWconst [c])) -> (SHLLconst [c&31] x) -(SHLL x (MOVBconst [c])) -> (SHLLconst [c&31] x) - -(SHLW x (MOVQconst [c])) -> (SHLWconst [c&31] x) -(SHLW x (MOVLconst [c])) -> (SHLWconst [c&31] x) -(SHLW x (MOVWconst [c])) -> 
(SHLWconst [c&31] x) -(SHLW x (MOVBconst [c])) -> (SHLWconst [c&31] x) - -(SHLB x (MOVQconst [c])) -> (SHLBconst [c&31] x) -(SHLB x (MOVLconst [c])) -> (SHLBconst [c&31] x) -(SHLB x (MOVWconst [c])) -> (SHLBconst [c&31] x) -(SHLB x (MOVBconst [c])) -> (SHLBconst [c&31] x) (SHRQ x (MOVQconst [c])) -> (SHRQconst [c&63] x) (SHRQ x (MOVLconst [c])) -> (SHRQconst [c&63] x) -(SHRQ x (MOVWconst [c])) -> (SHRQconst [c&63] x) -(SHRQ x (MOVBconst [c])) -> (SHRQconst [c&63] x) (SHRL x (MOVQconst [c])) -> (SHRLconst [c&31] x) (SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x) -(SHRL x (MOVWconst [c])) -> (SHRLconst [c&31] x) -(SHRL x (MOVBconst [c])) -> (SHRLconst [c&31] x) (SHRW x (MOVQconst [c])) -> (SHRWconst [c&31] x) (SHRW x (MOVLconst [c])) -> (SHRWconst [c&31] x) -(SHRW x (MOVWconst [c])) -> (SHRWconst [c&31] x) -(SHRW x (MOVBconst [c])) -> (SHRWconst [c&31] x) (SHRB x (MOVQconst [c])) -> (SHRBconst [c&31] x) (SHRB x (MOVLconst [c])) -> (SHRBconst [c&31] x) -(SHRB x (MOVWconst [c])) -> (SHRBconst [c&31] x) -(SHRB x (MOVBconst [c])) -> (SHRBconst [c&31] x) (SARQ x (MOVQconst [c])) -> (SARQconst [c&63] x) (SARQ x (MOVLconst [c])) -> (SARQconst [c&63] x) -(SARQ x (MOVWconst [c])) -> (SARQconst [c&63] x) -(SARQ x (MOVBconst [c])) -> (SARQconst [c&63] x) (SARL x (MOVQconst [c])) -> (SARLconst [c&31] x) (SARL x (MOVLconst [c])) -> (SARLconst [c&31] x) -(SARL x (MOVWconst [c])) -> (SARLconst [c&31] x) -(SARL x (MOVBconst [c])) -> (SARLconst [c&31] x) (SARW x (MOVQconst [c])) -> (SARWconst [c&31] x) (SARW x (MOVLconst [c])) -> (SARWconst [c&31] x) -(SARW x (MOVWconst [c])) -> (SARWconst [c&31] x) -(SARW x (MOVBconst [c])) -> (SARWconst [c&31] x) (SARB x (MOVQconst [c])) -> (SARBconst [c&31] x) (SARB x (MOVLconst [c])) -> (SARBconst [c&31] x) -(SARB x (MOVWconst [c])) -> (SARBconst [c&31] x) -(SARB x (MOVBconst [c])) -> (SARBconst [c&31] x) -(SARB x (ANDBconst [31] y)) -> (SARB x y) -(SARW x (ANDWconst [31] y)) -> (SARW x y) (SARL x (ANDLconst [31] y)) -> (SARL x y) (SARQ x 
(ANDQconst [63] y)) -> (SARQ x y) -(SHLB x (ANDBconst [31] y)) -> (SHLB x y) -(SHLW x (ANDWconst [31] y)) -> (SHLW x y) (SHLL x (ANDLconst [31] y)) -> (SHLL x y) (SHLQ x (ANDQconst [63] y)) -> (SHLQ x y) -(SHRB x (ANDBconst [31] y)) -> (SHRB x y) -(SHRW x (ANDWconst [31] y)) -> (SHRW x y) (SHRL x (ANDLconst [31] y)) -> (SHRL x y) (SHRQ x (ANDQconst [63] y)) -> (SHRQ x y) // Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits) // because the x86 instructions are defined to use all 5 bits of the shift even // for the small shifts. I don't think we'll ever generate a weird shift (e.g. -// (SHLW x (MOVWconst [24])), but just in case. +// (SHRW x (MOVLconst [24])), but just in case. (CMPQ x (MOVQconst [c])) && is32Bit(c) -> (CMPQconst x [c]) (CMPQ (MOVQconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPQconst x [c])) (CMPL x (MOVLconst [c])) -> (CMPLconst x [c]) (CMPL (MOVLconst [c]) x) -> (InvertFlags (CMPLconst x [c])) -(CMPW x (MOVWconst [c])) -> (CMPWconst x [c]) -(CMPW (MOVWconst [c]) x) -> (InvertFlags (CMPWconst x [c])) -(CMPB x (MOVBconst [c])) -> (CMPBconst x [c]) -(CMPB (MOVBconst [c]) x) -> (InvertFlags (CMPBconst x [c])) +(CMPW x (MOVLconst [c])) -> (CMPWconst x [int64(int16(c))]) +(CMPW (MOVLconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int16(c))])) +(CMPB x (MOVLconst [c])) -> (CMPBconst x [int64(int8(c))]) +(CMPB (MOVLconst [c]) x) -> (InvertFlags (CMPBconst x [int64(int8(c))])) // Using MOVBQZX instead of ANDQ is cheaper. (ANDQconst [0xFF] x) -> (MOVBQZX x) @@ -709,12 +643,12 @@ (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x // Fold extensions and ANDs together. 
-(MOVBQZX (ANDBconst [c] x)) -> (ANDQconst [c & 0xff] x) -(MOVWQZX (ANDWconst [c] x)) -> (ANDQconst [c & 0xffff] x) -(MOVLQZX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDQconst [c & 0x7fffffff] x) -(MOVBQSX (ANDBconst [c] x)) && c & 0x80 == 0 -> (ANDQconst [c & 0x7f] x) -(MOVWQSX (ANDWconst [c] x)) && c & 0x8000 == 0 -> (ANDQconst [c & 0x7fff] x) -(MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDQconst [c & 0x7fffffff] x) +(MOVBQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xff] x) +(MOVWQZX (ANDLconst [c] x)) -> (ANDLconst [c & 0xffff] x) +(MOVLQZX (ANDLconst [c] x)) -> (ANDLconst [c] x) +(MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 -> (ANDLconst [c & 0x7f] x) +(MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 -> (ANDLconst [c & 0x7fff] x) +(MOVLQSX (ANDLconst [c] x)) && c & 0x80000000 == 0 -> (ANDLconst [c & 0x7fffffff] x) // Don't extend before storing (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) -> (MOVLstore [off] {sym} ptr x mem) @@ -750,9 +684,9 @@ (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) -> (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) -(MOVWstore [off] {sym} ptr (MOVWconst [c]) mem) && validOff(off) -> +(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) -> (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) -(MOVBstore [off] {sym} ptr (MOVBconst [c]) mem) && validOff(off) -> +(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) && validOff(off) -> (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) // Fold address offsets into constant stores. 
@@ -1086,16 +1020,16 @@ (CMPLconst (MOVLconst [x]) [y]) && int32(x)uint32(y) -> (FlagLT_UGT) (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x) (FlagGT_ULT) (CMPLconst (MOVLconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT) -(CMPWconst (MOVWconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ) -(CMPWconst (MOVWconst [x]) [y]) && int16(x) (FlagLT_ULT) -(CMPWconst (MOVWconst [x]) [y]) && int16(x)uint16(y) -> (FlagLT_UGT) -(CMPWconst (MOVWconst [x]) [y]) && int16(x)>int16(y) && uint16(x) (FlagGT_ULT) -(CMPWconst (MOVWconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT) -(CMPBconst (MOVBconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ) -(CMPBconst (MOVBconst [x]) [y]) && int8(x) (FlagLT_ULT) -(CMPBconst (MOVBconst [x]) [y]) && int8(x)uint8(y) -> (FlagLT_UGT) -(CMPBconst (MOVBconst [x]) [y]) && int8(x)>int8(y) && uint8(x) (FlagGT_ULT) -(CMPBconst (MOVBconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)==int16(y) -> (FlagEQ) +(CMPWconst (MOVLconst [x]) [y]) && int16(x) (FlagLT_ULT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)uint16(y) -> (FlagLT_UGT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x) (FlagGT_ULT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)>int16(y) && uint16(x)>uint16(y) -> (FlagGT_UGT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)==int8(y) -> (FlagEQ) +(CMPBconst (MOVLconst [x]) [y]) && int8(x) (FlagLT_ULT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)uint8(y) -> (FlagLT_UGT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x) (FlagGT_ULT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)>int8(y) && uint8(x)>uint8(y) -> (FlagGT_UGT) // Other known comparisons. 
(CMPQconst (MOVBQZX _) [c]) && 0xFF < c -> (FlagLT_ULT) @@ -1105,8 +1039,8 @@ (CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1< (FlagLT_ULT) (CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n -> (FlagLT_ULT) (CMPLconst (ANDLconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT) -(CMPWconst (ANDWconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT) -(CMPBconst (ANDBconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT) +(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < int16(n) -> (FlagLT_ULT) +(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < int8(n) -> (FlagLT_ULT) // TODO: DIVxU also. // Absorb flag constants into SBB ops. @@ -1183,181 +1117,140 @@ (UGE (FlagGT_UGT) yes no) -> (First nil yes no) // Absorb flag constants into SETxx ops. -(SETEQ (FlagEQ)) -> (MOVBconst [1]) -(SETEQ (FlagLT_ULT)) -> (MOVBconst [0]) -(SETEQ (FlagLT_UGT)) -> (MOVBconst [0]) -(SETEQ (FlagGT_ULT)) -> (MOVBconst [0]) -(SETEQ (FlagGT_UGT)) -> (MOVBconst [0]) - -(SETNE (FlagEQ)) -> (MOVBconst [0]) -(SETNE (FlagLT_ULT)) -> (MOVBconst [1]) -(SETNE (FlagLT_UGT)) -> (MOVBconst [1]) -(SETNE (FlagGT_ULT)) -> (MOVBconst [1]) -(SETNE (FlagGT_UGT)) -> (MOVBconst [1]) - -(SETL (FlagEQ)) -> (MOVBconst [0]) -(SETL (FlagLT_ULT)) -> (MOVBconst [1]) -(SETL (FlagLT_UGT)) -> (MOVBconst [1]) -(SETL (FlagGT_ULT)) -> (MOVBconst [0]) -(SETL (FlagGT_UGT)) -> (MOVBconst [0]) - -(SETLE (FlagEQ)) -> (MOVBconst [1]) -(SETLE (FlagLT_ULT)) -> (MOVBconst [1]) -(SETLE (FlagLT_UGT)) -> (MOVBconst [1]) -(SETLE (FlagGT_ULT)) -> (MOVBconst [0]) -(SETLE (FlagGT_UGT)) -> (MOVBconst [0]) - -(SETG (FlagEQ)) -> (MOVBconst [0]) -(SETG (FlagLT_ULT)) -> (MOVBconst [0]) -(SETG (FlagLT_UGT)) -> (MOVBconst [0]) -(SETG (FlagGT_ULT)) -> (MOVBconst [1]) -(SETG (FlagGT_UGT)) -> (MOVBconst [1]) - -(SETGE (FlagEQ)) -> (MOVBconst [1]) -(SETGE (FlagLT_ULT)) -> (MOVBconst [0]) -(SETGE (FlagLT_UGT)) -> (MOVBconst [0]) -(SETGE (FlagGT_ULT)) 
-> (MOVBconst [1]) -(SETGE (FlagGT_UGT)) -> (MOVBconst [1]) - -(SETB (FlagEQ)) -> (MOVBconst [0]) -(SETB (FlagLT_ULT)) -> (MOVBconst [1]) -(SETB (FlagLT_UGT)) -> (MOVBconst [0]) -(SETB (FlagGT_ULT)) -> (MOVBconst [1]) -(SETB (FlagGT_UGT)) -> (MOVBconst [0]) - -(SETBE (FlagEQ)) -> (MOVBconst [1]) -(SETBE (FlagLT_ULT)) -> (MOVBconst [1]) -(SETBE (FlagLT_UGT)) -> (MOVBconst [0]) -(SETBE (FlagGT_ULT)) -> (MOVBconst [1]) -(SETBE (FlagGT_UGT)) -> (MOVBconst [0]) - -(SETA (FlagEQ)) -> (MOVBconst [0]) -(SETA (FlagLT_ULT)) -> (MOVBconst [0]) -(SETA (FlagLT_UGT)) -> (MOVBconst [1]) -(SETA (FlagGT_ULT)) -> (MOVBconst [0]) -(SETA (FlagGT_UGT)) -> (MOVBconst [1]) - -(SETAE (FlagEQ)) -> (MOVBconst [1]) -(SETAE (FlagLT_ULT)) -> (MOVBconst [0]) -(SETAE (FlagLT_UGT)) -> (MOVBconst [1]) -(SETAE (FlagGT_ULT)) -> (MOVBconst [0]) -(SETAE (FlagGT_UGT)) -> (MOVBconst [1]) +(SETEQ (FlagEQ)) -> (MOVLconst [1]) +(SETEQ (FlagLT_ULT)) -> (MOVLconst [0]) +(SETEQ (FlagLT_UGT)) -> (MOVLconst [0]) +(SETEQ (FlagGT_ULT)) -> (MOVLconst [0]) +(SETEQ (FlagGT_UGT)) -> (MOVLconst [0]) + +(SETNE (FlagEQ)) -> (MOVLconst [0]) +(SETNE (FlagLT_ULT)) -> (MOVLconst [1]) +(SETNE (FlagLT_UGT)) -> (MOVLconst [1]) +(SETNE (FlagGT_ULT)) -> (MOVLconst [1]) +(SETNE (FlagGT_UGT)) -> (MOVLconst [1]) + +(SETL (FlagEQ)) -> (MOVLconst [0]) +(SETL (FlagLT_ULT)) -> (MOVLconst [1]) +(SETL (FlagLT_UGT)) -> (MOVLconst [1]) +(SETL (FlagGT_ULT)) -> (MOVLconst [0]) +(SETL (FlagGT_UGT)) -> (MOVLconst [0]) + +(SETLE (FlagEQ)) -> (MOVLconst [1]) +(SETLE (FlagLT_ULT)) -> (MOVLconst [1]) +(SETLE (FlagLT_UGT)) -> (MOVLconst [1]) +(SETLE (FlagGT_ULT)) -> (MOVLconst [0]) +(SETLE (FlagGT_UGT)) -> (MOVLconst [0]) + +(SETG (FlagEQ)) -> (MOVLconst [0]) +(SETG (FlagLT_ULT)) -> (MOVLconst [0]) +(SETG (FlagLT_UGT)) -> (MOVLconst [0]) +(SETG (FlagGT_ULT)) -> (MOVLconst [1]) +(SETG (FlagGT_UGT)) -> (MOVLconst [1]) + +(SETGE (FlagEQ)) -> (MOVLconst [1]) +(SETGE (FlagLT_ULT)) -> (MOVLconst [0]) +(SETGE (FlagLT_UGT)) -> (MOVLconst [0]) +(SETGE 
(FlagGT_ULT)) -> (MOVLconst [1]) +(SETGE (FlagGT_UGT)) -> (MOVLconst [1]) + +(SETB (FlagEQ)) -> (MOVLconst [0]) +(SETB (FlagLT_ULT)) -> (MOVLconst [1]) +(SETB (FlagLT_UGT)) -> (MOVLconst [0]) +(SETB (FlagGT_ULT)) -> (MOVLconst [1]) +(SETB (FlagGT_UGT)) -> (MOVLconst [0]) + +(SETBE (FlagEQ)) -> (MOVLconst [1]) +(SETBE (FlagLT_ULT)) -> (MOVLconst [1]) +(SETBE (FlagLT_UGT)) -> (MOVLconst [0]) +(SETBE (FlagGT_ULT)) -> (MOVLconst [1]) +(SETBE (FlagGT_UGT)) -> (MOVLconst [0]) + +(SETA (FlagEQ)) -> (MOVLconst [0]) +(SETA (FlagLT_ULT)) -> (MOVLconst [0]) +(SETA (FlagLT_UGT)) -> (MOVLconst [1]) +(SETA (FlagGT_ULT)) -> (MOVLconst [0]) +(SETA (FlagGT_UGT)) -> (MOVLconst [1]) + +(SETAE (FlagEQ)) -> (MOVLconst [1]) +(SETAE (FlagLT_ULT)) -> (MOVLconst [0]) +(SETAE (FlagLT_UGT)) -> (MOVLconst [1]) +(SETAE (FlagGT_ULT)) -> (MOVLconst [0]) +(SETAE (FlagGT_UGT)) -> (MOVLconst [1]) // Remove redundant *const ops (ADDQconst [0] x) -> x (ADDLconst [c] x) && int32(c)==0 -> x -(ADDWconst [c] x) && int16(c)==0 -> x -(ADDBconst [c] x) && int8(c)==0 -> x (SUBQconst [0] x) -> x (SUBLconst [c] x) && int32(c) == 0 -> x -(SUBWconst [c] x) && int16(c) == 0 -> x -(SUBBconst [c] x) && int8(c) == 0 -> x (ANDQconst [0] _) -> (MOVQconst [0]) (ANDLconst [c] _) && int32(c)==0 -> (MOVLconst [0]) -(ANDWconst [c] _) && int16(c)==0 -> (MOVWconst [0]) -(ANDBconst [c] _) && int8(c)==0 -> (MOVBconst [0]) (ANDQconst [-1] x) -> x (ANDLconst [c] x) && int32(c)==-1 -> x -(ANDWconst [c] x) && int16(c)==-1 -> x -(ANDBconst [c] x) && int8(c)==-1 -> x (ORQconst [0] x) -> x (ORLconst [c] x) && int32(c)==0 -> x -(ORWconst [c] x) && int16(c)==0 -> x -(ORBconst [c] x) && int8(c)==0 -> x (ORQconst [-1] _) -> (MOVQconst [-1]) (ORLconst [c] _) && int32(c)==-1 -> (MOVLconst [-1]) -(ORWconst [c] _) && int16(c)==-1 -> (MOVWconst [-1]) -(ORBconst [c] _) && int8(c)==-1 -> (MOVBconst [-1]) (XORQconst [0] x) -> x (XORLconst [c] x) && int32(c)==0 -> x -(XORWconst [c] x) && int16(c)==0 -> x -(XORBconst [c] x) && int8(c)==0 -> x +// 
TODO: since we got rid of the W/B versions, we might miss +// things like (ANDLconst [0x100] x) which were formerly +// (ANDBconst [0] x). Probably doesn't happen very often. +// If we cared, we might do: +// (ANDLconst [c] x) && t.Size()==1 && int8(x)==0 -> (MOVLconst [0]) // Convert constant subtracts to constant adds (SUBQconst [c] x) && c != -(1<<31) -> (ADDQconst [-c] x) (SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x) -(SUBWconst [c] x) -> (ADDWconst [int64(int16(-c))] x) -(SUBBconst [c] x) -> (ADDBconst [int64(int8(-c))] x) // generic constant folding // TODO: more of this (ADDQconst [c] (MOVQconst [d])) -> (MOVQconst [c+d]) (ADDLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c+d))]) -(ADDWconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int16(c+d))]) -(ADDBconst [c] (MOVBconst [d])) -> (MOVBconst [int64(int8(c+d))]) (ADDQconst [c] (ADDQconst [d] x)) && is32Bit(c+d) -> (ADDQconst [c+d] x) (ADDLconst [c] (ADDLconst [d] x)) -> (ADDLconst [int64(int32(c+d))] x) -(ADDWconst [c] (ADDWconst [d] x)) -> (ADDWconst [int64(int16(c+d))] x) -(ADDBconst [c] (ADDBconst [d] x)) -> (ADDBconst [int64(int8(c+d))] x) (SUBQconst (MOVQconst [d]) [c]) -> (MOVQconst [d-c]) (SUBLconst (MOVLconst [d]) [c]) -> (MOVLconst [int64(int32(d-c))]) -(SUBWconst (MOVWconst [d]) [c]) -> (MOVWconst [int64(int16(d-c))]) -(SUBBconst (MOVBconst [d]) [c]) -> (MOVBconst [int64(int8(d-c))]) (SUBQconst (SUBQconst x [d]) [c]) && is32Bit(-c-d) -> (ADDQconst [-c-d] x) (SUBLconst (SUBLconst x [d]) [c]) -> (ADDLconst [int64(int32(-c-d))] x) -(SUBWconst (SUBWconst x [d]) [c]) -> (ADDWconst [int64(int16(-c-d))] x) -(SUBBconst (SUBBconst x [d]) [c]) -> (ADDBconst [int64(int8(-c-d))] x) (SARQconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)]) (SARLconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)]) (SARWconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)]) (SARBconst [c] (MOVQconst [d])) -> (MOVQconst [d>>uint64(c)]) (NEGQ (MOVQconst [c])) -> (MOVQconst [-c]) (NEGL (MOVLconst [c])) -> 
(MOVLconst [int64(int32(-c))]) -(NEGW (MOVWconst [c])) -> (MOVWconst [int64(int16(-c))]) -(NEGB (MOVBconst [c])) -> (MOVBconst [int64(int8(-c))]) (MULQconst [c] (MOVQconst [d])) -> (MOVQconst [c*d]) (MULLconst [c] (MOVLconst [d])) -> (MOVLconst [int64(int32(c*d))]) -(MULWconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int16(c*d))]) -(MULBconst [c] (MOVBconst [d])) -> (MOVBconst [int64(int8(c*d))]) (ANDQconst [c] (MOVQconst [d])) -> (MOVQconst [c&d]) (ANDLconst [c] (MOVLconst [d])) -> (MOVLconst [c&d]) -(ANDWconst [c] (MOVWconst [d])) -> (MOVWconst [c&d]) -(ANDBconst [c] (MOVBconst [d])) -> (MOVBconst [c&d]) (ORQconst [c] (MOVQconst [d])) -> (MOVQconst [c|d]) (ORLconst [c] (MOVLconst [d])) -> (MOVLconst [c|d]) -(ORWconst [c] (MOVWconst [d])) -> (MOVWconst [c|d]) -(ORBconst [c] (MOVBconst [d])) -> (MOVBconst [c|d]) (XORQconst [c] (MOVQconst [d])) -> (MOVQconst [c^d]) (XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d]) -(XORWconst [c] (MOVWconst [d])) -> (MOVWconst [c^d]) -(XORBconst [c] (MOVBconst [d])) -> (MOVBconst [c^d]) (NOTQ (MOVQconst [c])) -> (MOVQconst [^c]) (NOTL (MOVLconst [c])) -> (MOVLconst [^c]) -(NOTW (MOVWconst [c])) -> (MOVWconst [^c]) -(NOTB (MOVBconst [c])) -> (MOVBconst [^c]) // generic simplifications // TODO: more of this (ADDQ x (NEGQ y)) -> (SUBQ x y) (ADDL x (NEGL y)) -> (SUBL x y) -(ADDW x (NEGW y)) -> (SUBW x y) -(ADDB x (NEGB y)) -> (SUBB x y) (SUBQ x x) -> (MOVQconst [0]) (SUBL x x) -> (MOVLconst [0]) -(SUBW x x) -> (MOVWconst [0]) -(SUBB x x) -> (MOVBconst [0]) (ANDQ x x) -> x (ANDL x x) -> x -(ANDW x x) -> x -(ANDB x x) -> x (ORQ x x) -> x (ORL x x) -> x -(ORW x x) -> x -(ORB x x) -> x (XORQ x x) -> (MOVQconst [0]) (XORL x x) -> (MOVLconst [0]) -(XORW x x) -> (MOVWconst [0]) -(XORB x x) -> (MOVBconst [0]) // checking AND against 0. 
(CMPQconst (ANDQ x y) [0]) -> (TESTQ x y) (CMPLconst (ANDL x y) [0]) -> (TESTL x y) -(CMPWconst (ANDW x y) [0]) -> (TESTW x y) -(CMPBconst (ANDB x y) [0]) -> (TESTB x y) +(CMPWconst (ANDL x y) [0]) -> (TESTW x y) +(CMPBconst (ANDL x y) [0]) -> (TESTB x y) (CMPQconst (ANDQconst [c] x) [0]) -> (TESTQconst [c] x) (CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x) -(CMPWconst (ANDWconst [c] x) [0]) -> (TESTWconst [c] x) -(CMPBconst (ANDBconst [c] x) [0]) -> (TESTBconst [c] x) +(CMPWconst (ANDLconst [c] x) [0]) -> (TESTWconst [int64(int16(c))] x) +(CMPBconst (ANDLconst [c] x) [0]) -> (TESTBconst [int64(int8(c))] x) // TEST %reg,%reg is shorter than CMP (CMPQconst x [0]) -> (TESTQ x x) @@ -1368,8 +1261,8 @@ // Combining byte loads into larger (unaligned) loads. // There are many ways these combinations could occur. This is // designed to match the way encoding/binary.LittleEndian does it. -(ORW x0:(MOVBload [i] {s} p mem) - s0:(SHLWconst [8] x1:(MOVBload [i+1] {s} p mem))) +(ORL x0:(MOVBload [i] {s} p mem) + s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 @@ -1459,8 +1352,8 @@ && clobber(o5) -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) -(ORW x0:(MOVBloadidx1 [i] {s} p idx mem) - s0:(SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) +(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) + s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index 88bb6bc542..35eeb61941 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -190,30 +190,18 @@ func init() { // binary ops {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true}, // arg0 + arg1 {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true}, // arg0 + arg1 - {name: "ADDW", argLength: 2, reg: gp21sp, asm: 
"ADDL", commutative: true}, // arg0 + arg1 - {name: "ADDB", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true}, // arg0 + arg1 {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int64", typ: "UInt64"}, // arg0 + auxint {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32"}, // arg0 + auxint - {name: "ADDWconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int16"}, // arg0 + auxint - {name: "ADDBconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int8"}, // arg0 + auxint {name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true}, // arg0 - arg1 {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true}, // arg0 - arg1 - {name: "SUBW", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true}, // arg0 - arg1 - {name: "SUBB", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true}, // arg0 - arg1 {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int64", resultInArg0: true}, // arg0 - auxint {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true}, // arg0 - auxint - {name: "SUBWconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int16", resultInArg0: true}, // arg0 - auxint - {name: "SUBBconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int8", resultInArg0: true}, // arg0 - auxint {name: "MULQ", argLength: 2, reg: gp21, asm: "IMULQ", commutative: true, resultInArg0: true}, // arg0 * arg1 {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true}, // arg0 * arg1 - {name: "MULW", argLength: 2, reg: gp21, asm: "IMULW", commutative: true, resultInArg0: true}, // arg0 * arg1 - {name: "MULB", argLength: 2, reg: gp21, asm: "IMULW", commutative: true, resultInArg0: true}, // arg0 * arg1 {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int64", resultInArg0: true}, // arg0 * auxint {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMULL", aux: "Int32", resultInArg0: true}, // arg0 * auxint - {name: 
"MULWconst", argLength: 1, reg: gp11, asm: "IMULW", aux: "Int16", resultInArg0: true}, // arg0 * auxint - {name: "MULBconst", argLength: 1, reg: gp11, asm: "IMULW", aux: "Int8", resultInArg0: true}, // arg0 * auxint {name: "HMULQ", argLength: 2, reg: gp11hmul, asm: "IMULQ"}, // (arg0 * arg1) >> width {name: "HMULL", argLength: 2, reg: gp11hmul, asm: "IMULL"}, // (arg0 * arg1) >> width @@ -242,30 +230,18 @@ func init() { {name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true}, // arg0 & arg1 {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true}, // arg0 & arg1 - {name: "ANDW", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true}, // arg0 & arg1 - {name: "ANDB", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true}, // arg0 & arg1 {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int64", resultInArg0: true}, // arg0 & auxint {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true}, // arg0 & auxint - {name: "ANDWconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int16", resultInArg0: true}, // arg0 & auxint - {name: "ANDBconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int8", resultInArg0: true}, // arg0 & auxint {name: "ORQ", argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true}, // arg0 | arg1 {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true}, // arg0 | arg1 - {name: "ORW", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true}, // arg0 | arg1 - {name: "ORB", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true}, // arg0 | arg1 {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int64", resultInArg0: true}, // arg0 | auxint {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true}, // arg0 | auxint - {name: "ORWconst", argLength: 1, reg: gp11, asm: 
"ORL", aux: "Int16", resultInArg0: true}, // arg0 | auxint - {name: "ORBconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int8", resultInArg0: true}, // arg0 | auxint {name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true}, // arg0 ^ arg1 {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true}, // arg0 ^ arg1 - {name: "XORW", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true}, // arg0 ^ arg1 - {name: "XORB", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true}, // arg0 ^ arg1 {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int64", resultInArg0: true}, // arg0 ^ auxint {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true}, // arg0 ^ auxint - {name: "XORWconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int16", resultInArg0: true}, // arg0 ^ auxint - {name: "XORBconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int8", resultInArg0: true}, // arg0 ^ auxint {name: "CMPQ", argLength: 2, reg: gp2flags, asm: "CMPQ", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1 @@ -290,12 +266,8 @@ func init() { {name: "SHLQ", argLength: 2, reg: gp21shift, asm: "SHLQ", resultInArg0: true}, // arg0 << arg1, shift amount is mod 64 {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true}, // arg0 << arg1, shift amount is mod 32 - {name: "SHLW", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true}, // arg0 << arg1, shift amount is mod 32 - {name: "SHLB", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true}, // arg0 << arg1, shift amount is mod 32 {name: "SHLQconst", argLength: 1, reg: gp11, asm: "SHLQ", aux: "Int64", resultInArg0: true}, // arg0 << auxint, shift amount 0-63 {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int32", resultInArg0: true}, // arg0 << auxint, 
shift amount 0-31 - {name: "SHLWconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int16", resultInArg0: true}, // arg0 << auxint, shift amount 0-31 - {name: "SHLBconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int8", resultInArg0: true}, // arg0 << auxint, shift amount 0-31 // Note: x86 is weird, the 16 and 8 byte shifts still use all 5 bits of shift amount! {name: "SHRQ", argLength: 2, reg: gp21shift, asm: "SHRQ", resultInArg0: true}, // unsigned arg0 >> arg1, shift amount is mod 64 @@ -324,13 +296,9 @@ func init() { // unary ops {name: "NEGQ", argLength: 1, reg: gp11, asm: "NEGQ", resultInArg0: true}, // -arg0 {name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true}, // -arg0 - {name: "NEGW", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true}, // -arg0 - {name: "NEGB", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true}, // -arg0 {name: "NOTQ", argLength: 1, reg: gp11, asm: "NOTQ", resultInArg0: true}, // ^arg0 {name: "NOTL", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true}, // ^arg0 - {name: "NOTW", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true}, // ^arg0 - {name: "NOTB", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true}, // ^arg0 {name: "BSFQ", argLength: 1, reg: gp11, asm: "BSFQ"}, // arg0 # of low-order zeroes ; undef if zero {name: "BSFL", argLength: 1, reg: gp11, asm: "BSFL"}, // arg0 # of low-order zeroes ; undef if zero @@ -385,8 +353,6 @@ func init() { {name: "MOVLQSX", argLength: 1, reg: gp11nf, asm: "MOVLQSX"}, // sign extend arg0 from int32 to int64 {name: "MOVLQZX", argLength: 1, reg: gp11nf, asm: "MOVLQZX"}, // zero extend arg0 from int32 to int64 - {name: "MOVBconst", reg: gp01, asm: "MOVB", typ: "UInt8", aux: "Int8", rematerializeable: true}, // 8 low bits of auxint - {name: "MOVWconst", reg: gp01, asm: "MOVW", typ: "UInt16", aux: "Int16", rematerializeable: true}, // 16 low bits of auxint {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: 
true}, // 32 low bits of auxint {name: "MOVQconst", reg: gp01, asm: "MOVQ", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 381422adfd..70af757194 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -120,28 +120,16 @@ const ( OpAMD64MOVSDstoreidx8 OpAMD64ADDQ OpAMD64ADDL - OpAMD64ADDW - OpAMD64ADDB OpAMD64ADDQconst OpAMD64ADDLconst - OpAMD64ADDWconst - OpAMD64ADDBconst OpAMD64SUBQ OpAMD64SUBL - OpAMD64SUBW - OpAMD64SUBB OpAMD64SUBQconst OpAMD64SUBLconst - OpAMD64SUBWconst - OpAMD64SUBBconst OpAMD64MULQ OpAMD64MULL - OpAMD64MULW - OpAMD64MULB OpAMD64MULQconst OpAMD64MULLconst - OpAMD64MULWconst - OpAMD64MULBconst OpAMD64HMULQ OpAMD64HMULL OpAMD64HMULW @@ -165,28 +153,16 @@ const ( OpAMD64MODWU OpAMD64ANDQ OpAMD64ANDL - OpAMD64ANDW - OpAMD64ANDB OpAMD64ANDQconst OpAMD64ANDLconst - OpAMD64ANDWconst - OpAMD64ANDBconst OpAMD64ORQ OpAMD64ORL - OpAMD64ORW - OpAMD64ORB OpAMD64ORQconst OpAMD64ORLconst - OpAMD64ORWconst - OpAMD64ORBconst OpAMD64XORQ OpAMD64XORL - OpAMD64XORW - OpAMD64XORB OpAMD64XORQconst OpAMD64XORLconst - OpAMD64XORWconst - OpAMD64XORBconst OpAMD64CMPQ OpAMD64CMPL OpAMD64CMPW @@ -207,12 +183,8 @@ const ( OpAMD64TESTBconst OpAMD64SHLQ OpAMD64SHLL - OpAMD64SHLW - OpAMD64SHLB OpAMD64SHLQconst OpAMD64SHLLconst - OpAMD64SHLWconst - OpAMD64SHLBconst OpAMD64SHRQ OpAMD64SHRL OpAMD64SHRW @@ -235,12 +207,8 @@ const ( OpAMD64ROLBconst OpAMD64NEGQ OpAMD64NEGL - OpAMD64NEGW - OpAMD64NEGB OpAMD64NOTQ OpAMD64NOTL - OpAMD64NOTW - OpAMD64NOTB OpAMD64BSFQ OpAMD64BSFL OpAMD64BSFW @@ -280,8 +248,6 @@ const ( OpAMD64MOVWQZX OpAMD64MOVLQSX OpAMD64MOVLQZX - OpAMD64MOVBconst - OpAMD64MOVWconst OpAMD64MOVLconst OpAMD64MOVQconst OpAMD64CVTTSD2SL @@ -1002,38 +968,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ADDW", - argLen: 2, - commutative: true, - asm: x86.AADDL, - reg: regInfo{ - inputs: []inputInfo{ - {1, 
65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "ADDB", - argLen: 2, - commutative: true, - asm: x86.AADDL, - reg: regInfo{ - inputs: []inputInfo{ - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "ADDQconst", auxType: auxInt64, @@ -1064,36 +998,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ADDWconst", - auxType: auxInt16, - argLen: 1, - asm: x86.AADDL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "ADDBconst", - auxType: auxInt8, - argLen: 1, - asm: x86.AADDL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "SUBQ", argLen: 2, @@ -1126,38 +1030,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "SUBW", - argLen: 2, - resultInArg0: true, - asm: x86.ASUBL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "SUBB", - argLen: 2, - resultInArg0: true, - asm: x86.ASUBL, - reg: regInfo{ - inputs: []inputInfo{ - 
{0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "SUBQconst", auxType: auxInt64, @@ -1190,38 +1062,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "SUBWconst", - auxType: auxInt16, - argLen: 1, - resultInArg0: true, - asm: x86.ASUBL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "SUBBconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - asm: x86.ASUBL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "MULQ", argLen: 2, @@ -1256,40 +1096,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "MULW", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AIMULW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "MULB", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AIMULW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "MULQconst", auxType: 
auxInt64, @@ -1322,38 +1128,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "MULWconst", - auxType: auxInt16, - argLen: 1, - resultInArg0: true, - asm: x86.AIMULW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "MULBconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - asm: x86.AIMULW, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "HMULQ", argLen: 2, @@ -1704,40 +1478,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ANDW", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AANDL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "ANDB", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AANDL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "ANDQconst", auxType: auxInt64, @@ -1770,38 +1510,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ANDWconst", - auxType: auxInt16, - argLen: 1, - resultInArg0: true, - asm: x86.AANDL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 
8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "ANDBconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - asm: x86.AANDL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "ORQ", argLen: 2, @@ -1836,40 +1544,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ORW", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "ORB", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "ORQconst", auxType: auxInt64, @@ -1902,38 +1576,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ORWconst", - auxType: auxInt16, - argLen: 1, - resultInArg0: true, - asm: x86.AORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "ORBconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - asm: x86.AORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 
R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "XORQ", argLen: 2, @@ -1968,40 +1610,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "XORW", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AXORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "XORB", - argLen: 2, - commutative: true, - resultInArg0: true, - asm: x86.AXORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - {1, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "XORQconst", auxType: auxInt64, @@ -2034,38 +1642,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "XORWconst", - auxType: auxInt16, - argLen: 1, - resultInArg0: true, - asm: x86.AXORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "XORBconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - asm: x86.AXORL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "CMPQ", argLen: 2, @@ -2350,38 +1926,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "SHLW", - argLen: 2, - resultInArg0: true, - asm: 
x86.ASHLL, - reg: regInfo{ - inputs: []inputInfo{ - {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "SHLB", - argLen: 2, - resultInArg0: true, - asm: x86.ASHLL, - reg: regInfo{ - inputs: []inputInfo{ - {1, 2}, // CX - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "SHLQconst", auxType: auxInt64, @@ -2414,38 +1958,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "SHLWconst", - auxType: auxInt16, - argLen: 1, - resultInArg0: true, - asm: x86.ASHLL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "SHLBconst", - auxType: auxInt8, - argLen: 1, - resultInArg0: true, - asm: x86.ASHLL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "SHRQ", argLen: 2, @@ -2796,36 +2308,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "NEGW", - argLen: 1, - resultInArg0: true, - asm: x86.ANEGL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "NEGB", - argLen: 1, - resultInArg0: true, - asm: x86.ANEGL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - 
clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "NOTQ", argLen: 1, @@ -2856,36 +2338,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "NOTW", - argLen: 1, - resultInArg0: true, - asm: x86.ANOTL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "NOTB", - argLen: 1, - resultInArg0: true, - asm: x86.ANOTL, - reg: regInfo{ - inputs: []inputInfo{ - {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - clobbers: 8589934592, // FLAGS - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "BSFQ", argLen: 1, @@ -3429,30 +2881,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "MOVBconst", - auxType: auxInt8, - argLen: 0, - rematerializeable: true, - asm: x86.AMOVB, - reg: regInfo{ - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, - { - name: "MOVWconst", - auxType: auxInt16, - argLen: 0, - rematerializeable: true, - asm: x86.AMOVW, - reg: regInfo{ - outputs: []regMask{ - 65519, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 - }, - }, - }, { name: "MOVLconst", auxType: auxInt32, diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go index 6f3f690f1e..cf8f452d12 100644 --- a/src/cmd/compile/internal/ssa/regalloc_test.go +++ b/src/cmd/compile/internal/ssa/regalloc_test.go @@ -11,8 +11,8 @@ func TestLiveControlOps(t *testing.T) { f := Fun(c, "entry", Bloc("entry", Valu("mem", OpInitMem, TypeMem, 0, nil), - Valu("x", OpAMD64MOVBconst, TypeInt8, 1, nil), - Valu("y", OpAMD64MOVBconst, TypeInt8, 2, nil), + Valu("x", OpAMD64MOVLconst, TypeInt8, 1, nil), + Valu("y", OpAMD64MOVLconst, TypeInt8, 2, 
nil), Valu("a", OpAMD64TESTB, TypeFlags, 0, nil, "x", "y"), Valu("b", OpAMD64TESTB, TypeFlags, 0, nil, "y", "x"), Eq("a", "if", "exit"), diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 8507959f96..e2c4240ae3 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -8,10 +8,6 @@ import "math" var _ = math.MinInt8 // in case not otherwise used func rewriteValueAMD64(v *Value, config *Config) bool { switch v.Op { - case OpAMD64ADDB: - return rewriteValueAMD64_OpAMD64ADDB(v, config) - case OpAMD64ADDBconst: - return rewriteValueAMD64_OpAMD64ADDBconst(v, config) case OpAMD64ADDL: return rewriteValueAMD64_OpAMD64ADDL(v, config) case OpAMD64ADDLconst: @@ -20,14 +16,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64ADDQ(v, config) case OpAMD64ADDQconst: return rewriteValueAMD64_OpAMD64ADDQconst(v, config) - case OpAMD64ADDW: - return rewriteValueAMD64_OpAMD64ADDW(v, config) - case OpAMD64ADDWconst: - return rewriteValueAMD64_OpAMD64ADDWconst(v, config) - case OpAMD64ANDB: - return rewriteValueAMD64_OpAMD64ANDB(v, config) - case OpAMD64ANDBconst: - return rewriteValueAMD64_OpAMD64ANDBconst(v, config) case OpAMD64ANDL: return rewriteValueAMD64_OpAMD64ANDL(v, config) case OpAMD64ANDLconst: @@ -36,10 +24,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64ANDQ(v, config) case OpAMD64ANDQconst: return rewriteValueAMD64_OpAMD64ANDQconst(v, config) - case OpAMD64ANDW: - return rewriteValueAMD64_OpAMD64ANDW(v, config) - case OpAMD64ANDWconst: - return rewriteValueAMD64_OpAMD64ANDWconst(v, config) case OpAdd16: return rewriteValueAMD64_OpAdd16(v, config) case OpAdd32: @@ -458,10 +442,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64MOVWstoreidx1(v, config) case OpAMD64MOVWstoreidx2: return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config) - 
case OpAMD64MULB: - return rewriteValueAMD64_OpAMD64MULB(v, config) - case OpAMD64MULBconst: - return rewriteValueAMD64_OpAMD64MULBconst(v, config) case OpAMD64MULL: return rewriteValueAMD64_OpAMD64MULL(v, config) case OpAMD64MULLconst: @@ -470,10 +450,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64MULQ(v, config) case OpAMD64MULQconst: return rewriteValueAMD64_OpAMD64MULQconst(v, config) - case OpAMD64MULW: - return rewriteValueAMD64_OpAMD64MULW(v, config) - case OpAMD64MULWconst: - return rewriteValueAMD64_OpAMD64MULWconst(v, config) case OpMod16: return rewriteValueAMD64_OpMod16(v, config) case OpMod16u: @@ -504,22 +480,14 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpMul64F(v, config) case OpMul8: return rewriteValueAMD64_OpMul8(v, config) - case OpAMD64NEGB: - return rewriteValueAMD64_OpAMD64NEGB(v, config) case OpAMD64NEGL: return rewriteValueAMD64_OpAMD64NEGL(v, config) case OpAMD64NEGQ: return rewriteValueAMD64_OpAMD64NEGQ(v, config) - case OpAMD64NEGW: - return rewriteValueAMD64_OpAMD64NEGW(v, config) - case OpAMD64NOTB: - return rewriteValueAMD64_OpAMD64NOTB(v, config) case OpAMD64NOTL: return rewriteValueAMD64_OpAMD64NOTL(v, config) case OpAMD64NOTQ: return rewriteValueAMD64_OpAMD64NOTQ(v, config) - case OpAMD64NOTW: - return rewriteValueAMD64_OpAMD64NOTW(v, config) case OpNeg16: return rewriteValueAMD64_OpNeg16(v, config) case OpNeg32: @@ -550,10 +518,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpNilCheck(v, config) case OpNot: return rewriteValueAMD64_OpNot(v, config) - case OpAMD64ORB: - return rewriteValueAMD64_OpAMD64ORB(v, config) - case OpAMD64ORBconst: - return rewriteValueAMD64_OpAMD64ORBconst(v, config) case OpAMD64ORL: return rewriteValueAMD64_OpAMD64ORL(v, config) case OpAMD64ORLconst: @@ -562,10 +526,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64ORQ(v, config) case 
OpAMD64ORQconst: return rewriteValueAMD64_OpAMD64ORQconst(v, config) - case OpAMD64ORW: - return rewriteValueAMD64_OpAMD64ORW(v, config) - case OpAMD64ORWconst: - return rewriteValueAMD64_OpAMD64ORWconst(v, config) case OpOffPtr: return rewriteValueAMD64_OpOffPtr(v, config) case OpOr16: @@ -680,14 +640,10 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64SETLE(v, config) case OpAMD64SETNE: return rewriteValueAMD64_OpAMD64SETNE(v, config) - case OpAMD64SHLB: - return rewriteValueAMD64_OpAMD64SHLB(v, config) case OpAMD64SHLL: return rewriteValueAMD64_OpAMD64SHLL(v, config) case OpAMD64SHLQ: return rewriteValueAMD64_OpAMD64SHLQ(v, config) - case OpAMD64SHLW: - return rewriteValueAMD64_OpAMD64SHLW(v, config) case OpAMD64SHRB: return rewriteValueAMD64_OpAMD64SHRB(v, config) case OpAMD64SHRL: @@ -696,10 +652,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64SHRQ(v, config) case OpAMD64SHRW: return rewriteValueAMD64_OpAMD64SHRW(v, config) - case OpAMD64SUBB: - return rewriteValueAMD64_OpAMD64SUBB(v, config) - case OpAMD64SUBBconst: - return rewriteValueAMD64_OpAMD64SUBBconst(v, config) case OpAMD64SUBL: return rewriteValueAMD64_OpAMD64SUBL(v, config) case OpAMD64SUBLconst: @@ -708,10 +660,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64SUBQ(v, config) case OpAMD64SUBQconst: return rewriteValueAMD64_OpAMD64SUBQconst(v, config) - case OpAMD64SUBW: - return rewriteValueAMD64_OpAMD64SUBW(v, config) - case OpAMD64SUBWconst: - return rewriteValueAMD64_OpAMD64SUBWconst(v, config) case OpSignExt16to32: return rewriteValueAMD64_OpSignExt16to32(v, config) case OpSignExt16to64: @@ -756,10 +704,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpTrunc64to32(v, config) case OpTrunc64to8: return rewriteValueAMD64_OpTrunc64to8(v, config) - case OpAMD64XORB: - return rewriteValueAMD64_OpAMD64XORB(v, config) - case 
OpAMD64XORBconst: - return rewriteValueAMD64_OpAMD64XORBconst(v, config) case OpAMD64XORL: return rewriteValueAMD64_OpAMD64XORL(v, config) case OpAMD64XORLconst: @@ -768,10 +712,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAMD64XORQ(v, config) case OpAMD64XORQconst: return rewriteValueAMD64_OpAMD64XORQconst(v, config) - case OpAMD64XORW: - return rewriteValueAMD64_OpAMD64XORW(v, config) - case OpAMD64XORWconst: - return rewriteValueAMD64_OpAMD64XORWconst(v, config) case OpXor16: return rewriteValueAMD64_OpXor16(v, config) case OpXor32: @@ -797,105 +737,6 @@ func rewriteValueAMD64(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64ADDB(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ADDB x (MOVBconst [c])) - // cond: - // result: (ADDBconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ADDBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDB (MOVBconst [c]) x) - // cond: - // result: (ADDBconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ADDBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDB x (NEGB y)) - // cond: - // result: (SUBB x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64NEGB { - break - } - y := v_1.Args[0] - v.reset(OpAMD64SUBB) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ADDBconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ADDBconst [c] x) - // cond: int8(c)==0 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int8(c) == 0) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ADDBconst [c] (MOVBconst [d])) - // cond: - // result: (MOVBconst [int64(int8(c+d))]) - for { - c := v.AuxInt - v_0 := 
v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVBconst) - v.AuxInt = int64(int8(c + d)) - return true - } - // match: (ADDBconst [c] (ADDBconst [d] x)) - // cond: - // result: (ADDBconst [int64(int8(c+d))] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDBconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpAMD64ADDBconst) - v.AuxInt = int64(int8(c + d)) - v.AddArg(x) - return true - } - return false -} func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool { b := v.Block _ = b @@ -1418,244 +1259,6 @@ func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64ADDW(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ADDW x (MOVWconst [c])) - // cond: - // result: (ADDWconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ADDWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDW (MOVWconst [c]) x) - // cond: - // result: (ADDWconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ADDWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ADDW x (NEGW y)) - // cond: - // result: (SUBW x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64NEGW { - break - } - y := v_1.Args[0] - v.reset(OpAMD64SUBW) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ADDWconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ADDWconst [c] x) - // cond: int16(c)==0 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int16(c) == 0) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ADDWconst [c] (MOVWconst [d])) - // cond: - // result: (MOVWconst [int64(int16(c+d))]) - for { - c := v.AuxInt - 
v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVWconst) - v.AuxInt = int64(int16(c + d)) - return true - } - // match: (ADDWconst [c] (ADDWconst [d] x)) - // cond: - // result: (ADDWconst [int64(int16(c+d))] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpAMD64ADDWconst) - v.AuxInt = int64(int16(c + d)) - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ANDB(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ANDB x (MOVLconst [c])) - // cond: - // result: (ANDBconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ANDBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDB (MOVLconst [c]) x) - // cond: - // result: (ANDBconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ANDBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDB x (MOVBconst [c])) - // cond: - // result: (ANDBconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ANDBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDB (MOVBconst [c]) x) - // cond: - // result: (ANDBconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ANDBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDB x x) - // cond: - // result: x - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ANDBconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ANDBconst [c] (ANDBconst [d] x)) - // cond: - // 
result: (ANDBconst [c & d] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDBconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpAMD64ANDBconst) - v.AuxInt = c & d - v.AddArg(x) - return true - } - // match: (ANDBconst [c] _) - // cond: int8(c)==0 - // result: (MOVBconst [0]) - for { - c := v.AuxInt - if !(int8(c) == 0) { - break - } - v.reset(OpAMD64MOVBconst) - v.AuxInt = 0 - return true - } - // match: (ANDBconst [c] x) - // cond: int8(c)==-1 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int8(c) == -1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ANDBconst [c] (MOVBconst [d])) - // cond: - // result: (MOVBconst [c&d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVBconst) - v.AuxInt = c & d - return true - } - return false -} func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool { b := v.Block _ = b @@ -1914,155 +1517,16 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64ANDW(v *Value, config *Config) bool { +func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool { b := v.Block _ = b - // match: (ANDW x (MOVLconst [c])) + // match: (Add16 x y) // cond: - // result: (ANDWconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ANDWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDW (MOVLconst [c]) x) - // cond: - // result: (ANDWconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ANDWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDW x (MOVWconst [c])) - // cond: - // result: (ANDWconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c 
:= v_1.AuxInt - v.reset(OpAMD64ANDWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDW (MOVWconst [c]) x) - // cond: - // result: (ANDWconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ANDWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ANDW x x) - // cond: - // result: x - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ANDWconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ANDWconst [c] (ANDWconst [d] x)) - // cond: - // result: (ANDWconst [c & d] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpAMD64ANDWconst) - v.AuxInt = c & d - v.AddArg(x) - return true - } - // match: (ANDWconst [c] _) - // cond: int16(c)==0 - // result: (MOVWconst [0]) - for { - c := v.AuxInt - if !(int16(c) == 0) { - break - } - v.reset(OpAMD64MOVWconst) - v.AuxInt = 0 - return true - } - // match: (ANDWconst [c] x) - // cond: int16(c)==-1 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int16(c) == -1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ANDWconst [c] (MOVWconst [d])) - // cond: - // result: (MOVWconst [c&d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVWconst) - v.AuxInt = c & d - return true - } - return false -} -func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Add16 x y) - // cond: - // result: (ADDW x y) + // result: (ADDL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ADDW) + v.reset(OpAMD64ADDL) v.AddArg(x) v.AddArg(y) return true @@ -2138,11 +1602,11 @@ func rewriteValueAMD64_OpAdd8(v *Value, config 
*Config) bool { _ = b // match: (Add8 x y) // cond: - // result: (ADDB x y) + // result: (ADDL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ADDB) + v.reset(OpAMD64ADDL) v.AddArg(x) v.AddArg(y) return true @@ -2186,11 +1650,11 @@ func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool { _ = b // match: (And16 x y) // cond: - // result: (ANDW x y) + // result: (ANDL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) + v.reset(OpAMD64ANDL) v.AddArg(x) v.AddArg(y) return true @@ -2234,11 +1698,11 @@ func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool { _ = b // match: (And8 x y) // cond: - // result: (ANDB x y) + // result: (ANDL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDB) + v.reset(OpAMD64ANDL) v.AddArg(x) v.AddArg(y) return true @@ -2565,27 +2029,27 @@ func rewriteValueAMD64_OpAMD64CMOVWEQconst(v *Value, config *Config) bool { func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool { b := v.Block _ = b - // match: (CMPB x (MOVBconst [c])) + // match: (CMPB x (MOVLconst [c])) // cond: - // result: (CMPBconst x [c]) + // result: (CMPBconst x [int64(int8(c))]) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { + if v_1.Op != OpAMD64MOVLconst { break } c := v_1.AuxInt v.reset(OpAMD64CMPBconst) v.AddArg(x) - v.AuxInt = c + v.AuxInt = int64(int8(c)) return true } - // match: (CMPB (MOVBconst [c]) x) + // match: (CMPB (MOVLconst [c]) x) // cond: - // result: (InvertFlags (CMPBconst x [c])) + // result: (InvertFlags (CMPBconst x [int64(int8(c))])) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { + if v_0.Op != OpAMD64MOVLconst { break } c := v_0.AuxInt @@ -2593,7 +2057,7 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool { v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) v0.AddArg(x) - v0.AuxInt = c + v0.AuxInt = int64(int8(c)) v.AddArg(v0) return true } @@ -2602,12 +2066,12 @@ func rewriteValueAMD64_OpAMD64CMPB(v *Value, 
config *Config) bool { func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { b := v.Block _ = b - // match: (CMPBconst (MOVBconst [x]) [y]) + // match: (CMPBconst (MOVLconst [x]) [y]) // cond: int8(x)==int8(y) // result: (FlagEQ) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -2618,12 +2082,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagEQ) return true } - // match: (CMPBconst (MOVBconst [x]) [y]) + // match: (CMPBconst (MOVLconst [x]) [y]) // cond: int8(x)uint8(y) // result: (FlagLT_UGT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -2650,12 +2114,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagLT_UGT) return true } - // match: (CMPBconst (MOVBconst [x]) [y]) + // match: (CMPBconst (MOVLconst [x]) [y]) // cond: int8(x)>int8(y) && uint8(x)int8(y) && uint8(x)>uint8(y) // result: (FlagGT_UGT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -2682,12 +2146,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagGT_UGT) return true } - // match: (CMPBconst (ANDBconst _ [m]) [n]) + // match: (CMPBconst (ANDLconst _ [m]) [n]) // cond: 0 <= int8(m) && int8(m) < int8(n) // result: (FlagLT_ULT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDBconst { + if v_0.Op != OpAMD64ANDLconst { break } m := v_0.AuxInt @@ -2698,12 +2162,12 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagLT_ULT) return true } - // match: (CMPBconst (ANDB x y) [0]) + // match: (CMPBconst (ANDL x y) [0]) // cond: // result: (TESTB x y) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDB { + if v_0.Op != OpAMD64ANDL { break } x := v_0.Args[0] @@ -2716,12 +2180,12 @@ func 
rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { v.AddArg(y) return true } - // match: (CMPBconst (ANDBconst [c] x) [0]) + // match: (CMPBconst (ANDLconst [c] x) [0]) // cond: - // result: (TESTBconst [c] x) + // result: (TESTBconst [int64(int8(c))] x) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDBconst { + if v_0.Op != OpAMD64ANDLconst { break } c := v_0.AuxInt @@ -2730,7 +2194,7 @@ func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool { break } v.reset(OpAMD64TESTBconst) - v.AuxInt = c + v.AuxInt = int64(int8(c)) v.AddArg(x) return true } @@ -3209,27 +2673,27 @@ func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool { func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool { b := v.Block _ = b - // match: (CMPW x (MOVWconst [c])) + // match: (CMPW x (MOVLconst [c])) // cond: - // result: (CMPWconst x [c]) + // result: (CMPWconst x [int64(int16(c))]) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { + if v_1.Op != OpAMD64MOVLconst { break } c := v_1.AuxInt v.reset(OpAMD64CMPWconst) v.AddArg(x) - v.AuxInt = c + v.AuxInt = int64(int16(c)) return true } - // match: (CMPW (MOVWconst [c]) x) + // match: (CMPW (MOVLconst [c]) x) // cond: - // result: (InvertFlags (CMPWconst x [c])) + // result: (InvertFlags (CMPWconst x [int64(int16(c))])) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { + if v_0.Op != OpAMD64MOVLconst { break } c := v_0.AuxInt @@ -3237,7 +2701,7 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool { v.reset(OpAMD64InvertFlags) v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) v0.AddArg(x) - v0.AuxInt = c + v0.AuxInt = int64(int16(c)) v.AddArg(v0) return true } @@ -3246,12 +2710,12 @@ func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool { func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { b := v.Block _ = b - // match: (CMPWconst (MOVWconst [x]) [y]) + // match: (CMPWconst (MOVLconst [x]) [y]) // cond: 
int16(x)==int16(y) // result: (FlagEQ) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -3262,12 +2726,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagEQ) return true } - // match: (CMPWconst (MOVWconst [x]) [y]) + // match: (CMPWconst (MOVLconst [x]) [y]) // cond: int16(x)uint16(y) // result: (FlagLT_UGT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -3294,12 +2758,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagLT_UGT) return true } - // match: (CMPWconst (MOVWconst [x]) [y]) + // match: (CMPWconst (MOVLconst [x]) [y]) // cond: int16(x)>int16(y) && uint16(x)int16(y) && uint16(x)>uint16(y) // result: (FlagGT_UGT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { + if v_0.Op != OpAMD64MOVLconst { break } x := v_0.AuxInt @@ -3326,12 +2790,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagGT_UGT) return true } - // match: (CMPWconst (ANDWconst _ [m]) [n]) + // match: (CMPWconst (ANDLconst _ [m]) [n]) // cond: 0 <= int16(m) && int16(m) < int16(n) // result: (FlagLT_ULT) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDWconst { + if v_0.Op != OpAMD64ANDLconst { break } m := v_0.AuxInt @@ -3342,12 +2806,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { v.reset(OpAMD64FlagLT_ULT) return true } - // match: (CMPWconst (ANDW x y) [0]) + // match: (CMPWconst (ANDL x y) [0]) // cond: // result: (TESTW x y) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDW { + if v_0.Op != OpAMD64ANDL { break } x := v_0.Args[0] @@ -3360,12 +2824,12 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { v.AddArg(y) return true } - // match: (CMPWconst (ANDWconst [c] x) [0]) + // match: (CMPWconst (ANDLconst [c] x) [0]) // cond: - // result: (TESTWconst [c] 
x) + // result: (TESTWconst [int64(int16(c))] x) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDWconst { + if v_0.Op != OpAMD64ANDLconst { break } c := v_0.AuxInt @@ -3374,7 +2838,7 @@ func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool { break } v.reset(OpAMD64TESTWconst) - v.AuxInt = c + v.AuxInt = int64(int16(c)) v.AddArg(x) return true } @@ -3418,10 +2882,10 @@ func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool { _ = b // match: (Com16 x) // cond: - // result: (NOTW x) + // result: (NOTL x) for { x := v.Args[0] - v.reset(OpAMD64NOTW) + v.reset(OpAMD64NOTL) v.AddArg(x) return true } @@ -3460,10 +2924,10 @@ func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool { _ = b // match: (Com8 x) // cond: - // result: (NOTB x) + // result: (NOTL x) for { x := v.Args[0] - v.reset(OpAMD64NOTB) + v.reset(OpAMD64NOTL) v.AddArg(x) return true } @@ -3474,10 +2938,10 @@ func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool { _ = b // match: (Const16 [val]) // cond: - // result: (MOVWconst [val]) + // result: (MOVLconst [val]) for { val := v.AuxInt - v.reset(OpAMD64MOVWconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = val return true } @@ -3544,10 +3008,10 @@ func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool { _ = b // match: (Const8 [val]) // cond: - // result: (MOVBconst [val]) + // result: (MOVLconst [val]) for { val := v.AuxInt - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = val return true } @@ -3558,10 +3022,10 @@ func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool { _ = b // match: (ConstBool [b]) // cond: - // result: (MOVBconst [b]) + // result: (MOVLconst [b]) for { b := v.AuxInt - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = b return true } @@ -5955,20 +5419,20 @@ func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool { _ = b // match: (Lsh16x16 x y) // cond: - // result: (ANDW (SHLW x y) (SBBLcarrymask (CMPWconst y [16]))) + // result: (ANDL (SHLL x 
y) (SBBLcarrymask (CMPWconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) - v0 := b.NewValue0(v.Line, OpAMD64SHLW, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 16 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -5980,20 +5444,20 @@ func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool { _ = b // match: (Lsh16x32 x y) // cond: - // result: (ANDW (SHLW x y) (SBBLcarrymask (CMPLconst y [16]))) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) - v0 := b.NewValue0(v.Line, OpAMD64SHLW, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 16 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -6005,20 +5469,20 @@ func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool { _ = b // match: (Lsh16x64 x y) // cond: - // result: (ANDW (SHLW x y) (SBBLcarrymask (CMPQconst y [16]))) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) - v0 := b.NewValue0(v.Line, OpAMD64SHLW, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 16 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -6030,20 +5494,20 @@ func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool { _ = b // match: (Lsh16x8 x y) // cond: - // result: (ANDW (SHLW x y) (SBBLcarrymask (CMPBconst y [16]))) 
+ // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) - v0 := b.NewValue0(v.Line, OpAMD64SHLW, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 16 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -6255,20 +5719,20 @@ func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool { _ = b // match: (Lsh8x16 x y) // cond: - // result: (ANDB (SHLB x y) (SBBLcarrymask (CMPWconst y [8]))) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDB) - v0 := b.NewValue0(v.Line, OpAMD64SHLB, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 8 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -6280,20 +5744,20 @@ func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool { _ = b // match: (Lsh8x32 x y) // cond: - // result: (ANDB (SHLB x y) (SBBLcarrymask (CMPLconst y [8]))) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDB) - v0 := b.NewValue0(v.Line, OpAMD64SHLB, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 8 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -6305,20 +5769,20 @@ func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool { _ = b // match: (Lsh8x64 x y) // cond: - // result: (ANDB (SHLB x y) (SBBLcarrymask 
(CMPQconst y [8]))) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDB) - v0 := b.NewValue0(v.Line, OpAMD64SHLB, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 8 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -6330,20 +5794,20 @@ func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool { _ = b // match: (Lsh8x8 x y) // cond: - // result: (ANDB (SHLB x y) (SBBLcarrymask (CMPBconst y [8]))) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDB) - v0 := b.NewValue0(v.Line, OpAMD64SHLB, t) + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHLL, t) v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) v2.AddArg(y) - v2.AuxInt = 8 + v2.AuxInt = 32 v1.AddArg(v2) v.AddArg(v1) return true @@ -6378,12 +5842,12 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool { v0.AddArg(mem) return true } - // match: (MOVBQSX (ANDBconst [c] x)) + // match: (MOVBQSX (ANDLconst [c] x)) // cond: c & 0x80 == 0 - // result: (ANDQconst [c & 0x7f] x) + // result: (ANDLconst [c & 0x7f] x) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDBconst { + if v_0.Op != OpAMD64ANDLconst { break } c := v_0.AuxInt @@ -6391,7 +5855,7 @@ func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool { if !(c&0x80 == 0) { break } - v.reset(OpAMD64ANDQconst) + v.reset(OpAMD64ANDLconst) v.AuxInt = c & 0x7f v.AddArg(x) return true @@ -6482,17 +5946,17 @@ func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { v0.AddArg(mem) return true } - // match: (MOVBQZX (ANDBconst [c] x)) + // match: (MOVBQZX 
(ANDLconst [c] x)) // cond: - // result: (ANDQconst [c & 0xff] x) + // result: (ANDLconst [c & 0xff] x) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDBconst { + if v_0.Op != OpAMD64ANDLconst { break } c := v_0.AuxInt x := v_0.Args[0] - v.reset(OpAMD64ANDQconst) + v.reset(OpAMD64ANDLconst) v.AuxInt = c & 0xff v.AddArg(x) return true @@ -6743,7 +6207,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool { v.AddArg(mem) return true } - // match: (MOVBstore [off] {sym} ptr (MOVBconst [c]) mem) + // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) // cond: validOff(off) // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) for { @@ -6751,7 +6215,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool { sym := v.Aux ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { + if v_1.Op != OpAMD64MOVLconst { break } c := v_1.AuxInt @@ -7334,7 +6798,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool { } // match: (MOVLQSX (ANDLconst [c] x)) // cond: c & 0x80000000 == 0 - // result: (ANDQconst [c & 0x7fffffff] x) + // result: (ANDLconst [c & 0x7fffffff] x) for { v_0 := v.Args[0] if v_0.Op != OpAMD64ANDLconst { @@ -7345,7 +6809,7 @@ func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool { if !(c&0x80000000 == 0) { break } - v.reset(OpAMD64ANDQconst) + v.reset(OpAMD64ANDLconst) v.AuxInt = c & 0x7fffffff v.AddArg(x) return true @@ -7464,8 +6928,8 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { return true } // match: (MOVLQZX (ANDLconst [c] x)) - // cond: c & 0x80000000 == 0 - // result: (ANDQconst [c & 0x7fffffff] x) + // cond: + // result: (ANDLconst [c] x) for { v_0 := v.Args[0] if v_0.Op != OpAMD64ANDLconst { @@ -7473,11 +6937,8 @@ func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { } c := v_0.AuxInt x := v_0.Args[0] - if !(c&0x80000000 == 0) { - break - } - v.reset(OpAMD64ANDQconst) - v.AuxInt = c & 0x7fffffff 
+ v.reset(OpAMD64ANDLconst) + v.AuxInt = c v.AddArg(x) return true } @@ -10635,12 +10096,12 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool { v0.AddArg(mem) return true } - // match: (MOVWQSX (ANDWconst [c] x)) + // match: (MOVWQSX (ANDLconst [c] x)) // cond: c & 0x8000 == 0 - // result: (ANDQconst [c & 0x7fff] x) + // result: (ANDLconst [c & 0x7fff] x) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDWconst { + if v_0.Op != OpAMD64ANDLconst { break } c := v_0.AuxInt @@ -10648,7 +10109,7 @@ func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool { if !(c&0x8000 == 0) { break } - v.reset(OpAMD64ANDQconst) + v.reset(OpAMD64ANDLconst) v.AuxInt = c & 0x7fff v.AddArg(x) return true @@ -10766,17 +10227,17 @@ func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config *Config) bool { v0.AddArg(mem) return true } - // match: (MOVWQZX (ANDWconst [c] x)) + // match: (MOVWQZX (ANDLconst [c] x)) // cond: - // result: (ANDQconst [c & 0xffff] x) + // result: (ANDLconst [c & 0xffff] x) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDWconst { + if v_0.Op != OpAMD64ANDLconst { break } c := v_0.AuxInt x := v_0.Args[0] - v.reset(OpAMD64ANDQconst) + v.reset(OpAMD64ANDLconst) v.AuxInt = c & 0xffff v.AddArg(x) return true @@ -11126,7 +10587,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool { v.AddArg(mem) return true } - // match: (MOVWstore [off] {sym} ptr (MOVWconst [c]) mem) + // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) // cond: validOff(off) // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) for { @@ -11134,7 +10595,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool { sym := v.Aux ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { + if v_1.Op != OpAMD64MOVLconst { break } c := v_1.AuxInt @@ -12037,60 +11498,6 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64MULB(v 
*Value, config *Config) bool { - b := v.Block - _ = b - // match: (MULB x (MOVBconst [c])) - // cond: - // result: (MULBconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64MULBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULB (MOVBconst [c]) x) - // cond: - // result: (MULBconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64MULBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MULBconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (MULBconst [c] (MOVBconst [d])) - // cond: - // result: (MOVBconst [int64(int8(c*d))]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVBconst) - v.AuxInt = int64(int8(c * d)) - return true - } - return false -} func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool { b := v.Block _ = b @@ -12557,66 +11964,12 @@ func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64MULW(v *Value, config *Config) bool { +func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool { b := v.Block _ = b - // match: (MULW x (MOVWconst [c])) + // match: (Mod16 x y) // cond: - // result: (MULWconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64MULWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULW (MOVWconst [c]) x) - // cond: - // result: (MULWconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64MULWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MULWconst(v *Value, config *Config) bool { - b := 
v.Block - _ = b - // match: (MULWconst [c] (MOVWconst [d])) - // cond: - // result: (MOVWconst [int64(int16(c*d))]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVWconst) - v.AuxInt = int64(int16(c * d)) - return true - } - return false -} -func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Mod16 x y) - // cond: - // result: (MODW x y) + // result: (MODW x y) for { x := v.Args[0] y := v.Args[1] @@ -13106,11 +12459,11 @@ func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool { _ = b // match: (Mul16 x y) // cond: - // result: (MULW x y) + // result: (MULL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64MULW) + v.reset(OpAMD64MULL) v.AddArg(x) v.AddArg(y) return true @@ -13186,35 +12539,17 @@ func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool { _ = b // match: (Mul8 x y) // cond: - // result: (MULB x y) + // result: (MULL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64MULB) + v.reset(OpAMD64MULL) v.AddArg(x) v.AddArg(y) return true } return false } -func rewriteValueAMD64_OpAMD64NEGB(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (NEGB (MOVBconst [c])) - // cond: - // result: (MOVBconst [int64(int8(-c))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64MOVBconst) - v.AuxInt = int64(int8(-c)) - return true - } - return false -} func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool { b := v.Block _ = b @@ -13251,42 +12586,6 @@ func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64NEGW(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (NEGW (MOVWconst [c])) - // cond: - // result: (MOVWconst [int64(int16(-c))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64MOVWconst) - 
v.AuxInt = int64(int16(-c)) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64NOTB(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (NOTB (MOVBconst [c])) - // cond: - // result: (MOVBconst [^c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64MOVBconst) - v.AuxInt = ^c - return true - } - return false -} func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool { b := v.Block _ = b @@ -13323,33 +12622,15 @@ func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64NOTW(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (NOTW (MOVWconst [c])) - // cond: - // result: (MOVWconst [^c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64MOVWconst) - v.AuxInt = ^c - return true - } - return false -} func rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool { b := v.Block _ = b // match: (Neg16 x) // cond: - // result: (NEGW x) + // result: (NEGL x) for { x := v.Args[0] - v.reset(OpAMD64NEGW) + v.reset(OpAMD64NEGL) v.AddArg(x) return true } @@ -13422,10 +12703,10 @@ func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool { _ = b // match: (Neg8 x) // cond: - // result: (NEGB x) + // result: (NEGL x) for { x := v.Args[0] - v.reset(OpAMD64NEGB) + v.reset(OpAMD64NEGL) v.AddArg(x) return true } @@ -13578,50 +12859,50 @@ func rewriteValueAMD64_OpNot(v *Value, config *Config) bool { _ = b // match: (Not x) // cond: - // result: (XORBconst [1] x) + // result: (XORLconst [1] x) for { x := v.Args[0] - v.reset(OpAMD64XORBconst) + v.reset(OpAMD64XORLconst) v.AuxInt = 1 v.AddArg(x) return true } return false } -func rewriteValueAMD64_OpAMD64ORB(v *Value, config *Config) bool { +func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { b := v.Block _ = b - // match: (ORB x (MOVBconst [c])) + // match: (ORL x (MOVLconst [c])) // cond: 
- // result: (ORBconst [c] x) + // result: (ORLconst [c] x) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { + if v_1.Op != OpAMD64MOVLconst { break } c := v_1.AuxInt - v.reset(OpAMD64ORBconst) + v.reset(OpAMD64ORLconst) v.AuxInt = c v.AddArg(x) return true } - // match: (ORB (MOVBconst [c]) x) + // match: (ORL (MOVLconst [c]) x) // cond: - // result: (ORBconst [c] x) + // result: (ORLconst [c] x) for { v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { + if v_0.Op != OpAMD64MOVLconst { break } c := v_0.AuxInt x := v.Args[1] - v.reset(OpAMD64ORBconst) + v.reset(OpAMD64ORLconst) v.AuxInt = c v.AddArg(x) return true } - // match: (ORB x x) + // match: (ORL x x) // cond: // result: x for { @@ -13634,97 +12915,52 @@ func rewriteValueAMD64_OpAMD64ORB(v *Value, config *Config) bool { v.AddArg(x) return true } - return false -} -func rewriteValueAMD64_OpAMD64ORBconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ORBconst [c] x) - // cond: int8(c)==0 - // result: x + // match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) for { - c := v.AuxInt - x := v.Args[0] - if !(int8(c) == 0) { + x0 := v.Args[0] + if x0.Op != OpAMD64MOVBload { break } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ORBconst [c] _) - // cond: int8(c)==-1 - // result: (MOVBconst [-1]) - for { - c := v.AuxInt - if !(int8(c) == -1) { + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := v.Args[1] + if s0.Op != OpAMD64SHLLconst { break } - v.reset(OpAMD64MOVBconst) - v.AuxInt = -1 - return true - } - // match: (ORBconst [c] (MOVBconst [d])) - // cond: - // result: (MOVBconst [c|d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { + if s0.AuxInt != 8 { 
break } - d := v_0.AuxInt - v.reset(OpAMD64MOVBconst) - v.AuxInt = c | d - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ORL x (MOVLconst [c])) - // cond: - // result: (ORLconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { + x1 := s0.Args[0] + if x1.Op != OpAMD64MOVBload { break } - c := v_1.AuxInt - v.reset(OpAMD64ORLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (MOVLconst [c]) x) - // cond: - // result: (ORLconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { + if x1.AuxInt != i+1 { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ORLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL x x) - // cond: - // result: x - for { - x := v.Args[0] - if x != v.Args[1] { + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { break } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) + v.AddArg(v0) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) return true } // match: (ORL o0:(ORL o1:(ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLLconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLLconst [24] x3:(MOVBload [i+3] {s} p mem))) @@ -13829,19 +13065,11 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { v0.AddArg(mem) return true } - // match: (ORL o0:(ORL o1:(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && 
x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) - // result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 [i] {s} p idx mem) + // match: (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i] {s} p idx mem) for { - o0 := v.Args[0] - if o0.Op != OpAMD64ORL { - break - } - o1 := o0.Args[0] - if o1.Op != OpAMD64ORL { - break - } - x0 := o1.Args[0] + x0 := v.Args[0] if x0.Op != OpAMD64MOVBloadidx1 { break } @@ -13850,7 +13078,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { p := x0.Args[0] idx := x0.Args[1] mem := x0.Args[2] - s0 := o1.Args[1] + s0 := v.Args[1] if s0.Op != OpAMD64SHLLconst { break } @@ -13876,24 +13104,85 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { if mem != x1.Args[2] { break } - s1 := o0.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - if s1.AuxInt != 16 { - break - } - x2 := s1.Args[0] - if x2.Op != OpAMD64MOVBloadidx1 { + if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { break } - if x2.AuxInt != i+2 { + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (ORL o0:(ORL o1:(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLLconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLLconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) + // cond: x0.Uses == 1 && 
x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) + // result: @mergePoint(b,x0,x1,x2,x3) (MOVLloadidx1 [i] {s} p idx mem) + for { + o0 := v.Args[0] + if o0.Op != OpAMD64ORL { break } - if x2.Aux != s { + o1 := o0.Args[0] + if o1.Op != OpAMD64ORL { break } - if p != x2.Args[0] { + x0 := o1.Args[0] + if x0.Op != OpAMD64MOVBloadidx1 { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := o1.Args[1] + if s0.Op != OpAMD64SHLLconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpAMD64MOVBloadidx1 { + break + } + if x1.AuxInt != i+1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if idx != x1.Args[1] { + break + } + if mem != x1.Args[2] { + break + } + s1 := o0.Args[1] + if s1.Op != OpAMD64SHLLconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpAMD64MOVBloadidx1 { + break + } + if x2.AuxInt != i+2 { + break + } + if x2.Aux != s { + break + } + if p != x2.Args[0] { break } if idx != x2.Args[1] { @@ -14529,200 +13818,6 @@ func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ORW x (MOVWconst [c])) - // cond: - // result: (ORWconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ORWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORW (MOVWconst [c]) x) - // cond: - // result: (ORWconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ORWconst) - v.AuxInt = 
c - v.AddArg(x) - return true - } - // match: (ORW x x) - // cond: - // result: x - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ORW x0:(MOVBload [i] {s} p mem) s0:(SHLWconst [8] x1:(MOVBload [i+1] {s} p mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLWconst { - break - } - if s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - if x1.AuxInt != i+1 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORW x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLWconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLWconst { - break - } - if s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - if x1.AuxInt != i+1 { - break - } - if x1.Aux != s { - break - } - if p != 
x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Line, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORWconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (ORWconst [c] x) - // cond: int16(c)==0 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int16(c) == 0) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ORWconst [c] _) - // cond: int16(c)==-1 - // result: (MOVWconst [-1]) - for { - c := v.AuxInt - if !(int16(c) == -1) { - break - } - v.reset(OpAMD64MOVWconst) - v.AuxInt = -1 - return true - } - // match: (ORWconst [c] (MOVWconst [d])) - // cond: - // result: (MOVWconst [c|d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVWconst) - v.AuxInt = c | d - return true - } - return false -} func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool { b := v.Block _ = b @@ -14760,11 +13855,11 @@ func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool { _ = b // match: (Or16 x y) // cond: - // result: (ORW x y) + // result: (ORL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ORW) + v.reset(OpAMD64ORL) v.AddArg(x) v.AddArg(y) return true @@ -14808,11 +13903,11 @@ func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool { _ = b // match: (Or8 x y) // cond: - // result: (ORB x y) + // result: (ORL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ORB) + v.reset(OpAMD64ORL) v.AddArg(x) v.AddArg(y) return true @@ -14824,12 +13919,12 @@ func rewriteValueAMD64_OpRsh16Ux16(v *Value, config 
*Config) bool { _ = b // match: (Rsh16Ux16 x y) // cond: - // result: (ANDW (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) + v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) v0.AddArg(x) v0.AddArg(y) @@ -14849,12 +13944,12 @@ func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool { _ = b // match: (Rsh16Ux32 x y) // cond: - // result: (ANDW (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPLconst y [16]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) + v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) v0.AddArg(x) v0.AddArg(y) @@ -14874,12 +13969,12 @@ func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool { _ = b // match: (Rsh16Ux64 x y) // cond: - // result: (ANDW (SHRW x y) (SBBLcarrymask (CMPQconst y [16]))) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPQconst y [16]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) + v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) v0.AddArg(x) v0.AddArg(y) @@ -14899,12 +13994,12 @@ func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool { _ = b // match: (Rsh16Ux8 x y) // cond: - // result: (ANDW (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) + // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDW) + v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) v0.AddArg(x) v0.AddArg(y) @@ -14924,7 +14019,7 @@ func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool { _ = b // match: (Rsh16x16 x y) // cond: - // result: (SARW x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16]))))) for { t := v.Type x := v.Args[0] @@ -14932,7 +14027,7 @@ func 
rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool { v.reset(OpAMD64SARW) v.Type = t v.AddArg(x) - v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type) + v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) v0.AddArg(y) v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) @@ -15008,7 +14103,7 @@ func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool { _ = b // match: (Rsh16x8 x y) // cond: - // result: (SARW x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) + // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) for { t := v.Type x := v.Args[0] @@ -15016,7 +14111,7 @@ func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool { v.reset(OpAMD64SARW) v.Type = t v.AddArg(x) - v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type) + v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) v0.AddArg(y) v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) @@ -15136,7 +14231,7 @@ func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool { _ = b // match: (Rsh32x16 x y) // cond: - // result: (SARL x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32]))))) for { t := v.Type x := v.Args[0] @@ -15144,7 +14239,7 @@ func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool { v.reset(OpAMD64SARL) v.Type = t v.AddArg(x) - v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type) + v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) v0.AddArg(y) v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) @@ -15220,7 +14315,7 @@ func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool { _ = b // match: (Rsh32x8 x y) // cond: - // result: (SARL x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) + // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) for { t := v.Type x := v.Args[0] @@ -15228,7 +14323,7 @@ func rewriteValueAMD64_OpRsh32x8(v *Value, 
config *Config) bool { v.reset(OpAMD64SARL) v.Type = t v.AddArg(x) - v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type) + v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) v0.AddArg(y) v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) @@ -15348,7 +14443,7 @@ func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool { _ = b // match: (Rsh64x16 x y) // cond: - // result: (SARQ x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [64]))))) + // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [64]))))) for { t := v.Type x := v.Args[0] @@ -15356,7 +14451,7 @@ func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool { v.reset(OpAMD64SARQ) v.Type = t v.AddArg(x) - v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type) + v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) v0.AddArg(y) v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) @@ -15432,7 +14527,7 @@ func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool { _ = b // match: (Rsh64x8 x y) // cond: - // result: (SARQ x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) + // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) for { t := v.Type x := v.Args[0] @@ -15440,7 +14535,7 @@ func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool { v.reset(OpAMD64SARQ) v.Type = t v.AddArg(x) - v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type) + v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) v0.AddArg(y) v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) @@ -15460,12 +14555,12 @@ func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool { _ = b // match: (Rsh8Ux16 x y) // cond: - // result: (ANDB (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDB) + v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) v0.AddArg(x) 
v0.AddArg(y) @@ -15485,12 +14580,12 @@ func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool { _ = b // match: (Rsh8Ux32 x y) // cond: - // result: (ANDB (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDB) + v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) v0.AddArg(x) v0.AddArg(y) @@ -15510,12 +14605,12 @@ func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool { _ = b // match: (Rsh8Ux64 x y) // cond: - // result: (ANDB (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPQconst y [8]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDB) + v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) v0.AddArg(x) v0.AddArg(y) @@ -15535,12 +14630,12 @@ func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool { _ = b // match: (Rsh8Ux8 x y) // cond: - // result: (ANDB (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) + // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) for { t := v.Type x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64ANDB) + v.reset(OpAMD64ANDL) v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) v0.AddArg(x) v0.AddArg(y) @@ -15560,7 +14655,7 @@ func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool { _ = b // match: (Rsh8x16 x y) // cond: - // result: (SARB x (ORW y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8]))))) for { t := v.Type x := v.Args[0] @@ -15568,7 +14663,7 @@ func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool { v.reset(OpAMD64SARB) v.Type = t v.AddArg(x) - v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type) + v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) v0.AddArg(y) v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) @@ -15644,7 +14739,7 @@ func rewriteValueAMD64_OpRsh8x8(v 
*Value, config *Config) bool { _ = b // match: (Rsh8x8 x y) // cond: - // result: (SARB x (ORB y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) + // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) for { t := v.Type x := v.Args[0] @@ -15652,7 +14747,7 @@ func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool { v.reset(OpAMD64SARB) v.Type = t v.AddArg(x) - v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type) + v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) v0.AddArg(y) v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) @@ -15700,66 +14795,18 @@ func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool { v.AddArg(x) return true } - // match: (SARB x (MOVWconst [c])) + return false +} +func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SARBconst [c] (MOVQconst [d])) // cond: - // result: (SARBconst [c&31] x) + // result: (MOVQconst [d>>uint64(c)]) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SARBconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SARB x (MOVBconst [c])) - // cond: - // result: (SARBconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SARBconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SARB x (ANDBconst [31] y)) - // cond: - // result: (SARB x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDBconst { - break - } - if v_1.AuxInt != 31 { - break - } - y := v_1.Args[0] - v.reset(OpAMD64SARB) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (SARBconst [c] (MOVQconst [d])) - // cond: - // result: (MOVQconst [d>>uint64(c)]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op 
!= OpAMD64MOVQconst { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVQconst { break } d := v_0.AuxInt @@ -15802,36 +14849,6 @@ func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool { v.AddArg(x) return true } - // match: (SARL x (MOVWconst [c])) - // cond: - // result: (SARLconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SARLconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SARL x (MOVBconst [c])) - // cond: - // result: (SARLconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SARLconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } // match: (SARL x (ANDLconst [31] y)) // cond: // result: (SARL x y) @@ -15904,36 +14921,6 @@ func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool { v.AddArg(x) return true } - // match: (SARQ x (MOVWconst [c])) - // cond: - // result: (SARQconst [c&63] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SARQconst) - v.AuxInt = c & 63 - v.AddArg(x) - return true - } - // match: (SARQ x (MOVBconst [c])) - // cond: - // result: (SARQconst [c&63] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SARQconst) - v.AuxInt = c & 63 - v.AddArg(x) - return true - } // match: (SARQ x (ANDQconst [63] y)) // cond: // result: (SARQ x y) @@ -16006,54 +14993,6 @@ func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool { v.AddArg(x) return true } - // match: (SARW x (MOVWconst [c])) - // cond: - // result: (SARWconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SARWconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SARW x (MOVBconst [c])) - 
// cond: - // result: (SARWconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SARWconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SARW x (ANDWconst [31] y)) - // cond: - // result: (SARW x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDWconst { - break - } - if v_1.AuxInt != 31 { - break - } - y := v_1.Args[0] - v.reset(OpAMD64SARW) - v.AddArg(x) - v.AddArg(y) - return true - } return false } func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool { @@ -16223,61 +15162,61 @@ func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool { } // match: (SETA (FlagEQ)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETA (FlagLT_ULT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETA (FlagLT_UGT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETA (FlagGT_ULT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETA (FlagGT_UGT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } @@ -16301,61 +15240,61 @@ func rewriteValueAMD64_OpAMD64SETAE(v *Value, config 
*Config) bool { } // match: (SETAE (FlagEQ)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETAE (FlagLT_ULT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETAE (FlagLT_UGT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETAE (FlagGT_ULT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETAE (FlagGT_UGT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } @@ -16379,61 +15318,61 @@ func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool { } // match: (SETB (FlagEQ)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETB (FlagLT_ULT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETB (FlagLT_UGT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpAMD64MOVBconst) 
+ v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETB (FlagGT_ULT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETB (FlagGT_UGT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } @@ -16457,61 +15396,61 @@ func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool { } // match: (SETBE (FlagEQ)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETBE (FlagLT_ULT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETBE (FlagLT_UGT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETBE (FlagGT_ULT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETBE (FlagGT_UGT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } @@ -16535,61 +15474,61 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool { } // match: (SETEQ (FlagEQ)) // cond: - // 
result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETEQ (FlagLT_ULT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETEQ (FlagLT_UGT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETEQ (FlagGT_ULT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETEQ (FlagGT_UGT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } @@ -16613,61 +15552,61 @@ func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool { } // match: (SETG (FlagEQ)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETG (FlagLT_ULT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETG (FlagLT_UGT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // 
match: (SETG (FlagGT_ULT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETG (FlagGT_UGT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } @@ -16691,61 +15630,61 @@ func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool { } // match: (SETGE (FlagEQ)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETGE (FlagLT_ULT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETGE (FlagLT_UGT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETGE (FlagGT_ULT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETGE (FlagGT_UGT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } @@ -16769,61 +15708,61 @@ func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool { } // match: (SETL (FlagEQ)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 
:= v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETL (FlagLT_ULT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETL (FlagLT_UGT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETL (FlagGT_ULT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETL (FlagGT_UGT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } @@ -16847,61 +15786,61 @@ func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool { } // match: (SETLE (FlagEQ)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETLE (FlagLT_ULT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETLE (FlagLT_UGT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETLE (FlagGT_ULT)) // cond: - // result: (MOVBconst 
[0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETLE (FlagGT_UGT)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } @@ -16925,72 +15864,72 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value, config *Config) bool { } // match: (SETNE (FlagEQ)) // cond: - // result: (MOVBconst [0]) + // result: (MOVLconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagEQ { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 0 return true } // match: (SETNE (FlagLT_ULT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETNE (FlagLT_UGT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagLT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETNE (FlagGT_ULT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_ULT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } // match: (SETNE (FlagGT_UGT)) // cond: - // result: (MOVBconst [1]) + // result: (MOVLconst [1]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64FlagGT_UGT { break } - v.reset(OpAMD64MOVBconst) + v.reset(OpAMD64MOVLconst) v.AuxInt = 1 return true } return false } -func rewriteValueAMD64_OpAMD64SHLB(v *Value, config *Config) bool { +func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool { b := v.Block _ = b - // match: (SHLB x (MOVQconst [c])) + // match: (SHLL x (MOVQconst [c])) // cond: - // 
result: (SHLBconst [c&31] x) + // result: (SHLLconst [c&31] x) for { x := v.Args[0] v_1 := v.Args[1] @@ -16998,14 +15937,14 @@ func rewriteValueAMD64_OpAMD64SHLB(v *Value, config *Config) bool { break } c := v_1.AuxInt - v.reset(OpAMD64SHLBconst) + v.reset(OpAMD64SHLLconst) v.AuxInt = c & 31 v.AddArg(x) return true } - // match: (SHLB x (MOVLconst [c])) + // match: (SHLL x (MOVLconst [c])) // cond: - // result: (SHLBconst [c&31] x) + // result: (SHLLconst [c&31] x) for { x := v.Args[0] v_1 := v.Args[1] @@ -17013,67 +15952,37 @@ func rewriteValueAMD64_OpAMD64SHLB(v *Value, config *Config) bool { break } c := v_1.AuxInt - v.reset(OpAMD64SHLBconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHLB x (MOVWconst [c])) - // cond: - // result: (SHLBconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHLBconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHLB x (MOVBconst [c])) - // cond: - // result: (SHLBconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHLBconst) + v.reset(OpAMD64SHLLconst) v.AuxInt = c & 31 v.AddArg(x) return true } - // match: (SHLB x (ANDBconst [31] y)) + // match: (SHLL x (ANDLconst [31] y)) // cond: - // result: (SHLB x y) + // result: (SHLL x y) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDBconst { + if v_1.Op != OpAMD64ANDLconst { break } if v_1.AuxInt != 31 { break } y := v_1.Args[0] - v.reset(OpAMD64SHLB) + v.reset(OpAMD64SHLL) v.AddArg(x) v.AddArg(y) return true } return false } -func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool { +func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool { b := v.Block _ = b - // match: (SHLL x (MOVQconst [c])) + // match: (SHLQ x (MOVQconst [c])) // cond: - // result: (SHLLconst [c&31] x) + // result: (SHLQconst [c&63] x) for { x := v.Args[0] v_1 := 
v.Args[1] @@ -17081,95 +15990,12 @@ func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool { break } c := v_1.AuxInt - v.reset(OpAMD64SHLLconst) - v.AuxInt = c & 31 + v.reset(OpAMD64SHLQconst) + v.AuxInt = c & 63 v.AddArg(x) return true } - // match: (SHLL x (MOVLconst [c])) - // cond: - // result: (SHLLconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHLLconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHLL x (MOVWconst [c])) - // cond: - // result: (SHLLconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHLLconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHLL x (MOVBconst [c])) - // cond: - // result: (SHLLconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHLLconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHLL x (ANDLconst [31] y)) - // cond: - // result: (SHLL x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDLconst { - break - } - if v_1.AuxInt != 31 { - break - } - y := v_1.Args[0] - v.reset(OpAMD64SHLL) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (SHLQ x (MOVQconst [c])) - // cond: - // result: (SHLQconst [c&63] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHLQconst) - v.AuxInt = c & 63 - v.AddArg(x) - return true - } - // match: (SHLQ x (MOVLconst [c])) + // match: (SHLQ x (MOVLconst [c])) // cond: // result: (SHLQconst [c&63] x) for { @@ -17184,36 +16010,6 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool { v.AddArg(x) return true } - // match: (SHLQ x (MOVWconst 
[c])) - // cond: - // result: (SHLQconst [c&63] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHLQconst) - v.AuxInt = c & 63 - v.AddArg(x) - return true - } - // match: (SHLQ x (MOVBconst [c])) - // cond: - // result: (SHLQconst [c&63] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHLQconst) - v.AuxInt = c & 63 - v.AddArg(x) - return true - } // match: (SHLQ x (ANDQconst [63] y)) // cond: // result: (SHLQ x y) @@ -17234,89 +16030,6 @@ func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64SHLW(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (SHLW x (MOVQconst [c])) - // cond: - // result: (SHLWconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHLWconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHLW x (MOVLconst [c])) - // cond: - // result: (SHLWconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHLWconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHLW x (MOVWconst [c])) - // cond: - // result: (SHLWconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHLWconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHLW x (MOVBconst [c])) - // cond: - // result: (SHLWconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHLWconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHLW x (ANDWconst [31] y)) - // cond: - // result: (SHLW x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != 
OpAMD64ANDWconst { - break - } - if v_1.AuxInt != 31 { - break - } - y := v_1.Args[0] - v.reset(OpAMD64SHLW) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool { b := v.Block _ = b @@ -17350,54 +16063,6 @@ func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool { v.AddArg(x) return true } - // match: (SHRB x (MOVWconst [c])) - // cond: - // result: (SHRBconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHRBconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHRB x (MOVBconst [c])) - // cond: - // result: (SHRBconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHRBconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHRB x (ANDBconst [31] y)) - // cond: - // result: (SHRB x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDBconst { - break - } - if v_1.AuxInt != 31 { - break - } - y := v_1.Args[0] - v.reset(OpAMD64SHRB) - v.AddArg(x) - v.AddArg(y) - return true - } return false } func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool { @@ -17433,36 +16098,6 @@ func rewriteValueAMD64_OpAMD64SHRL(v *Value, config *Config) bool { v.AddArg(x) return true } - // match: (SHRL x (MOVWconst [c])) - // cond: - // result: (SHRLconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHRLconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHRL x (MOVBconst [c])) - // cond: - // result: (SHRLconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHRLconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } // match: (SHRL x (ANDLconst [31] y)) // cond: // 
result: (SHRL x y) @@ -17506,253 +16141,66 @@ func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool { // result: (SHRQconst [c&63] x) for { x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHRQconst) - v.AuxInt = c & 63 - v.AddArg(x) - return true - } - // match: (SHRQ x (MOVWconst [c])) - // cond: - // result: (SHRQconst [c&63] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHRQconst) - v.AuxInt = c & 63 - v.AddArg(x) - return true - } - // match: (SHRQ x (MOVBconst [c])) - // cond: - // result: (SHRQconst [c&63] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHRQconst) - v.AuxInt = c & 63 - v.AddArg(x) - return true - } - // match: (SHRQ x (ANDQconst [63] y)) - // cond: - // result: (SHRQ x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDQconst { - break - } - if v_1.AuxInt != 63 { - break - } - y := v_1.Args[0] - v.reset(OpAMD64SHRQ) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (SHRW x (MOVQconst [c])) - // cond: - // result: (SHRWconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHRWconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHRW x (MOVLconst [c])) - // cond: - // result: (SHRWconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHRWconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHRW x (MOVWconst [c])) - // cond: - // result: (SHRWconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt 
- v.reset(OpAMD64SHRWconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHRW x (MOVBconst [c])) - // cond: - // result: (SHRWconst [c&31] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SHRWconst) - v.AuxInt = c & 31 - v.AddArg(x) - return true - } - // match: (SHRW x (ANDWconst [31] y)) - // cond: - // result: (SHRW x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ANDWconst { - break - } - if v_1.AuxInt != 31 { - break - } - y := v_1.Args[0] - v.reset(OpAMD64SHRW) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64SUBB(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (SUBB x (MOVBconst [c])) - // cond: - // result: (SUBBconst x [c]) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SUBBconst) - v.AddArg(x) - v.AuxInt = c - return true - } - // match: (SUBB (MOVBconst [c]) x) - // cond: - // result: (NEGB (SUBBconst x [c])) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64NEGB) - v0 := b.NewValue0(v.Line, OpAMD64SUBBconst, v.Type) - v0.AddArg(x) - v0.AuxInt = c - v.AddArg(v0) - return true - } - // match: (SUBB x x) - // cond: - // result: (MOVBconst [0]) - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpAMD64MOVBconst) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValueAMD64_OpAMD64SUBBconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (SUBBconst [c] x) - // cond: int8(c) == 0 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int8(c) == 0) { + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVLconst { break } - v.reset(OpCopy) - v.Type = x.Type + c := v_1.AuxInt + v.reset(OpAMD64SHRQconst) + v.AuxInt = c & 63 v.AddArg(x) return true } - // match: (SUBBconst [c] x) + 
// match: (SHRQ x (ANDQconst [63] y)) // cond: - // result: (ADDBconst [int64(int8(-c))] x) + // result: (SHRQ x y) for { - c := v.AuxInt x := v.Args[0] - v.reset(OpAMD64ADDBconst) - v.AuxInt = int64(int8(-c)) + v_1 := v.Args[1] + if v_1.Op != OpAMD64ANDQconst { + break + } + if v_1.AuxInt != 63 { + break + } + y := v_1.Args[0] + v.reset(OpAMD64SHRQ) v.AddArg(x) + v.AddArg(y) return true } - // match: (SUBBconst (MOVBconst [d]) [c]) + return false +} +func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SHRW x (MOVQconst [c])) // cond: - // result: (MOVBconst [int64(int8(d-c))]) + // result: (SHRWconst [c&31] x) for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQconst { break } - d := v_0.AuxInt - c := v.AuxInt - v.reset(OpAMD64MOVBconst) - v.AuxInt = int64(int8(d - c)) + c := v_1.AuxInt + v.reset(OpAMD64SHRWconst) + v.AuxInt = c & 31 + v.AddArg(x) return true } - // match: (SUBBconst (SUBBconst x [d]) [c]) + // match: (SHRW x (MOVLconst [c])) // cond: - // result: (ADDBconst [int64(int8(-c-d))] x) + // result: (SHRWconst [c&31] x) for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64SUBBconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVLconst { break } - x := v_0.Args[0] - d := v_0.AuxInt - c := v.AuxInt - v.reset(OpAMD64ADDBconst) - v.AuxInt = int64(int8(-c - d)) + c := v_1.AuxInt + v.reset(OpAMD64SHRWconst) + v.AuxInt = c & 31 v.AddArg(x) return true } @@ -17987,115 +16435,6 @@ func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64SUBW(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (SUBW x (MOVWconst [c])) - // cond: - // result: (SUBWconst x [c]) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64SUBWconst) - v.AddArg(x) - v.AuxInt = c - return true - } - // match: (SUBW 
(MOVWconst [c]) x) - // cond: - // result: (NEGW (SUBWconst x [c])) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64NEGW) - v0 := b.NewValue0(v.Line, OpAMD64SUBWconst, v.Type) - v0.AddArg(x) - v0.AuxInt = c - v.AddArg(v0) - return true - } - // match: (SUBW x x) - // cond: - // result: (MOVWconst [0]) - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpAMD64MOVWconst) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValueAMD64_OpAMD64SUBWconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (SUBWconst [c] x) - // cond: int16(c) == 0 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int16(c) == 0) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (SUBWconst [c] x) - // cond: - // result: (ADDWconst [int64(int16(-c))] x) - for { - c := v.AuxInt - x := v.Args[0] - v.reset(OpAMD64ADDWconst) - v.AuxInt = int64(int16(-c)) - v.AddArg(x) - return true - } - // match: (SUBWconst (MOVWconst [d]) [c]) - // cond: - // result: (MOVWconst [int64(int16(d-c))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - d := v_0.AuxInt - c := v.AuxInt - v.reset(OpAMD64MOVWconst) - v.AuxInt = int64(int16(d - c)) - return true - } - // match: (SUBWconst (SUBWconst x [d]) [c]) - // cond: - // result: (ADDWconst [int64(int16(-c-d))] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64SUBWconst { - break - } - x := v_0.Args[0] - d := v_0.AuxInt - c := v.AuxInt - v.reset(OpAMD64ADDWconst) - v.AuxInt = int64(int16(-c - d)) - v.AddArg(x) - return true - } - return false -} func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool { b := v.Block _ = b @@ -18324,11 +16663,11 @@ func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool { _ = b // match: (Sub16 x y) // cond: - // result: (SUBW x y) + // result: (SUBL x y) for { x := v.Args[0] y := v.Args[1] - 
v.reset(OpAMD64SUBW) + v.reset(OpAMD64SUBL) v.AddArg(x) v.AddArg(y) return true @@ -18404,11 +16743,11 @@ func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool { _ = b // match: (Sub8 x y) // cond: - // result: (SUBB x y) + // result: (SUBL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64SUBB) + v.reset(OpAMD64SUBL) v.AddArg(x) v.AddArg(y) return true @@ -18521,86 +16860,6 @@ func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64XORB(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (XORB x (MOVBconst [c])) - // cond: - // result: (XORBconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVBconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64XORBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORB (MOVBconst [c]) x) - // cond: - // result: (XORBconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64XORBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORB x x) - // cond: - // result: (MOVBconst [0]) - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpAMD64MOVBconst) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValueAMD64_OpAMD64XORBconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (XORBconst [c] x) - // cond: int8(c)==0 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int8(c) == 0) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (XORBconst [c] (MOVBconst [d])) - // cond: - // result: (MOVBconst [c^d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVBconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVBconst) - v.AuxInt = c ^ d - return true - } - return false -} func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool { b := v.Block _ = b @@ -18766,96 +17025,16 @@ 
func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool { } return false } -func rewriteValueAMD64_OpAMD64XORW(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (XORW x (MOVWconst [c])) - // cond: - // result: (XORWconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64XORWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORW (MOVWconst [c]) x) - // cond: - // result: (XORWconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64XORWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (XORW x x) - // cond: - // result: (MOVWconst [0]) - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpAMD64MOVWconst) - v.AuxInt = 0 - return true - } - return false -} -func rewriteValueAMD64_OpAMD64XORWconst(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (XORWconst [c] x) - // cond: int16(c)==0 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int16(c) == 0) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (XORWconst [c] (MOVWconst [d])) - // cond: - // result: (MOVWconst [c^d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVWconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVWconst) - v.AuxInt = c ^ d - return true - } - return false -} func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool { b := v.Block _ = b // match: (Xor16 x y) // cond: - // result: (XORW x y) + // result: (XORL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpAMD64XORW) + v.reset(OpAMD64XORL) v.AddArg(x) v.AddArg(y) return true @@ -18899,11 +17078,11 @@ func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool { _ = b // match: (Xor8 x y) // cond: - // result: (XORB x y) + // result: (XORL x y) for { x := v.Args[0] y := v.Args[1] - 
v.reset(OpAMD64XORB) + v.reset(OpAMD64XORL) v.AddArg(x) v.AddArg(y) return true -- cgit v1.3 From 934c3599648ae841668ec753881134347fc28c29 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Sat, 23 Apr 2016 22:59:01 -0700 Subject: cmd/compile: reorder how slicelit initializes a slice func f(x, y, z *int) { a := []*int{x,y,z} ... } We used to use: var tmp [3]*int a := tmp[:] a[0] = x a[1] = y a[2] = z Now we do: var tmp [3]*int tmp[0] = x tmp[1] = y tmp[2] = z a := tmp[:] Doesn't sound like a big deal, but the compiler has trouble eliminating write barriers when using the former method because it doesn't know that the slice points to the stack. In the latter method, the compiler knows the array is on the stack and as a result doesn't emit any write barriers. This turns out to be extremely common when building ... args, like for calls fmt.Printf. Makes go binaries ~1% smaller. Doesn't have a measurable effect on the go1 fmt benchmarks, unfortunately. Fixes #14263 Update #6853 Change-Id: I9074a2788ec9e561a75f3b71c119b69f304d6ba2 Reviewed-on: https://go-review.googlesource.com/22395 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Josh Bleecher Snyder --- src/cmd/compile/internal/gc/sinit.go | 33 +++++++++++++++++---------------- src/cmd/compile/internal/gc/walk.go | 3 +-- test/writebarrier.go | 14 ++++++++++++++ 3 files changed, 32 insertions(+), 18 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 2c2ade06f5..cc1d1962d2 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -745,15 +745,15 @@ func slicelit(ctxt int, n *Node, var_ *Node, init *Nodes) { // var vauto *[...]t = new([...]t) // 4. copy the static array to the auto array // *vauto = vstat - // 5. assign slice of allocated heap to var - // var = [0:]*auto - // 6. for each dynamic part assign to the slice - // var[i] = dynamic part + // 5. 
for each dynamic part assign to the array + // vauto[i] = dynamic part + // 6. assign slice of allocated heap to var + // var = vauto[:] // // an optimization is done if there is no constant part // 3. var vauto *[...]t = new([...]t) - // 5. var = [0:]*auto - // 6. var[i] = dynamic part + // 5. vauto[i] = dynamic part + // 6. var = vauto[:] // if the literal contains constants, // make static initialized array (1),(2) @@ -811,21 +811,14 @@ func slicelit(ctxt int, n *Node, var_ *Node, init *Nodes) { init.Append(a) } - // make slice out of heap (5) - a = Nod(OAS, var_, Nod(OSLICE, vauto, Nod(OKEY, nil, nil))) - - a = typecheck(a, Etop) - a = orderstmtinplace(a) - a = walkstmt(a) - init.Append(a) - // put dynamics into slice (6) + // put dynamics into array (5) for _, r := range n.List.Slice() { if r.Op != OKEY { Fatalf("slicelit: rhs not OKEY: %v", r) } index := r.Left value := r.Right - a := Nod(OINDEX, var_, index) + a := Nod(OINDEX, vauto, index) a.Bounded = true // TODO need to check bounds? 
@@ -847,7 +840,7 @@ func slicelit(ctxt int, n *Node, var_ *Node, init *Nodes) { continue } - // build list of var[c] = expr + // build list of vauto[c] = expr setlineno(value) a = Nod(OAS, a, value) @@ -856,6 +849,14 @@ func slicelit(ctxt int, n *Node, var_ *Node, init *Nodes) { a = walkstmt(a) init.Append(a) } + + // make slice out of heap (6) + a = Nod(OAS, var_, Nod(OSLICE, vauto, Nod(OKEY, nil, nil))) + + a = typecheck(a, Etop) + a = orderstmtinplace(a) + a = walkstmt(a) + init.Append(a) } func maplit(ctxt int, n *Node, var_ *Node, init *Nodes) { diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 27ff045028..7c4d74c8c3 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -2748,8 +2748,7 @@ func addstr(n *Node, init *Nodes) *Node { prealloc[slice] = prealloc[n] } slice.List.Set(args[1:]) // skip buf arg - args = []*Node{buf} - args = append(args, slice) + args = []*Node{buf, slice} slice.Esc = EscNone } diff --git a/test/writebarrier.go b/test/writebarrier.go index 44e42f0883..2ff0ee9584 100644 --- a/test/writebarrier.go +++ b/test/writebarrier.go @@ -182,3 +182,17 @@ func f18(p *T18, x *[]int) { p.s = p.s[8:9] // ERROR "write barrier" *x = (*x)[3:5] // ERROR "write barrier" } + +func f19(x, y *int, i int) int { + // Constructing a temporary slice on the stack should not + // require any write barriers. See issue 14263. + a := []*int{x, y} // no barrier + return *a[i] +} + +func f20(x, y *int, i int) []*int { + // ... but if that temporary slice escapes, then the + // write barriers are necessary. + a := []*int{x, y} // ERROR "write barrier" + return a +} -- cgit v1.3 From b6b144bf97744ead3ac51fd1b5648d2e31a8de0e Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Mon, 4 May 2015 15:01:29 -0700 Subject: cmd/compile: don't generate algs for ... args Note that this is only safe because the compiler generates multiple distinct gc.Types. 
If we switch to having canonical gc.Types, then this will need to be updated to handle the case in which the user uses both map[[n]T]S and also calls a function f(...T) with n arguments. In that case, the runtime needs algs for [n]T, but this could mark the sole [n]T type as Noalg. This is a general problem with having a single bool to represent whether alg generation is needed for a type. Cuts 17k off cmd/go and 13k off golang.org/x/tools/cmd/godoc, approx 0.14% and 0.07% respectively. For #6853 and #9930 Change-Id: Iccb6b9fd88ade5497d7090528a903816d340bf0a Reviewed-on: https://go-review.googlesource.com/19770 Reviewed-by: David Crawshaw Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/walk.go | 1 + 1 file changed, 1 insertion(+) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 7c4d74c8c3..7e160bdd94 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1739,6 +1739,7 @@ func mkdotargslice(lr0, nn []*Node, l *Field, fp int, init *Nodes, ddd *Node) [] } tslice := typSlice(l.Type.Elem()) + tslice.Noalg = true var n *Node if len(lr0) == 0 { -- cgit v1.3 From a6abc1cd70bf561d1e4c10d53499733c502c30b5 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Mon, 4 May 2015 15:01:03 -0700 Subject: cmd/compile: don't generate algs for map buckets Note that this is only safe because the compiler generates multiple distinct gc.Types. If we switch to having canonical gc.Types, then this will need to be updated to handle the case in which the user uses both map[T]S and also map[[8]T]S. In that case, the runtime needs algs for [8]T, but this could mark the sole [8]T type as Noalg. This is a general problem with having a single bool to represent whether alg generation is needed for a type. Cuts 5k off cmd/go and 22k off golang.org/x/tools/cmd/godoc, approx 0.04% and 0.12% respectively. 
For #6853 and #9930 Change-Id: I30a15ec72ecb62e2aa053260a7f0f75015fc0ade Reviewed-on: https://go-review.googlesource.com/19769 Reviewed-by: David Crawshaw Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/reflect.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 49d55091ff..727b9939e9 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -102,13 +102,18 @@ func mapbucket(t *Type) *Type { valtype = Ptrto(valtype) } + field := make([]*Field, 0, 5) + // The first field is: uint8 topbits[BUCKETSIZE]. arr := typArray(Types[TUINT8], BUCKETSIZE) - field := make([]*Field, 0, 5) field = append(field, makefield("topbits", arr)) + arr = typArray(keytype, BUCKETSIZE) + arr.Noalg = true field = append(field, makefield("keys", arr)) + arr = typArray(valtype, BUCKETSIZE) + arr.Noalg = true field = append(field, makefield("values", arr)) // Make sure the overflow pointer is the last memory in the struct, -- cgit v1.3 From f027241445f3064b41f5d5e68f86370d37bad0be Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Fri, 22 Apr 2016 07:14:10 -0700 Subject: cmd/compile: give gc.Op a String method, use it Passes toolstash -cmp. 
Change-Id: I915e76374fd64aa2597e6fa47e4fa95ca00ca643 Reviewed-on: https://go-review.googlesource.com/22380 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: David Crawshaw --- src/cmd/compile/internal/gc/bexport.go | 6 +++--- src/cmd/compile/internal/gc/bimport.go | 2 +- src/cmd/compile/internal/gc/fmt.go | 4 ++++ src/cmd/compile/internal/gc/obj.go | 6 +++--- src/cmd/compile/internal/gc/sinit.go | 2 +- src/cmd/compile/internal/gc/ssa.go | 14 +++++++------- 6 files changed, 19 insertions(+), 15 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 496491131a..6b83e70403 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -1274,7 +1274,7 @@ func (p *exporter) expr(n *Node) { p.op(ODCLCONST) default: - Fatalf("exporter: CANNOT EXPORT: %s\nPlease notify gri@\n", opnames[n.Op]) + Fatalf("exporter: CANNOT EXPORT: %s\nPlease notify gri@\n", n.Op) } } @@ -1404,7 +1404,7 @@ func (p *exporter) stmt(n *Node) { p.expr(n.Left) default: - Fatalf("exporter: CANNOT EXPORT: %s\nPlease notify gri@\n", opnames[n.Op]) + Fatalf("exporter: CANNOT EXPORT: %s\nPlease notify gri@\n", n.Op) } } @@ -1492,7 +1492,7 @@ func (p *exporter) bool(b bool) bool { func (p *exporter) op(op Op) { if p.trace { p.tracef("[") - defer p.tracef("= %s] ", opnames[op]) + defer p.tracef("= %s] ", op) } p.int(int(op)) diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index e05329bb12..cbd3fb0e87 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -1013,7 +1013,7 @@ func (p *importer) node() *Node { return nil default: - Fatalf("importer: %s (%d) node not yet supported", opnames[op], op) + Fatalf("importer: %s (%d) node not yet supported", op, op) panic("unreachable") // satisfy compiler } } diff --git a/src/cmd/compile/internal/gc/fmt.go 
b/src/cmd/compile/internal/gc/fmt.go index 5f6edd1018..a14b837584 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -453,6 +453,10 @@ func (e EType) String() string { return Econv(e) } +func (o Op) String() string { + return Oconv(o, 0) +} + // Fmt "%S": syms func symfmt(s *Sym, flag FmtFlag) string { if s.Pkg != nil && flag&FmtShort == 0 { diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 378ac0d2c3..c1132b6aac 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -334,7 +334,7 @@ func dsymptrOffLSym(s *obj.LSym, off int, x *obj.LSym, xoff int) int { func gdata(nam *Node, nr *Node, wid int) { if nam.Op != ONAME { - Fatalf("gdata nam op %v", opnames[nam.Op]) + Fatalf("gdata nam op %v", nam.Op) } if nam.Sym == nil { Fatalf("gdata nil nam sym") @@ -372,7 +372,7 @@ func gdata(nam *Node, nr *Node, wid int) { case OADDR: if nr.Left.Op != ONAME { - Fatalf("gdata ADDR left op %s", opnames[nr.Left.Op]) + Fatalf("gdata ADDR left op %s", nr.Left.Op) } to := nr.Left Linksym(nam.Sym).WriteAddr(Ctxt, nam.Xoffset, wid, Linksym(to.Sym), to.Xoffset) @@ -384,7 +384,7 @@ func gdata(nam *Node, nr *Node, wid int) { Linksym(nam.Sym).WriteAddr(Ctxt, nam.Xoffset, wid, Linksym(funcsym(nr.Sym)), nr.Xoffset) default: - Fatalf("gdata unhandled op %v %v\n", nr, opnames[nr.Op]) + Fatalf("gdata unhandled op %v %v\n", nr, nr.Op) } } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index cc1d1962d2..cb43855514 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -1033,7 +1033,7 @@ func anylit(ctxt int, n *Node, var_ *Node, init *Nodes) { t := n.Type switch n.Op { default: - Fatalf("anylit: not lit, op=%v node=%v", opnames[n.Op], n) + Fatalf("anylit: not lit, op=%v node=%v", n.Op, n) case OPTRLIT: if !t.IsPtr() { diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go 
index f989ad0375..964818a082 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -969,7 +969,7 @@ func (s *state) stmt(n *Node) { s.nilCheck(p) default: - s.Unimplementedf("unhandled stmt %s", opnames[n.Op]) + s.Unimplementedf("unhandled stmt %s", n.Op) } } @@ -1247,7 +1247,7 @@ func (s *state) ssaOp(op Op, t *Type) ssa.Op { etype := s.concreteEtype(t) x, ok := opToSSA[opAndType{op, etype}] if !ok { - s.Unimplementedf("unhandled binary op %s %s", opnames[op], Econv(etype)) + s.Unimplementedf("unhandled binary op %s %s", op, Econv(etype)) } return x } @@ -1405,7 +1405,7 @@ func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op { etype2 := s.concreteEtype(u) x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}] if !ok { - s.Unimplementedf("unhandled shift op %s etype=%s/%s", opnames[op], Econv(etype1), Econv(etype2)) + s.Unimplementedf("unhandled shift op %s etype=%s/%s", op, Econv(etype1), Econv(etype2)) } return x } @@ -1414,7 +1414,7 @@ func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op { etype1 := s.concreteEtype(t) x, ok := opToSSA[opAndType{op, etype1}] if !ok { - s.Unimplementedf("unhandled rotate op %s etype=%s", opnames[op], Econv(etype1)) + s.Unimplementedf("unhandled rotate op %s etype=%s", op, Econv(etype1)) } return x } @@ -1729,7 +1729,7 @@ func (s *state) expr(n *Node) *ssa.Value { case ONE: return s.newValue1(ssa.OpNot, Types[TBOOL], c) default: - s.Fatalf("ordered complex compare %s", opnames[n.Op]) + s.Fatalf("ordered complex compare %s", n.Op) } } return s.newValue2(s.ssaOp(n.Op, n.Left.Type), Types[TBOOL], a, b) @@ -2088,7 +2088,7 @@ func (s *state) expr(n *Node) *ssa.Value { return s.append(n, false) default: - s.Unimplementedf("unhandled expr %s", opnames[n.Op]) + s.Unimplementedf("unhandled expr %s", n.Op) return nil } } @@ -2632,7 +2632,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { case sym != nil: call = s.newValue1A(ssa.OpStaticCall, ssa.TypeMem, sym, s.mem()) default: - 
Fatalf("bad call type %s %v", opnames[n.Op], n) + Fatalf("bad call type %s %v", n.Op, n) } call.AuxInt = stksize // Call operations carry the argsize of the callee along with them -- cgit v1.3 From fca0f331c8b99d476c871d8718e296b32ad24073 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Fri, 22 Apr 2016 08:39:56 -0700 Subject: cmd/compile: use gc.Etype's String method Passes toolstash -cmp. Change-Id: I42c962cc5a3ffec2969f223cf238c2fdadbf5857 Reviewed-on: https://go-review.googlesource.com/22381 Run-TryBot: Josh Bleecher Snyder Reviewed-by: Brad Fitzpatrick TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/fmt.go | 8 ++++---- src/cmd/compile/internal/gc/reg.go | 8 ++++---- src/cmd/compile/internal/gc/ssa.go | 10 +++++----- src/cmd/compile/internal/gc/subr.go | 2 +- src/cmd/compile/internal/gc/type.go | 2 +- 5 files changed, 15 insertions(+), 15 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index a14b837584..9bba709649 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -586,7 +586,7 @@ func typefmt(t *Type, flag FmtFlag) string { if fmtmode == FDbg { fmtmode = 0 - str := Econv(t.Etype) + "-" + typefmt(t, flag) + str := t.Etype.String() + "-" + typefmt(t, flag) fmtmode = FDbg return str } @@ -748,18 +748,18 @@ func typefmt(t *Type, flag FmtFlag) string { if fmtmode == FExp { Fatalf("cannot use TDDDFIELD with old exporter") } - return fmt.Sprintf("%v <%v> %v", Econv(t.Etype), t.Sym, t.DDDField()) + return fmt.Sprintf("%v <%v> %v", t.Etype, t.Sym, t.DDDField()) case Txxx: return "Txxx" } if fmtmode == FExp { - Fatalf("missing %v case during export", Econv(t.Etype)) + Fatalf("missing %v case during export", t.Etype) } // Don't know how to handle - fall back to detailed prints. 
- return fmt.Sprintf("%v <%v> %v", Econv(t.Etype), t.Sym, t.Elem()) + return fmt.Sprintf("%v <%v> %v", t.Etype, t.Sym, t.Elem()) } // Statements which may be rendered with a simplestmt as init. diff --git a/src/cmd/compile/internal/gc/reg.go b/src/cmd/compile/internal/gc/reg.go index 138ad683c5..5763f79de1 100644 --- a/src/cmd/compile/internal/gc/reg.go +++ b/src/cmd/compile/internal/gc/reg.go @@ -488,7 +488,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits { } if Debug['R'] != 0 { - fmt.Printf("bit=%2d et=%v w=%d+%d %v %v flag=%d\n", i, Econv(et), o, w, Nconv(node, FmtSharp), Ctxt.Dconv(a), v.addr) + fmt.Printf("bit=%2d et=%v w=%d+%d %v %v flag=%d\n", i, et, o, w, Nconv(node, FmtSharp), Ctxt.Dconv(a), v.addr) } Ostats.Nvar++ @@ -652,7 +652,7 @@ func allreg(b uint64, r *Rgn) uint64 { r.regno = 0 switch v.etype { default: - Fatalf("unknown etype %d/%v", Bitno(b), Econv(v.etype)) + Fatalf("unknown etype %d/%v", Bitno(b), v.etype) case TINT8, TUINT8, @@ -1147,7 +1147,7 @@ func regopt(firstp *obj.Prog) { } if Debug['R'] != 0 && Debug['v'] != 0 { - fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, Econv(v.etype), v.width, v.node, v.offset) + fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, v.etype, v.width, v.node, v.offset) } } @@ -1358,7 +1358,7 @@ loop2: if rgp.regno != 0 { if Debug['R'] != 0 && Debug['v'] != 0 { v := &vars[rgp.varno] - fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", v.node, v.offset, rgp.varno, Econv(v.etype), obj.Rconv(int(rgp.regno)), usedreg, vreg) + fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", v.node, v.offset, rgp.varno, v.etype, obj.Rconv(int(rgp.regno)), usedreg, vreg) } paint3(rgp.enter, int(rgp.varno), vreg, int(rgp.regno)) diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 964818a082..e177ceda01 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1247,7 +1247,7 @@ func (s 
*state) ssaOp(op Op, t *Type) ssa.Op { etype := s.concreteEtype(t) x, ok := opToSSA[opAndType{op, etype}] if !ok { - s.Unimplementedf("unhandled binary op %s %s", op, Econv(etype)) + s.Unimplementedf("unhandled binary op %s %s", op, etype) } return x } @@ -1405,7 +1405,7 @@ func (s *state) ssaShiftOp(op Op, t *Type, u *Type) ssa.Op { etype2 := s.concreteEtype(u) x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}] if !ok { - s.Unimplementedf("unhandled shift op %s etype=%s/%s", op, Econv(etype1), Econv(etype2)) + s.Unimplementedf("unhandled shift op %s etype=%s/%s", op, etype1, etype2) } return x } @@ -1414,7 +1414,7 @@ func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op { etype1 := s.concreteEtype(t) x, ok := opToSSA[opAndType{op, etype1}] if !ok { - s.Unimplementedf("unhandled rotate op %s etype=%s", op, Econv(etype1)) + s.Unimplementedf("unhandled rotate op %s etype=%s", op, etype1) } return x } @@ -1561,7 +1561,7 @@ func (s *state) expr(n *Node) *ssa.Value { return nil } if etypesign(from.Etype) != etypesign(to.Etype) { - s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, Econv(from.Etype), to, Econv(to.Etype)) + s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype) return nil } @@ -1706,7 +1706,7 @@ func (s *state) expr(n *Node) *ssa.Value { s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) } - s.Unimplementedf("unhandled OCONV %s -> %s", Econv(n.Left.Type.Etype), Econv(n.Type.Etype)) + s.Unimplementedf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype) return nil case ODOTTYPE: diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index cb0c86ee81..5fc16858d9 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -615,7 +615,7 @@ func cplxsubtype(et EType) EType { return TFLOAT64 } - Fatalf("cplxsubtype: %v\n", Econv(et)) + Fatalf("cplxsubtype: %v\n", et) return 0 } diff --git a/src/cmd/compile/internal/gc/type.go 
b/src/cmd/compile/internal/gc/type.go index baac282c0a..1401332632 100644 --- a/src/cmd/compile/internal/gc/type.go +++ b/src/cmd/compile/internal/gc/type.go @@ -845,7 +845,7 @@ func (t *Type) Alignment() int64 { } func (t *Type) SimpleString() string { - return Econv(t.Etype) + return t.Etype.String() } // Compare compares types for purposes of the SSA back -- cgit v1.3 From 1da62afeef1fdfb822afc4af0feb2eece10d8c7d Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Sun, 24 Apr 2016 13:50:26 -0700 Subject: cmd/compile: replace len(Nodes.Slice()) with Nodes.Len() Generated with eg: func before(n gc.Nodes) int { return len(n.Slice()) } func after(n gc.Nodes) int { return n.Len() } Change-Id: Ifdf01915e60069166afe96aa7b1d08720bf62fc5 Reviewed-on: https://go-review.googlesource.com/22420 Run-TryBot: Josh Bleecher Snyder Reviewed-by: Brad Fitzpatrick --- src/cmd/compile/internal/gc/bexport.go | 2 +- src/cmd/compile/internal/gc/bimport.go | 4 ++-- src/cmd/compile/internal/gc/closure.go | 2 +- src/cmd/compile/internal/gc/esc.go | 6 +++--- src/cmd/compile/internal/gc/export.go | 4 ++-- src/cmd/compile/internal/gc/fmt.go | 4 ++-- src/cmd/compile/internal/gc/inl.go | 12 ++++++------ src/cmd/compile/internal/gc/main.go | 2 +- src/cmd/compile/internal/gc/order.go | 2 +- src/cmd/compile/internal/gc/parser.go | 2 +- src/cmd/compile/internal/gc/pgen.go | 2 +- src/cmd/compile/internal/gc/typecheck.go | 2 +- src/cmd/compile/internal/gc/walk.go | 2 +- 13 files changed, 23 insertions(+), 23 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 6b83e70403..f0907b45eb 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -528,7 +528,7 @@ func (p *exporter) pos(n *Node) { } func isInlineable(n *Node) bool { - if exportInlined && n != nil && n.Func != nil && len(n.Func.Inl.Slice()) != 0 { + if exportInlined && n != nil && n.Func != nil && 
n.Func.Inl.Len() != 0 { // when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet. // currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package if Debug['l'] < 2 { diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index cbd3fb0e87..1219d8d370 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -271,7 +271,7 @@ func (p *importer) obj(tag int) { if Debug['E'] > 0 { fmt.Printf("import [%q] func %v \n", importpkg.Path, n) - if Debug['m'] > 2 && len(n.Func.Inl.Slice()) != 0 { + if Debug['m'] > 2 && n.Func.Inl.Len() != 0 { fmt.Printf("inl body: %v\n", n.Func.Inl) } } @@ -368,7 +368,7 @@ func (p *importer) typ() *Type { if Debug['E'] > 0 { fmt.Printf("import [%q] meth %v \n", importpkg.Path, n) - if Debug['m'] > 2 && len(n.Func.Inl.Slice()) != 0 { + if Debug['m'] > 2 && n.Func.Inl.Len() != 0 { fmt.Printf("inl body: %v\n", n.Func.Inl) } } diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index db4eb3f14d..d2cb9ebf1e 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -194,7 +194,7 @@ func makeclosure(func_ *Node) *Node { xfunc.Nbody.Set(func_.Nbody.Slice()) xfunc.Func.Dcl = append(func_.Func.Dcl, xfunc.Func.Dcl...) 
func_.Func.Dcl = nil - if len(xfunc.Nbody.Slice()) == 0 { + if xfunc.Nbody.Len() == 0 { Fatalf("empty body - won't generate any code") } xfunc = typecheck(xfunc, Etop) diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index d7a63668a6..2f4e5fb6ef 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -522,7 +522,7 @@ func escfunc(e *EscState, func_ *Node) { if ln.Type != nil && !haspointers(ln.Type) { break } - if len(Curfn.Nbody.Slice()) == 0 && !Curfn.Noescape { + if Curfn.Nbody.Len() == 0 && !Curfn.Noescape { ln.Esc = EscHeap } else { ln.Esc = EscNone // prime for escflood later @@ -1469,7 +1469,7 @@ func esccall(e *EscState, n *Node, up *Node) { nE := e.nodeEscState(n) if fn != nil && fn.Op == ONAME && fn.Class == PFUNC && - fn.Name.Defn != nil && len(fn.Name.Defn.Nbody.Slice()) != 0 && fn.Name.Param.Ntype != nil && fn.Name.Defn.Esc < EscFuncTagged { + fn.Name.Defn != nil && fn.Name.Defn.Nbody.Len() != 0 && fn.Name.Param.Ntype != nil && fn.Name.Defn.Esc < EscFuncTagged { if Debug['m'] > 3 { fmt.Printf("%v::esccall:: %v in recursive group\n", linestr(lineno), Nconv(n, FmtShort)) } @@ -1969,7 +1969,7 @@ func esctag(e *EscState, func_ *Node) { // External functions are assumed unsafe, // unless //go:noescape is given before the declaration. 
- if len(func_.Nbody.Slice()) == 0 { + if func_.Nbody.Len() == 0 { if func_.Noescape { for _, t := range func_.Type.Params().Fields().Slice() { if haspointers(t.Type) { diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 1dd02aef1f..a275377598 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -252,7 +252,7 @@ func dumpexportvar(s *Sym) { dumpexporttype(t) if t.Etype == TFUNC && n.Class == PFUNC { - if n.Func != nil && len(n.Func.Inl.Slice()) != 0 { + if n.Func != nil && n.Func.Inl.Len() != 0 { // when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet. // currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package if Debug['l'] < 2 { @@ -323,7 +323,7 @@ func dumpexporttype(t *Type) { if f.Nointerface { exportf("\t//go:nointerface\n") } - if f.Type.Nname() != nil && len(f.Type.Nname().Func.Inl.Slice()) != 0 { // nname was set by caninl + if f.Type.Nname() != nil && f.Type.Nname().Func.Inl.Len() != 0 { // nname was set by caninl // when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet. 
// currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index 9bba709649..12ae915fb2 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -1196,7 +1196,7 @@ func exprfmt(n *Node, prec int) string { if fmtmode == FErr { return "func literal" } - if len(n.Nbody.Slice()) != 0 { + if n.Nbody.Len() != 0 { return fmt.Sprintf("%v { %v }", n.Type, n.Nbody) } return fmt.Sprintf("%v { %v }", n.Type, n.Name.Param.Closure.Nbody) @@ -1577,7 +1577,7 @@ func nodedump(n *Node, flag FmtFlag) string { fmt.Fprintf(&buf, "%v-rlist%v", Oconv(n.Op, 0), n.Rlist) } - if len(n.Nbody.Slice()) != 0 { + if n.Nbody.Len() != 0 { indent(&buf) fmt.Fprintf(&buf, "%v-body%v", Oconv(n.Op, 0), n.Nbody) } diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index f9e425618b..da026e1396 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -100,7 +100,7 @@ func caninl(fn *Node) { } // If fn has no body (is defined outside of Go), cannot inline it. - if len(fn.Nbody.Slice()) == 0 { + if fn.Nbody.Len() == 0 { return } @@ -173,12 +173,12 @@ func ishairy(n *Node, budget *int) bool { switch n.Op { // Call is okay if inlinable and we have the budget for the body. 
case OCALLFUNC: - if n.Left.Func != nil && len(n.Left.Func.Inl.Slice()) != 0 { + if n.Left.Func != nil && n.Left.Func.Inl.Len() != 0 { *budget -= int(n.Left.Func.InlCost) break } if n.Left.Op == ONAME && n.Left.Left != nil && n.Left.Left.Op == OTYPE && n.Left.Right != nil && n.Left.Right.Op == ONAME { // methods called as functions - if n.Left.Sym.Def != nil && len(n.Left.Sym.Def.Func.Inl.Slice()) != 0 { + if n.Left.Sym.Def != nil && n.Left.Sym.Def.Func.Inl.Len() != 0 { *budget -= int(n.Left.Sym.Def.Func.InlCost) break } @@ -195,7 +195,7 @@ func ishairy(n *Node, budget *int) bool { if n.Left.Type.Nname() == nil { Fatalf("no function definition for [%p] %v\n", n.Left.Type, Tconv(n.Left.Type, FmtSign)) } - if len(n.Left.Type.Nname().Func.Inl.Slice()) != 0 { + if n.Left.Type.Nname().Func.Inl.Len() != 0 { *budget -= int(n.Left.Type.Nname().Func.InlCost) break } @@ -453,7 +453,7 @@ func inlnode(n *Node) *Node { if Debug['m'] > 3 { fmt.Printf("%v:call to func %v\n", n.Line(), Nconv(n.Left, FmtSign)) } - if n.Left.Func != nil && len(n.Left.Func.Inl.Slice()) != 0 && !isIntrinsicCall1(n) { // normal case + if n.Left.Func != nil && n.Left.Func.Inl.Len() != 0 && !isIntrinsicCall1(n) { // normal case n = mkinlcall(n, n.Left, n.Isddd) } else if n.Left.Op == ONAME && n.Left.Left != nil && n.Left.Left.Op == OTYPE && n.Left.Right != nil && n.Left.Right.Op == ONAME { // methods called as functions if n.Left.Sym.Def != nil { @@ -520,7 +520,7 @@ var inlgen int // n.Left = mkinlcall1(n.Left, fn, isddd) func mkinlcall1(n *Node, fn *Node, isddd bool) *Node { // For variadic fn. 
- if len(fn.Func.Inl.Slice()) == 0 { + if fn.Func.Inl.Len() == 0 { return n } diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index f6de58462e..c3a0481ffd 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -412,7 +412,7 @@ func Main() { // Typecheck imported function bodies if debug['l'] > 1, // otherwise lazily when used or re-exported. for _, n := range importlist { - if len(n.Func.Inl.Slice()) != 0 { + if n.Func.Inl.Len() != 0 { saveerrors() typecheckinl(n) } diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 2b9546f4f5..00ba4308cb 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -1146,7 +1146,7 @@ func orderexpr(n *Node, order *Order, lhs *Node) *Node { } case OCLOSURE: - if n.Noescape && len(n.Func.Cvars.Slice()) > 0 { + if n.Noescape && n.Func.Cvars.Len() > 0 { prealloc[n] = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type } diff --git a/src/cmd/compile/internal/gc/parser.go b/src/cmd/compile/internal/gc/parser.go index ae4b497b7b..766f352d33 100644 --- a/src/cmd/compile/internal/gc/parser.go +++ b/src/cmd/compile/internal/gc/parser.go @@ -2906,7 +2906,7 @@ func (p *parser) hidden_import() { if Debug['E'] > 0 { fmt.Printf("import [%q] func %v \n", importpkg.Path, s2) - if Debug['m'] > 2 && len(s2.Func.Inl.Slice()) != 0 { + if Debug['m'] > 2 && s2.Func.Inl.Len() != 0 { fmt.Printf("inl body:%v\n", s2.Func.Inl) } } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 7b9b91e7b0..bba4ff5e48 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -363,7 +363,7 @@ func compile(fn *Node) { Curfn = fn dowidth(Curfn.Type) - if len(fn.Nbody.Slice()) == 0 { + if fn.Nbody.Len() == 0 { if pure_go || strings.HasPrefix(fn.Func.Nname.Sym.Name, "init.") { Yyerror("missing function body for %q", 
fn.Func.Nname.Sym.Name) return diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 9bf4f58412..49b991c5a5 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -3953,7 +3953,7 @@ func (n *Node) isterminating() bool { } func checkreturn(fn *Node) { - if fn.Type.Results().NumFields() != 0 && len(fn.Nbody.Slice()) != 0 { + if fn.Type.Results().NumFields() != 0 && fn.Nbody.Len() != 0 { markbreaklist(fn.Nbody, nil) if !fn.Nbody.isterminating() { yyerrorl(fn.Func.Endlineno, "missing return at end of function") diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 7e160bdd94..04ccfad971 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -70,7 +70,7 @@ func walk(fn *Node) { } heapmoves() - if Debug['W'] != 0 && len(Curfn.Func.Enter.Slice()) > 0 { + if Debug['W'] != 0 && Curfn.Func.Enter.Len() > 0 { s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym) dumplist(s, Curfn.Func.Enter) } -- cgit v1.3 From 758431fe8c2906690a209e33531d8b95e381c8c1 Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Sun, 24 Apr 2016 14:09:03 -0700 Subject: cmd/compile: minor cleanup in inl * Make budget an int32 to avoid needless conversions. * Introduce some temporary variables to reduce repetition. * If ... args are present, they will be the last argument to the function. No need to scan all arguments. Passes toolstash -cmp. 
Change-Id: I55203609f5d2f25a4e238cd48c63214651120cfc Reviewed-on: https://go-review.googlesource.com/22421 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: Brad Fitzpatrick --- src/cmd/compile/internal/gc/inl.go | 48 ++++++++++++++++++++----------------- src/cmd/compile/internal/gc/type.go | 6 +++++ 2 files changed, 32 insertions(+), 22 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index da026e1396..c863b84203 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -110,8 +110,9 @@ func caninl(fn *Node) { // can't handle ... args yet if Debug['l'] < 3 { - for _, t := range fn.Type.Params().Fields().Slice() { - if t.Isddd { + f := fn.Type.Params().Fields() + if len := f.Len(); len > 0 { + if t := f.Index(len - 1); t.Isddd { return } } @@ -128,7 +129,7 @@ func caninl(fn *Node) { } const maxBudget = 80 - budget := maxBudget // allowed hairyness + budget := int32(maxBudget) // allowed hairyness if ishairylist(fn.Nbody, &budget) || budget < 0 { return } @@ -136,27 +137,29 @@ func caninl(fn *Node) { savefn := Curfn Curfn = fn - fn.Func.Nname.Func.Inl.Set(fn.Nbody.Slice()) - fn.Nbody.Set(inlcopylist(fn.Func.Nname.Func.Inl.Slice())) - inldcl := inlcopylist(fn.Func.Nname.Name.Defn.Func.Dcl) - fn.Func.Nname.Func.Inldcl.Set(inldcl) - fn.Func.Nname.Func.InlCost = int32(maxBudget - budget) + n := fn.Func.Nname + + n.Func.Inl.Set(fn.Nbody.Slice()) + fn.Nbody.Set(inlcopylist(n.Func.Inl.Slice())) + inldcl := inlcopylist(n.Name.Defn.Func.Dcl) + n.Func.Inldcl.Set(inldcl) + n.Func.InlCost = maxBudget - budget // hack, TODO, check for better way to link method nodes back to the thing with the ->inl // this is so export can find the body of a method - fn.Type.SetNname(fn.Func.Nname) + fn.Type.SetNname(n) if Debug['m'] > 1 { - fmt.Printf("%v: can inline %v as: %v { %v }\n", fn.Line(), Nconv(fn.Func.Nname, FmtSharp), Tconv(fn.Type, FmtSharp), 
Hconv(fn.Func.Nname.Func.Inl, FmtSharp)) + fmt.Printf("%v: can inline %v as: %v { %v }\n", fn.Line(), Nconv(n, FmtSharp), Tconv(fn.Type, FmtSharp), Hconv(n.Func.Inl, FmtSharp)) } else if Debug['m'] != 0 { - fmt.Printf("%v: can inline %v\n", fn.Line(), fn.Func.Nname) + fmt.Printf("%v: can inline %v\n", fn.Line(), n) } Curfn = savefn } // Look for anything we want to punt on. -func ishairylist(ll Nodes, budget *int) bool { +func ishairylist(ll Nodes, budget *int32) bool { for _, n := range ll.Slice() { if ishairy(n, budget) { return true @@ -165,7 +168,7 @@ func ishairylist(ll Nodes, budget *int) bool { return false } -func ishairy(n *Node, budget *int) bool { +func ishairy(n *Node, budget *int32) bool { if n == nil { return false } @@ -173,13 +176,13 @@ func ishairy(n *Node, budget *int) bool { switch n.Op { // Call is okay if inlinable and we have the budget for the body. case OCALLFUNC: - if n.Left.Func != nil && n.Left.Func.Inl.Len() != 0 { - *budget -= int(n.Left.Func.InlCost) + if fn := n.Left.Func; fn != nil && fn.Inl.Len() != 0 { + *budget -= fn.InlCost break } if n.Left.Op == ONAME && n.Left.Left != nil && n.Left.Left.Op == OTYPE && n.Left.Right != nil && n.Left.Right.Op == ONAME { // methods called as functions - if n.Left.Sym.Def != nil && n.Left.Sym.Def.Func.Inl.Len() != 0 { - *budget -= int(n.Left.Sym.Def.Func.InlCost) + if d := n.Left.Sym.Def; d != nil && d.Func.Inl.Len() != 0 { + *budget -= d.Func.InlCost break } } @@ -189,14 +192,15 @@ func ishairy(n *Node, budget *int) bool { // Call is okay if inlinable and we have the budget for the body. 
case OCALLMETH: - if n.Left.Type == nil { + t := n.Left.Type + if t == nil { Fatalf("no function type for [%p] %v\n", n.Left, Nconv(n.Left, FmtSign)) } - if n.Left.Type.Nname() == nil { - Fatalf("no function definition for [%p] %v\n", n.Left.Type, Tconv(n.Left.Type, FmtSign)) + if t.Nname() == nil { + Fatalf("no function definition for [%p] %v\n", t, Tconv(t, FmtSign)) } - if n.Left.Type.Nname().Func.Inl.Len() != 0 { - *budget -= int(n.Left.Type.Nname().Func.InlCost) + if inlfn := t.Nname().Func; inlfn.Inl.Len() != 0 { + *budget -= inlfn.InlCost break } if Debug['l'] < 4 { diff --git a/src/cmd/compile/internal/gc/type.go b/src/cmd/compile/internal/gc/type.go index 1401332632..da295bba78 100644 --- a/src/cmd/compile/internal/gc/type.go +++ b/src/cmd/compile/internal/gc/type.go @@ -332,6 +332,12 @@ func (f *Fields) Slice() []*Field { return *f.s } +// Index returns the i'th element of Fields. +// It panics if f does not have at least i+1 elements. +func (f *Fields) Index(i int) *Field { + return (*f.s)[i] +} + // Set sets f to a slice. // This takes ownership of the slice. func (f *Fields) Set(s []*Field) { -- cgit v1.3 From f12bd8a5a8f8485f13793f03d4803a924923badb Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Thu, 21 Apr 2016 11:55:33 -0700 Subject: cmd/compile: encapsulate OSLICE* representation As a nice side-effect, this allows us to unify several code paths. The terminology (low, high, max, simple slice expr, full slice expr) is taken from the spec and the examples in the spec. This is a trial run. The plan, probably for Go 1.8, is to change slice expressions to use Node.List instead of OKEY, and to do some similar tree structure changes for other ops. Passes toolstash -cmp. No performance change. all.bash passes with GO_GCFLAGS=-newexport. 
Updates #15350 Change-Id: Ic1efdc36e79cdb95ae1636e9817a3ac8f83ab1ac Reviewed-on: https://go-review.googlesource.com/22425 Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/bexport.go | 7 ++- src/cmd/compile/internal/gc/bimport.go | 12 +++- src/cmd/compile/internal/gc/cgen.go | 10 +--- src/cmd/compile/internal/gc/fmt.go | 34 +++++++---- src/cmd/compile/internal/gc/inl.go | 2 +- src/cmd/compile/internal/gc/order.go | 26 +++----- src/cmd/compile/internal/gc/parser.go | 15 ++--- src/cmd/compile/internal/gc/racewalk.go | 6 +- src/cmd/compile/internal/gc/sinit.go | 7 ++- src/cmd/compile/internal/gc/ssa.go | 47 ++++++--------- src/cmd/compile/internal/gc/subr.go | 62 +++++++++++++++++++ src/cmd/compile/internal/gc/typecheck.go | 100 +++++++------------------------ src/cmd/compile/internal/gc/walk.go | 76 ++++++++++++----------- 13 files changed, 205 insertions(+), 199 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index f0907b45eb..bf1354c71f 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -1191,12 +1191,15 @@ func (p *exporter) expr(n *Node) { case OSLICE, OSLICESTR, OSLICEARR: p.op(OSLICE) p.expr(n.Left) - p.expr(n.Right) + low, high, _ := n.SliceBounds() + p.exprsOrNil(low, high) case OSLICE3, OSLICE3ARR: p.op(OSLICE3) p.expr(n.Left) - p.expr(n.Right) + low, high, max := n.SliceBounds() + p.exprsOrNil(low, high) + p.expr(max) case OCOPY, OCOMPLEX: p.op(op) diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index 1219d8d370..3665bbdec2 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -822,9 +822,19 @@ func (p *importer) node() *Node { // case OINDEX, OINDEXMAP, OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR: // unreachable - mapped to cases below by exporter - case OINDEX, OSLICE, OSLICE3: + case OINDEX: return Nod(op, 
p.expr(), p.expr()) + case OSLICE, OSLICE3: + n := Nod(op, p.expr(), nil) + low, high := p.exprsOrNil() + var max *Node + if n.Op.IsSlice3() { + max = p.expr() + } + n.SetSliceBounds(low, high, max) + return n + case OCOPY, OCOMPLEX: n := builtinCall(op) n.List.Set([]*Node{p.expr(), p.expr()}) diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go index a9393a6d9e..3d3600a079 100644 --- a/src/cmd/compile/internal/gc/cgen.go +++ b/src/cmd/compile/internal/gc/cgen.go @@ -3106,15 +3106,7 @@ func cgen_slice(n, res *Node, wb bool) { x.Xoffset -= 2 * int64(Widthptr) } - var x1, x2, x3 *Node // unevaluated index arguments - x1 = n.Right.Left - switch n.Op { - default: - x2 = n.Right.Right - case OSLICE3, OSLICE3ARR: - x2 = n.Right.Right.Left - x3 = n.Right.Right.Right - } + x1, x2, x3 := n.SliceBounds() // unevaluated index arguments // load computes src into targ, but if src refers to the len or cap of n.Left, // load copies those from xlen, xcap, loading xlen if needed. 
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index 12ae915fb2..27ece1d393 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -1312,17 +1312,29 @@ func exprfmt(n *Node, prec int) string { f += fmt.Sprintf(".(%v)", n.Type) return f - case OINDEX, - OINDEXMAP, - OSLICE, - OSLICESTR, - OSLICEARR, - OSLICE3, - OSLICE3ARR: - var f string - f += exprfmt(n.Left, nprec) - f += fmt.Sprintf("[%v]", n.Right) - return f + case OINDEX, OINDEXMAP: + return fmt.Sprintf("%s[%v]", exprfmt(n.Left, nprec), n.Right) + + case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR: + var buf bytes.Buffer + buf.WriteString(exprfmt(n.Left, nprec)) + buf.WriteString("[") + low, high, max := n.SliceBounds() + if low != nil { + buf.WriteString(low.String()) + } + buf.WriteString(":") + if high != nil { + buf.WriteString(high.String()) + } + if n.Op.IsSlice3() { + buf.WriteString(":") + if max != nil { + buf.WriteString(max.String()) + } + } + buf.WriteString("]") + return buf.String() case OCOPY, OCOMPLEX: return fmt.Sprintf("%v(%v, %v)", Oconv(n.Op, FmtSharp), n.Left, n.Right) diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index c863b84203..10b61377ca 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -754,7 +754,7 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node { vararrtype := typArray(varargtype.Elem(), int64(varargcount)) as.Right = Nod(OCOMPLIT, nil, typenod(vararrtype)) as.Right.List.Set(varargs) - as.Right = Nod(OSLICE, as.Right, Nod(OKEY, nil, nil)) + as.Right = Nod(OSLICE, as.Right, nil) } as = typecheck(as, Etop) diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 00ba4308cb..7373479ac9 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -1123,24 +1123,16 @@ func orderexpr(n *Node, order *Order, lhs *Node) *Node { n = ordercopyexpr(n, 
n.Type, order, 0) } - case OSLICE, OSLICEARR, OSLICESTR: + case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR: n.Left = orderexpr(n.Left, order, nil) - n.Right.Left = orderexpr(n.Right.Left, order, nil) - n.Right.Left = ordercheapexpr(n.Right.Left, order) - n.Right.Right = orderexpr(n.Right.Right, order, nil) - n.Right.Right = ordercheapexpr(n.Right.Right, order) - if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.Left) { - n = ordercopyexpr(n, n.Type, order, 0) - } - - case OSLICE3, OSLICE3ARR: - n.Left = orderexpr(n.Left, order, nil) - n.Right.Left = orderexpr(n.Right.Left, order, nil) - n.Right.Left = ordercheapexpr(n.Right.Left, order) - n.Right.Right.Left = orderexpr(n.Right.Right.Left, order, nil) - n.Right.Right.Left = ordercheapexpr(n.Right.Right.Left, order) - n.Right.Right.Right = orderexpr(n.Right.Right.Right, order, nil) - n.Right.Right.Right = ordercheapexpr(n.Right.Right.Right, order) + low, high, max := n.SliceBounds() + low = orderexpr(low, order, nil) + low = ordercheapexpr(low, order) + high = orderexpr(high, order, nil) + high = ordercheapexpr(high, order) + max = orderexpr(max, order, nil) + max = ordercheapexpr(max, order) + n.SetSliceBounds(low, high, max) if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.Left) { n = ordercopyexpr(n, n.Type, order, 0) } diff --git a/src/cmd/compile/internal/gc/parser.go b/src/cmd/compile/internal/gc/parser.go index 766f352d33..97a18497ff 100644 --- a/src/cmd/compile/internal/gc/parser.go +++ b/src/cmd/compile/internal/gc/parser.go @@ -1408,20 +1408,17 @@ loop: } x = Nod(OINDEX, x, i) case 1: - i := index[0] - j := index[1] - x = Nod(OSLICE, x, Nod(OKEY, i, j)) + x = Nod(OSLICE, x, nil) + x.SetSliceBounds(index[0], index[1], nil) case 2: - i := index[0] - j := index[1] - k := index[2] - if j == nil { + if index[1] == nil { Yyerror("middle index required in 3-index slice") } - if k == nil { + if index[2] == nil { Yyerror("final index required in 3-index slice") } - x = Nod(OSLICE3, x, Nod(OKEY, 
i, Nod(OKEY, j, k))) + x = Nod(OSLICE3, x, nil) + x.SetSliceBounds(index[0], index[1], index[2]) default: panic("unreachable") diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index a8a5e92485..620bcb34a3 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -307,7 +307,11 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR: instrumentnode(&n.Left, init, 0, 0) - instrumentnode(&n.Right, init, 0, 0) + low, high, max := n.SliceBounds() + instrumentnode(&low, init, 0, 0) + instrumentnode(&high, init, 0, 0) + instrumentnode(&max, init, 0, 0) + n.SetSliceBounds(low, high, max) goto ret case OKEY: diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index cb43855514..c6f2acffbf 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -727,7 +727,7 @@ func slicelit(ctxt int, n *Node, var_ *Node, init *Nodes) { arraylit(ctxt, 2, n, vstat, init) // copy static to slice - a := Nod(OSLICE, vstat, Nod(OKEY, nil, nil)) + a := Nod(OSLICE, vstat, nil) a = Nod(OAS, var_, a) a = typecheck(a, Etop) @@ -851,7 +851,7 @@ func slicelit(ctxt int, n *Node, var_ *Node, init *Nodes) { } // make slice out of heap (6) - a = Nod(OAS, var_, Nod(OSLICE, vauto, Nod(OKEY, nil, nil))) + a = Nod(OAS, var_, Nod(OSLICE, vauto, nil)) a = typecheck(a, Etop) a = orderstmtinplace(a) @@ -1391,7 +1391,8 @@ func genAsInitNoCheck(n *Node, reportOnly bool) bool { fallthrough case OSLICEARR: - if nr.Right.Op != OKEY || nr.Right.Left != nil || nr.Right.Right != nil { + low, high, _ := nr.SliceBounds() + if low != nil || high != nil { return false } nr = nr.Left diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index e177ceda01..5c367c7268 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -736,14 +736,7 
@@ func (s *state) stmt(n *Node) { if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) { // We're assigning a slicing operation back to its source. // Don't write back fields we aren't changing. See issue #14855. - i := rhs.Right.Left - var j, k *Node - if rhs.Op == OSLICE3 { - j = rhs.Right.Right.Left - k = rhs.Right.Right.Right - } else { - j = rhs.Right.Right - } + i, j, k := rhs.SliceBounds() if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) { // [0:...] is the same as [:...] i = nil @@ -2038,38 +2031,34 @@ func (s *state) expr(n *Node) *ssa.Value { } return s.newValue2(ssa.OpIMake, n.Type, tab, data) - case OSLICE, OSLICEARR: + case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR: v := s.expr(n.Left) - var i, j *ssa.Value - if n.Right.Left != nil { - i = s.extendIndex(s.expr(n.Right.Left)) + var i, j, k *ssa.Value + low, high, max := n.SliceBounds() + if low != nil { + i = s.extendIndex(s.expr(low)) } - if n.Right.Right != nil { - j = s.extendIndex(s.expr(n.Right.Right)) + if high != nil { + j = s.extendIndex(s.expr(high)) } - p, l, c := s.slice(n.Left.Type, v, i, j, nil) + if max != nil { + k = s.extendIndex(s.expr(max)) + } + p, l, c := s.slice(n.Left.Type, v, i, j, k) return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) + case OSLICESTR: v := s.expr(n.Left) var i, j *ssa.Value - if n.Right.Left != nil { - i = s.extendIndex(s.expr(n.Right.Left)) + low, high, _ := n.SliceBounds() + if low != nil { + i = s.extendIndex(s.expr(low)) } - if n.Right.Right != nil { - j = s.extendIndex(s.expr(n.Right.Right)) + if high != nil { + j = s.extendIndex(s.expr(high)) } p, l, _ := s.slice(n.Left.Type, v, i, j, nil) return s.newValue2(ssa.OpStringMake, n.Type, p, l) - case OSLICE3, OSLICE3ARR: - v := s.expr(n.Left) - var i *ssa.Value - if n.Right.Left != nil { - i = s.extendIndex(s.expr(n.Right.Left)) - } - j := s.extendIndex(s.expr(n.Right.Right.Left)) - k := 
s.extendIndex(s.expr(n.Right.Right.Right)) - p, l, c := s.slice(n.Left.Type, v, i, j, k) - return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c) case OCALLFUNC: if isIntrinsicCall1(n) { diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 5fc16858d9..38f21eb585 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1024,6 +1024,68 @@ func Is64(t *Type) bool { return false } +// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max]. +// n must be a slice expression. max is nil if n is a simple slice expression. +func (n *Node) SliceBounds() (low, high, max *Node) { + switch n.Op { + case OSLICE, OSLICEARR, OSLICESTR: + if n.Right == nil { + return nil, nil, nil + } + if n.Right.Op != OKEY { + Fatalf("SliceBounds right %s", opnames[n.Right.Op]) + } + return n.Right.Left, n.Right.Right, nil + case OSLICE3, OSLICE3ARR: + if n.Right.Op != OKEY || n.Right.Right.Op != OKEY { + Fatalf("SliceBounds right %s %s", opnames[n.Right.Op], opnames[n.Right.Right.Op]) + } + return n.Right.Left, n.Right.Right.Left, n.Right.Right.Right + } + Fatalf("SliceBounds op %s: %v", n.Op, n) + return nil, nil, nil +} + +// SetSliceBounds sets n's slice bounds, where n is a slice expression. +// n must be a slice expression. If max is non-nil, n must be a full slice expression. +func (n *Node) SetSliceBounds(low, high, max *Node) { + switch n.Op { + case OSLICE, OSLICEARR, OSLICESTR: + if max != nil { + Fatalf("SetSliceBounds %s given three bounds", n.Op) + } + if n.Right == nil { + n.Right = Nod(OKEY, low, high) + return + } + n.Right.Left = low + n.Right.Right = high + return + case OSLICE3, OSLICE3ARR: + if n.Right == nil { + n.Right = Nod(OKEY, low, Nod(OKEY, high, max)) + } + n.Right.Left = low + n.Right.Right.Left = high + n.Right.Right.Right = max + return + } + Fatalf("SetSliceBounds op %s: %v", n.Op, n) +} + +// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR). 
+// o must be a slicing op. +func (o Op) IsSlice3() bool { + switch o { + case OSLICE, OSLICEARR, OSLICESTR: + return false + case OSLICE3, OSLICE3ARR: + return true + } + Fatalf("IsSlice3 op %v", o) + return false +} + // Is a conversion between t1 and t2 a no-op? func Noconv(t1 *Type, t2 *Type) bool { e1 := Simtype[t1.Etype] diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 49b991c5a5..8860c5d803 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -1105,14 +1105,19 @@ OpSwitch: n.Type = nil break OpSwitch - case OSLICE: + case OSLICE, OSLICE3: ok |= Erv n.Left = typecheck(n.Left, top) - n.Right.Left = typecheck(n.Right.Left, Erv) - n.Right.Right = typecheck(n.Right.Right, Erv) + low, high, max := n.SliceBounds() + hasmax := n.Op.IsSlice3() + low = typecheck(low, Erv) + high = typecheck(high, Erv) + max = typecheck(max, Erv) n.Left = defaultlit(n.Left, nil) - n.Right.Left = indexlit(n.Right.Left) - n.Right.Right = indexlit(n.Right.Right) + low = indexlit(low) + high = indexlit(high) + max = indexlit(max) + n.SetSliceBounds(low, high, max) l := n.Left if l.Type.IsArray() { if !islvalue(n.Left) { @@ -1134,78 +1139,22 @@ OpSwitch: } var tp *Type if t.IsString() { + if hasmax { + Yyerror("invalid operation %v (3-index slice of string)", n) + n.Type = nil + return n + } n.Type = t n.Op = OSLICESTR } else if t.IsPtr() && t.Elem().IsArray() { tp = t.Elem() n.Type = typSlice(tp.Elem()) dowidth(n.Type) - n.Op = OSLICEARR - } else if t.IsSlice() { - n.Type = t - } else { - Yyerror("cannot slice %v (type %v)", l, t) - n.Type = nil - return n - } - - lo := n.Right.Left - if lo != nil && !checksliceindex(l, lo, tp) { - n.Type = nil - return n - } - hi := n.Right.Right - if hi != nil && !checksliceindex(l, hi, tp) { - n.Type = nil - return n - } - if !checksliceconst(lo, hi) { - n.Type = nil - return n - } - break OpSwitch - - case OSLICE3: - ok |= Erv - n.Left = 
typecheck(n.Left, top) - n.Right.Left = typecheck(n.Right.Left, Erv) - n.Right.Right.Left = typecheck(n.Right.Right.Left, Erv) - n.Right.Right.Right = typecheck(n.Right.Right.Right, Erv) - n.Left = defaultlit(n.Left, nil) - n.Right.Left = indexlit(n.Right.Left) - n.Right.Right.Left = indexlit(n.Right.Right.Left) - n.Right.Right.Right = indexlit(n.Right.Right.Right) - l := n.Left - if l.Type.IsArray() { - if !islvalue(n.Left) { - Yyerror("invalid operation %v (slice of unaddressable value)", n) - n.Type = nil - return n + if hasmax { + n.Op = OSLICE3ARR + } else { + n.Op = OSLICEARR } - - n.Left = Nod(OADDR, n.Left, nil) - n.Left.Implicit = true - n.Left = typecheck(n.Left, Erv) - l = n.Left - } - - t := l.Type - if t == nil { - n.Type = nil - return n - } - if t.IsString() { - Yyerror("invalid operation %v (3-index slice of string)", n) - n.Type = nil - return n - } - - var tp *Type - if t.IsPtr() && t.Elem().IsArray() { - tp = t.Elem() - n.Type = typSlice(tp.Elem()) - dowidth(n.Type) - n.Op = OSLICE3ARR } else if t.IsSlice() { n.Type = t } else { @@ -1214,22 +1163,19 @@ OpSwitch: return n } - lo := n.Right.Left - if lo != nil && !checksliceindex(l, lo, tp) { + if low != nil && !checksliceindex(l, low, tp) { n.Type = nil return n } - mid := n.Right.Right.Left - if mid != nil && !checksliceindex(l, mid, tp) { + if high != nil && !checksliceindex(l, high, tp) { n.Type = nil return n } - hi := n.Right.Right.Right - if hi != nil && !checksliceindex(l, hi, tp) { + if max != nil && !checksliceindex(l, max, tp) { n.Type = nil return n } - if !checksliceconst(lo, hi) || !checksliceconst(lo, mid) || !checksliceconst(mid, hi) { + if !checksliceconst(low, high) || !checksliceconst(low, max) || !checksliceconst(high, max) { n.Type = nil return n } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 04ccfad971..e8fee67d05 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -1241,35 +1241,28 @@ 
opswitch: case ORECV: Fatalf("walkexpr ORECV") // should see inside OAS only - case OSLICE, OSLICEARR, OSLICESTR: + case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR: n.Left = walkexpr(n.Left, init) - n.Right.Left = walkexpr(n.Right.Left, init) - if n.Right.Left != nil && iszero(n.Right.Left) { - // Reduce x[0:j] to x[:j]. - n.Right.Left = nil - } - n.Right.Right = walkexpr(n.Right.Right, init) - n = reduceSlice(n) - - case OSLICE3, OSLICE3ARR: - n.Left = walkexpr(n.Left, init) - n.Right.Left = walkexpr(n.Right.Left, init) - if n.Right.Left != nil && iszero(n.Right.Left) { - // Reduce x[0:j:k] to x[:j:k]. - n.Right.Left = nil - } - n.Right.Right.Left = walkexpr(n.Right.Right.Left, init) - n.Right.Right.Right = walkexpr(n.Right.Right.Right, init) - - r := n.Right.Right.Right - if r != nil && r.Op == OCAP && samesafeexpr(n.Left, r.Left) { - // Reduce x[i:j:cap(x)] to x[i:j]. - n.Right.Right = n.Right.Right.Left - if n.Op == OSLICE3 { - n.Op = OSLICE - } else { - n.Op = OSLICEARR + low, high, max := n.SliceBounds() + low = walkexpr(low, init) + if low != nil && iszero(low) { + // Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k]. + low = nil + } + high = walkexpr(high, init) + max = walkexpr(max, init) + n.SetSliceBounds(low, high, max) + if n.Op.IsSlice3() { + if max != nil && max.Op == OCAP && samesafeexpr(n.Left, max.Left) { + // Reduce x[i:j:cap(x)] to x[i:j]. + if n.Op == OSLICE3 { + n.Op = OSLICE + } else { + n.Op = OSLICEARR + } + n = reduceSlice(n) } + } else { n = reduceSlice(n) } @@ -1425,8 +1418,9 @@ opswitch: a := Nod(OAS, var_, nil) // zero temp a = typecheck(a, Etop) init.Append(a) - r := Nod(OSLICE, var_, Nod(OKEY, nil, l)) // arr[:l] - r = conv(r, n.Type) // in case n.Type is named. + r := Nod(OSLICE, var_, nil) // arr[:l] + r.SetSliceBounds(nil, l, nil) + r = conv(r, n.Type) // in case n.Type is named. 
r = typecheck(r, Erv) r = walkexpr(r, init) n = r @@ -1596,13 +1590,15 @@ opswitch: return n } +// TODO(josharian): combine this with its caller and simplify func reduceSlice(n *Node) *Node { - r := n.Right.Right - if r != nil && r.Op == OLEN && samesafeexpr(n.Left, r.Left) { + low, high, max := n.SliceBounds() + if high != nil && high.Op == OLEN && samesafeexpr(n.Left, high.Left) { // Reduce x[i:len(x)] to x[i:]. - n.Right.Right = nil + high = nil } - if (n.Op == OSLICE || n.Op == OSLICESTR) && n.Right.Left == nil && n.Right.Right == nil { + n.SetSliceBounds(low, high, max) + if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil { // Reduce x[:] to x. if Debug_slice > 0 { Warn("slice: omit slice operation") @@ -2816,14 +2812,15 @@ func appendslice(n *Node, init *Nodes) *Node { l = append(l, nif) // s = s[:n] - nt := Nod(OSLICE, s, Nod(OKEY, nil, nn)) + nt := Nod(OSLICE, s, nil) + nt.SetSliceBounds(nil, nn, nil) nt.Etype = 1 l = append(l, Nod(OAS, s, nt)) if haspointers(l1.Type.Elem()) { // copy(s[len(l1):], l2) - nptr1 := Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), nil)) - + nptr1 := Nod(OSLICE, s, nil) + nptr1.SetSliceBounds(Nod(OLEN, l1, nil), nil, nil) nptr1.Etype = 1 nptr2 := l2 fn := syslook("typedslicecopy") @@ -2835,8 +2832,8 @@ func appendslice(n *Node, init *Nodes) *Node { } else if instrumenting { // rely on runtime to instrument copy. 
// copy(s[len(l1):], l2) - nptr1 := Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), nil)) - + nptr1 := Nod(OSLICE, s, nil) + nptr1.SetSliceBounds(Nod(OLEN, l1, nil), nil, nil) nptr1.Etype = 1 nptr2 := l2 var fn *Node @@ -2950,7 +2947,8 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node { nn := temp(Types[TINT]) l = append(l, Nod(OAS, nn, Nod(OLEN, ns, nil))) // n = len(s) - nx = Nod(OSLICE, ns, Nod(OKEY, nil, Nod(OADD, nn, na))) // ...s[:n+argc] + nx = Nod(OSLICE, ns, nil) // ...s[:n+argc] + nx.SetSliceBounds(nil, Nod(OADD, nn, na), nil) nx.Etype = 1 l = append(l, Nod(OAS, ns, nx)) // s = s[:n+argc] -- cgit v1.3 From 8b92397bcdcd5d6de3f72951a5514933fee32eb2 Mon Sep 17 00:00:00 2001 From: Alexandru Moșoi Date: Sun, 24 Apr 2016 21:21:07 +0200 Subject: cmd/compile: introduce bool operations. Introduce OrB, EqB, NeqB, AndB to handle bool operations. Change-Id: I53e4d5125a8090d5eeb4576db619103f19fff58d Reviewed-on: https://go-review.googlesource.com/22412 Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/ssa.go | 4 +- src/cmd/compile/internal/ssa/gen/AMD64.rules | 4 + src/cmd/compile/internal/ssa/gen/generic.rules | 16 +- src/cmd/compile/internal/ssa/gen/genericOps.go | 9 +- src/cmd/compile/internal/ssa/opGen.go | 24 +++ src/cmd/compile/internal/ssa/phiopt.go | 4 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 76 +++++++++ src/cmd/compile/internal/ssa/rewritegeneric.go | 218 +++++++++++++------------ test/phiopt.go | 10 +- 9 files changed, 243 insertions(+), 122 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 5c367c7268..7763b18ce2 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1121,7 +1121,7 @@ var opToSSA = map[opAndType]ssa.Op{ opAndType{OXOR, TINT64}: ssa.OpXor64, opAndType{OXOR, TUINT64}: ssa.OpXor64, - opAndType{OEQ, TBOOL}: ssa.OpEq8, + opAndType{OEQ, TBOOL}: ssa.OpEqB, opAndType{OEQ, TINT8}: ssa.OpEq8, opAndType{OEQ, 
TUINT8}: ssa.OpEq8, opAndType{OEQ, TINT16}: ssa.OpEq16, @@ -1141,7 +1141,7 @@ var opToSSA = map[opAndType]ssa.Op{ opAndType{OEQ, TFLOAT64}: ssa.OpEq64F, opAndType{OEQ, TFLOAT32}: ssa.OpEq32F, - opAndType{ONE, TBOOL}: ssa.OpNeq8, + opAndType{ONE, TBOOL}: ssa.OpNeqB, opAndType{ONE, TINT8}: ssa.OpNeq8, opAndType{ONE, TUINT8}: ssa.OpNeq8, opAndType{ONE, TINT16}: ssa.OpNeq16, diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index c0e83d7adc..9d405131c0 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -281,6 +281,7 @@ (Eq32 x y) -> (SETEQ (CMPL x y)) (Eq16 x y) -> (SETEQ (CMPW x y)) (Eq8 x y) -> (SETEQ (CMPB x y)) +(EqB x y) -> (SETEQ (CMPB x y)) (EqPtr x y) -> (SETEQ (CMPQ x y)) (Eq64F x y) -> (SETEQF (UCOMISD x y)) (Eq32F x y) -> (SETEQF (UCOMISS x y)) @@ -289,6 +290,7 @@ (Neq32 x y) -> (SETNE (CMPL x y)) (Neq16 x y) -> (SETNE (CMPW x y)) (Neq8 x y) -> (SETNE (CMPB x y)) +(NeqB x y) -> (SETNE (CMPB x y)) (NeqPtr x y) -> (SETNE (CMPQ x y)) (Neq64F x y) -> (SETNEF (UCOMISD x y)) (Neq32F x y) -> (SETNEF (UCOMISS x y)) @@ -366,6 +368,8 @@ (Move [size] dst src mem) && (size > 16*64 || config.noDuffDevice) && size%8 == 0 -> (REPMOVSQ dst src (MOVQconst [size/8]) mem) +(AndB x y) -> (ANDL x y) +(OrB x y) -> (ORL x y) (Not x) -> (XORLconst [1] x) (OffPtr [off] ptr) && is32Bit(off) -> (ADDQconst [off] ptr) diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules index 3270ec1534..b33037f100 100644 --- a/src/cmd/compile/internal/ssa/gen/generic.rules +++ b/src/cmd/compile/internal/ssa/gen/generic.rules @@ -114,7 +114,7 @@ (Lsh16x16 (Rsh16Ux16 (Lsh16x16 x (Const16 [c1])) (Const16 [c2])) (Const16 [c3])) && uint16(c1) >= uint16(c2) && uint16(c3) >= uint16(c2) -> (Lsh16x16 x (Const16 [int64(int16(c1-c2+c3))])) (Lsh8x8 (Rsh8Ux8 (Lsh8x8 x (Const8 [c1])) (Const8 [c2])) (Const8 [c3])) && uint8(c1) >= uint8(c2) && 
uint8(c3) >= uint8(c2) -> (Lsh8x8 x (Const8 [int64(int8(c1-c2+c3))])) -// Fold IsInBounds when the range of the index cannot exceed the limt. +// Fold IsInBounds when the range of the index cannot exceed the limit. (IsInBounds (ZeroExt8to32 _) (Const32 [c])) && (1 << 8) <= c -> (ConstBool [1]) (IsInBounds (ZeroExt8to64 _) (Const64 [c])) && (1 << 8) <= c -> (ConstBool [1]) (IsInBounds (ZeroExt16to32 _) (Const32 [c])) && (1 << 16) <= c -> (ConstBool [1]) @@ -141,17 +141,17 @@ (Eq32 x x) -> (ConstBool [1]) (Eq16 x x) -> (ConstBool [1]) (Eq8 x x) -> (ConstBool [1]) -(Eq8 (ConstBool [c]) (ConstBool [d])) -> (ConstBool [b2i(c == d)]) -(Eq8 (ConstBool [0]) x) -> (Not x) -(Eq8 (ConstBool [1]) x) -> x +(EqB (ConstBool [c]) (ConstBool [d])) -> (ConstBool [b2i(c == d)]) +(EqB (ConstBool [0]) x) -> (Not x) +(EqB (ConstBool [1]) x) -> x (Neq64 x x) -> (ConstBool [0]) (Neq32 x x) -> (ConstBool [0]) (Neq16 x x) -> (ConstBool [0]) (Neq8 x x) -> (ConstBool [0]) -(Neq8 (ConstBool [c]) (ConstBool [d])) -> (ConstBool [b2i(c != d)]) -(Neq8 (ConstBool [0]) x) -> x -(Neq8 (ConstBool [1]) x) -> (Not x) +(NeqB (ConstBool [c]) (ConstBool [d])) -> (ConstBool [b2i(c != d)]) +(NeqB (ConstBool [0]) x) -> x +(NeqB (ConstBool [1]) x) -> (Not x) (Eq64 (Const64 [c]) (Add64 (Const64 [d]) x)) -> (Eq64 (Const64 [c-d]) x) (Eq32 (Const32 [c]) (Add32 (Const32 [d]) x)) -> (Eq32 (Const32 [int64(int32(c-d))]) x) @@ -168,13 +168,11 @@ (Eq32 x (Const32 [c])) && x.Op != OpConst32 -> (Eq32 (Const32 [c]) x) (Eq16 x (Const16 [c])) && x.Op != OpConst16 -> (Eq16 (Const16 [c]) x) (Eq8 x (Const8 [c])) && x.Op != OpConst8 -> (Eq8 (Const8 [c]) x) -(Eq8 x (ConstBool [c])) && x.Op != OpConstBool -> (Eq8 (ConstBool [c]) x) (Neq64 x (Const64 [c])) && x.Op != OpConst64 -> (Neq64 (Const64 [c]) x) (Neq32 x (Const32 [c])) && x.Op != OpConst32 -> (Neq32 (Const32 [c]) x) (Neq16 x (Const16 [c])) && x.Op != OpConst16 -> (Neq16 (Const16 [c]) x) (Neq8 x (Const8 [c])) && x.Op != OpConst8 -> (Neq8 (Const8 [c]) x) -(Neq8 x (ConstBool 
[c])) && x.Op != OpConstBool -> (Neq8 (ConstBool [c]) x) // AddPtr is not canonicalized because nilcheck ptr checks the first argument to be non-nil. (Add64 x (Const64 [c])) && x.Op != OpConst64 -> (Add64 (Const64 [c]) x) diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go index e6a0e8355b..88ae8b189d 100644 --- a/src/cmd/compile/internal/ssa/gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/gen/genericOps.go @@ -237,9 +237,14 @@ var genericOps = []opData{ {name: "Geq32F", argLength: 2}, {name: "Geq64F", argLength: 2}, - // 1-input ops - {name: "Not", argLength: 1}, // !arg0, boolean + // boolean ops + {name: "AndB", argLength: 2}, // arg0 && arg1 (not shortcircuited) + {name: "OrB", argLength: 2}, // arg0 || arg1 (not shortcircuited) + {name: "EqB", argLength: 2}, // arg0 == arg1 + {name: "NeqB", argLength: 2}, // arg0 != arg1 + {name: "Not", argLength: 1}, // !arg0, boolean + // 1-input ops {name: "Neg8", argLength: 1}, // -arg0 {name: "Neg16", argLength: 1}, {name: "Neg32", argLength: 1}, diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 70af757194..a53899ec52 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -503,6 +503,10 @@ const ( OpGeq64U OpGeq32F OpGeq64F + OpAndB + OpOrB + OpEqB + OpNeqB OpNot OpNeg8 OpNeg16 @@ -4773,6 +4777,26 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "AndB", + argLen: 2, + generic: true, + }, + { + name: "OrB", + argLen: 2, + generic: true, + }, + { + name: "EqB", + argLen: 2, + generic: true, + }, + { + name: "NeqB", + argLen: 2, + generic: true, + }, { name: "Not", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/phiopt.go b/src/cmd/compile/internal/ssa/phiopt.go index aae83bacf2..3b6728ca86 100644 --- a/src/cmd/compile/internal/ssa/phiopt.go +++ b/src/cmd/compile/internal/ssa/phiopt.go @@ -84,7 +84,7 @@ func phiopt(f *Func) { // of value are not 
seen if a is false. if v.Args[reverse].Op == OpConstBool && v.Args[reverse].AuxInt == 1 { if tmp := v.Args[1-reverse]; f.sdom.isAncestorEq(tmp.Block, b) { - v.reset(OpOr8) + v.reset(OpOrB) v.SetArgs2(b0.Control, tmp) if f.pass.debug > 0 { f.Config.Warnl(b.Line, "converted OpPhi to %v", v.Op) @@ -100,7 +100,7 @@ func phiopt(f *Func) { // of value are not seen if a is false. if v.Args[1-reverse].Op == OpConstBool && v.Args[1-reverse].AuxInt == 0 { if tmp := v.Args[reverse]; f.sdom.isAncestorEq(tmp.Block, b) { - v.reset(OpAnd8) + v.reset(OpAndB) v.SetArgs2(b0.Control, tmp) if f.pass.debug > 0 { f.Config.Warnl(b.Line, "converted OpPhi to %v", v.Op) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index e2c4240ae3..f8cefb7eab 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -48,6 +48,8 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpAnd64(v, config) case OpAnd8: return rewriteValueAMD64_OpAnd8(v, config) + case OpAndB: + return rewriteValueAMD64_OpAndB(v, config) case OpAvg64u: return rewriteValueAMD64_OpAvg64u(v, config) case OpBswap32: @@ -164,6 +166,8 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpEq64F(v, config) case OpEq8: return rewriteValueAMD64_OpEq8(v, config) + case OpEqB: + return rewriteValueAMD64_OpEqB(v, config) case OpEqPtr: return rewriteValueAMD64_OpEqPtr(v, config) case OpGeq16: @@ -512,6 +516,8 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpNeq64F(v, config) case OpNeq8: return rewriteValueAMD64_OpNeq8(v, config) + case OpNeqB: + return rewriteValueAMD64_OpNeqB(v, config) case OpNeqPtr: return rewriteValueAMD64_OpNeqPtr(v, config) case OpNilCheck: @@ -536,6 +542,8 @@ func rewriteValueAMD64(v *Value, config *Config) bool { return rewriteValueAMD64_OpOr64(v, config) case OpOr8: return rewriteValueAMD64_OpOr8(v, config) + 
case OpOrB: + return rewriteValueAMD64_OpOrB(v, config) case OpRsh16Ux16: return rewriteValueAMD64_OpRsh16Ux16(v, config) case OpRsh16Ux32: @@ -1709,6 +1717,22 @@ func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool { } return false } +func rewriteValueAMD64_OpAndB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (AndB x y) + // cond: + // result: (ANDL x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDL) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool { b := v.Block _ = b @@ -3560,6 +3584,24 @@ func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool { } return false } +func rewriteValueAMD64_OpEqB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (EqB x y) + // cond: + // result: (SETEQ (CMPB x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool { b := v.Block _ = b @@ -12820,6 +12862,24 @@ func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool { } return false } +func rewriteValueAMD64_OpNeqB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NeqB x y) + // cond: + // result: (SETNE (CMPB x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool { b := v.Block _ = b @@ -13914,6 +13974,22 @@ func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool { } return false } +func rewriteValueAMD64_OpOrB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (OrB x y) + // cond: + // result: (ORL x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ORL) + 
v.AddArg(x) + v.AddArg(y) + return true + } + return false +} func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool { b := v.Block _ = b diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 54a6815c93..eb8f704124 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -66,6 +66,8 @@ func rewriteValuegeneric(v *Value, config *Config) bool { return rewriteValuegeneric_OpEq64(v, config) case OpEq8: return rewriteValuegeneric_OpEq8(v, config) + case OpEqB: + return rewriteValuegeneric_OpEqB(v, config) case OpEqInter: return rewriteValuegeneric_OpEqInter(v, config) case OpEqPtr: @@ -218,6 +220,8 @@ func rewriteValuegeneric(v *Value, config *Config) bool { return rewriteValuegeneric_OpNeq64(v, config) case OpNeq8: return rewriteValuegeneric_OpNeq8(v, config) + case OpNeqB: + return rewriteValuegeneric_OpNeqB(v, config) case OpNeqInter: return rewriteValuegeneric_OpNeqInter(v, config) case OpNeqPtr: @@ -2348,57 +2352,6 @@ func rewriteValuegeneric_OpEq8(v *Value, config *Config) bool { v.AuxInt = 1 return true } - // match: (Eq8 (ConstBool [c]) (ConstBool [d])) - // cond: - // result: (ConstBool [b2i(c == d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConstBool { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConstBool { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true - } - // match: (Eq8 (ConstBool [0]) x) - // cond: - // result: (Not x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConstBool { - break - } - if v_0.AuxInt != 0 { - break - } - x := v.Args[1] - v.reset(OpNot) - v.AddArg(x) - return true - } - // match: (Eq8 (ConstBool [1]) x) - // cond: - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpConstBool { - break - } - if v_0.AuxInt != 1 { - break - } - x := v.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Eq8 (Const8 [c]) 
(Add8 (Const8 [d]) x)) // cond: // result: (Eq8 (Const8 [int64(int8(c-d))]) x) @@ -2491,6 +2444,62 @@ func rewriteValuegeneric_OpEq8(v *Value, config *Config) bool { } return false } +func rewriteValuegeneric_OpEqB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (EqB (ConstBool [c]) (ConstBool [d])) + // cond: + // result: (ConstBool [b2i(c == d)]) + for { + v_0 := v.Args[0] + if v_0.Op != OpConstBool { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConstBool { + break + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c == d) + return true + } + // match: (EqB (ConstBool [0]) x) + // cond: + // result: (Not x) + for { + v_0 := v.Args[0] + if v_0.Op != OpConstBool { + break + } + if v_0.AuxInt != 0 { + break + } + x := v.Args[1] + v.reset(OpNot) + v.AddArg(x) + return true + } + // match: (EqB (ConstBool [1]) x) + // cond: + // result: x + for { + v_0 := v.Args[0] + if v_0.Op != OpConstBool { + break + } + if v_0.AuxInt != 1 { + break + } + x := v.Args[1] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} func rewriteValuegeneric_OpEqInter(v *Value, config *Config) bool { b := v.Block _ = b @@ -5707,57 +5716,6 @@ func rewriteValuegeneric_OpNeq8(v *Value, config *Config) bool { v.AuxInt = 0 return true } - // match: (Neq8 (ConstBool [c]) (ConstBool [d])) - // cond: - // result: (ConstBool [b2i(c != d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConstBool { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConstBool { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true - } - // match: (Neq8 (ConstBool [0]) x) - // cond: - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpConstBool { - break - } - if v_0.AuxInt != 0 { - break - } - x := v.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Neq8 (ConstBool [1]) x) - // cond: - // result: (Not x) - for { - v_0 := v.Args[0] - if v_0.Op 
!= OpConstBool { - break - } - if v_0.AuxInt != 1 { - break - } - x := v.Args[1] - v.reset(OpNot) - v.AddArg(x) - return true - } // match: (Neq8 (Const8 [c]) (Add8 (Const8 [d]) x)) // cond: // result: (Neq8 (Const8 [int64(int8(c-d))]) x) @@ -5850,6 +5808,62 @@ func rewriteValuegeneric_OpNeq8(v *Value, config *Config) bool { } return false } +func rewriteValuegeneric_OpNeqB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NeqB (ConstBool [c]) (ConstBool [d])) + // cond: + // result: (ConstBool [b2i(c != d)]) + for { + v_0 := v.Args[0] + if v_0.Op != OpConstBool { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConstBool { + break + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c != d) + return true + } + // match: (NeqB (ConstBool [0]) x) + // cond: + // result: x + for { + v_0 := v.Args[0] + if v_0.Op != OpConstBool { + break + } + if v_0.AuxInt != 0 { + break + } + x := v.Args[1] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (NeqB (ConstBool [1]) x) + // cond: + // result: (Not x) + for { + v_0 := v.Args[0] + if v_0.Op != OpConstBool { + break + } + if v_0.AuxInt != 1 { + break + } + x := v.Args[1] + v.reset(OpNot) + v.AddArg(x) + return true + } + return false +} func rewriteValuegeneric_OpNeqInter(v *Value, config *Config) bool { b := v.Block _ = b diff --git a/test/phiopt.go b/test/phiopt.go index 4347909752..21dd13155c 100644 --- a/test/phiopt.go +++ b/test/phiopt.go @@ -49,7 +49,7 @@ func f3(a, b int) bool { //go:noinline func f4(a, b bool) bool { - return a || b // ERROR "converted OpPhi to Or8$" + return a || b // ERROR "converted OpPhi to OrB$" } //go:noinline @@ -60,7 +60,7 @@ func f5or(a int, b bool) bool { } else { x = b } - return x // ERROR "converted OpPhi to Or8$" + return x // ERROR "converted OpPhi to OrB$" } //go:noinline @@ -71,7 +71,7 @@ func f5and(a int, b bool) bool { } else { x = false } - return x // ERROR "converted OpPhi to And8$" + return x // ERROR 
"converted OpPhi to AndB$" } //go:noinline @@ -96,12 +96,12 @@ func f6and(a int, b bool) bool { //go:noinline func f7or(a bool, b bool) bool { - return a || b // ERROR "converted OpPhi to Or8$" + return a || b // ERROR "converted OpPhi to OrB$" } //go:noinline func f7and(a bool, b bool) bool { - return a && b // ERROR "converted OpPhi to And8$" + return a && b // ERROR "converted OpPhi to AndB$" } func main() { -- cgit v1.3 From e48a2958d1cfa4ae75dead9d8e65489b53c70f14 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Mon, 25 Apr 2016 13:24:48 -0700 Subject: cmd/compile: treat empty and absent struct field tags as identical Fixes #15439. Change-Id: I5a32384c46e20f8db6968e5a9e854c45ab262fe4 Reviewed-on: https://go-review.googlesource.com/22429 Reviewed-by: Josh Bleecher Snyder Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/bexport.go | 15 ++------------- src/cmd/compile/internal/gc/bimport.go | 13 +++---------- src/cmd/compile/internal/gc/dcl.go | 2 +- src/cmd/compile/internal/gc/esc.go | 16 ++++++++-------- src/cmd/compile/internal/gc/fmt.go | 4 ++-- src/cmd/compile/internal/gc/order.go | 2 +- src/cmd/compile/internal/gc/reflect.go | 7 ++----- src/cmd/compile/internal/gc/subr.go | 6 +----- src/cmd/compile/internal/gc/type.go | 12 ++---------- src/cmd/compile/internal/gc/walk.go | 2 +- test/fixedbugs/issue15439.go | 25 +++++++++++++++++++++++++ 11 files changed, 48 insertions(+), 56 deletions(-) create mode 100644 test/fixedbugs/issue15439.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index bf1354c71f..c635129ccc 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -720,18 +720,7 @@ func (p *exporter) field(f *Field) { p.pos(f.Sym.Def) p.fieldName(f.Sym, f) p.typ(f.Type) - // TODO(gri) Do we care that a non-present tag cannot be distinguished - // from a present but empty ta string? 
(reflect doesn't seem to make - // a difference). Investigate. - p.note(f.Note) -} - -func (p *exporter) note(n *string) { - var s string - if n != nil { - s = *n - } - p.string(s) + p.string(f.Note) } func (p *exporter) methodList(t *Type) { @@ -847,7 +836,7 @@ func (p *exporter) param(q *Field, n int, numbered bool) { // TODO(gri) This is compiler-specific (escape info). // Move into compiler-specific section eventually? // (Not having escape info causes tests to fail, e.g. runtime GCInfoTest) - p.note(q.Note) + p.string(q.Note) } func parName(f *Field, numbered bool) string { diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index 3665bbdec2..7fed8b1342 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -457,7 +457,7 @@ func (p *importer) field() *Node { p.pos() sym := p.fieldName() typ := p.typ() - note := p.note() + note := p.string() var n *Node if sym.Name != "" { @@ -475,18 +475,11 @@ func (p *importer) field() *Node { n = embedded(s, pkg) n.Right = typenod(typ) } - n.SetVal(note) + n.SetVal(Val{U: note}) return n } -func (p *importer) note() (v Val) { - if s := p.string(); s != "" { - v.U = s - } - return -} - // parser.go:hidden_interfacedcl_list func (p *importer) methodList() (methods []*Node) { if n := p.int(); n > 0 { @@ -572,7 +565,7 @@ func (p *importer) param(named bool) *Node { // TODO(gri) This is compiler-specific (escape info). // Move into compiler-specific section eventually? 
- n.SetVal(p.note()) + n.SetVal(Val{U: p.string()}) return n } diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 7f6e167488..ca9caf69d7 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -757,7 +757,7 @@ func structfield(n *Node) *Field { switch u := n.Val().U.(type) { case string: - f.Note = &u + f.Note = u default: Yyerror("field annotation must be string") case nil: diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index 2f4e5fb6ef..795e688090 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -1181,7 +1181,7 @@ func escassign(e *EscState, dst, src *Node, step *EscStep) { var tags [1 << (bitsPerOutputInTag + EscReturnBits)]string // mktag returns the string representation for an escape analysis tag. -func mktag(mask int) *string { +func mktag(mask int) string { switch mask & EscMask { case EscNone, EscReturn: break @@ -1191,22 +1191,22 @@ func mktag(mask int) *string { } if mask < len(tags) && tags[mask] != "" { - return &tags[mask] + return tags[mask] } s := fmt.Sprintf("esc:0x%x", mask) if mask < len(tags) { tags[mask] = s } - return &s + return s } // parsetag decodes an escape analysis tag and returns the esc value. -func parsetag(note *string) uint16 { - if note == nil || !strings.HasPrefix(*note, "esc:") { +func parsetag(note string) uint16 { + if !strings.HasPrefix(note, "esc:") { return EscUnknown } - n, _ := strconv.ParseInt((*note)[4:], 0, 0) + n, _ := strconv.ParseInt(note[4:], 0, 0) em := uint16(n) if em == 0 { return EscNone @@ -1268,7 +1268,7 @@ func describeEscape(em uint16) string { // escassignfromtag models the input-to-output assignment flow of one of a function // calls arguments, where the flow is encoded in "note". 
-func escassignfromtag(e *EscState, note *string, dsts Nodes, src *Node) uint16 { +func escassignfromtag(e *EscState, note string, dsts Nodes, src *Node) uint16 { em := parsetag(note) if src.Op == OLITERAL { return em @@ -1997,7 +1997,7 @@ func esctag(e *EscState, func_ *Node) { } Warnl(func_.Lineno, "%v assuming %v is unsafe uintptr", funcSym(func_), name) } - t.Note = &unsafeUintptrTag + t.Note = unsafeUintptrTag } } diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index 27ece1d393..3bd3874845 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -1700,8 +1700,8 @@ func Fldconv(f *Field, flag FmtFlag) string { // (The escape analysis tags do not apply to func vars.) // But it must not suppress struct field tags. // See golang.org/issue/13777 and golang.org/issue/14331. - if flag&FmtShort == 0 && (!fmtbody || !f.Funarg) && f.Note != nil { - str += " " + strconv.Quote(*f.Note) + if flag&FmtShort == 0 && (!fmtbody || !f.Funarg) && f.Note != "" { + str += " " + strconv.Quote(f.Note) } if fmtmode == FTypeId && (sf&FmtUnsigned != 0) { diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 7373479ac9..7e7bda466d 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -373,7 +373,7 @@ func ordercall(n *Node, order *Order) { if t == nil { break } - if t.Note != nil && *t.Note == unsafeUintptrTag { + if t.Note == unsafeUintptrTag { xp := n.List.Addr(i) for (*xp).Op == OCONVNOP && !(*xp).Type.IsPtr() { xp = &(*xp).Left diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 727b9939e9..a578820256 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -501,14 +501,11 @@ func isExportedField(ft *Field) bool { // dnameField dumps a reflect.name for a struct field. 
func dnameField(s *Sym, ot int, ft *Field) int { - var name, tag string + var name string if ft.Sym != nil && ft.Embedded == 0 { name = ft.Sym.Name } - if ft.Note != nil { - tag = *ft.Note - } - nsym := dname(name, tag, nil, isExportedField(ft)) + nsym := dname(name, ft.Note, nil, isExportedField(ft)) return dsymptrLSym(Linksym(s), ot, nsym, 0) } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 38f21eb585..f2f2a70446 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -619,10 +619,6 @@ func cplxsubtype(et EType) EType { return 0 } -func eqnote(a, b *string) bool { - return a == b || a != nil && b != nil && *a == *b -} - // Eqtype reports whether t1 and t2 are identical, following the spec rules. // // Any cyclic type must go through a named type, and if one is @@ -670,7 +666,7 @@ func eqtype1(t1, t2 *Type, assumedEqual map[typePair]struct{}) bool { t1, i1 := IterFields(t1) t2, i2 := IterFields(t2) for ; t1 != nil && t2 != nil; t1, t2 = i1.Next(), i2.Next() { - if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, assumedEqual) || !eqnote(t1.Note, t2.Note) { + if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, assumedEqual) || t1.Note != t2.Note { return false } } diff --git a/src/cmd/compile/internal/gc/type.go b/src/cmd/compile/internal/gc/type.go index da295bba78..9f049babc2 100644 --- a/src/cmd/compile/internal/gc/type.go +++ b/src/cmd/compile/internal/gc/type.go @@ -300,7 +300,7 @@ type Field struct { // or interface Type. Offset int64 - Note *string // literal string annotation + Note string // literal string annotation } // End returns the offset of the first byte immediately after this field. 
@@ -1003,15 +1003,7 @@ func (t *Type) cmp(x *Type) ssa.Cmp { return cmpForNe(t1.Embedded < x1.Embedded) } if t1.Note != x1.Note { - if t1.Note == nil { - return ssa.CMPlt - } - if x1.Note == nil { - return ssa.CMPgt - } - if *t1.Note != *x1.Note { - return cmpForNe(*t1.Note < *x1.Note) - } + return cmpForNe(t1.Note < x1.Note) } if c := t1.Sym.cmpsym(x1.Sym); c != ssa.CMPeq { return c diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index e8fee67d05..0c7c5fa7aa 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -3807,7 +3807,7 @@ func usefield(n *Node) { if field == nil { Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym) } - if field.Note == nil || !strings.Contains(*field.Note, "go:\"track\"") { + if !strings.Contains(field.Note, "go:\"track\"") { return } diff --git a/test/fixedbugs/issue15439.go b/test/fixedbugs/issue15439.go new file mode 100644 index 0000000000..840a3c02a8 --- /dev/null +++ b/test/fixedbugs/issue15439.go @@ -0,0 +1,25 @@ +// run + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "reflect" + +func main() { + a := &struct{ x int }{} + b := &struct{ x int "" }{} + + ta := reflect.TypeOf(a) + tb := reflect.TypeOf(b) + + // Ensure cmd/compile treats absent and empty tags as equivalent. + a = b + + // Ensure package reflect treats absent and empty tags as equivalent. + if !tb.AssignableTo(ta) { + panic("fail") + } +} -- cgit v1.3 From 0b8c0767d0b95066734647edeb5a252c270a4a1a Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Mon, 25 Apr 2016 14:39:51 -0700 Subject: cmd/compile: for now, keep parameter numbering in binary export format The numbering is only required for parameters of functions/methods with exported inlineable bodies. 
For now, always export parameter names with internal numbering to minimize the diffs between assembly code dumps of code compiled with the textual vs the binary format. To be disabled again once the new export format is default. Change-Id: I6d14c564e734cc5596c7e995d8851e06d5a35013 Reviewed-on: https://go-review.googlesource.com/22441 Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/bexport.go | 9 ++++++++- src/go/internal/gcimporter/bimport.go | 4 ++++ 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index c635129ccc..512da43d51 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -123,6 +123,13 @@ const posInfoFormat = false // TODO(gri) remove eventually const forceNewExport = false // force new export format - DO NOT SUBMIT with this flag set +// forceNumberedParams keeps parameter numbering in exported parameter names +// even where we don't really need it (because the parameter names are not used +// elsewhere). Leave it enabled for now to remove this difference in generated +// object files so we can more easily compare old and new format. +// TODO(gri) remove once we switched to new format +const forceNumberedParams = true + const exportVersion = "v0" // exportInlined enables the export of inlined function bodies and related @@ -875,7 +882,7 @@ func parName(f *Field, numbered bool) string { // Functions that can be inlined use numbered parameters so we can distingish them // from other names in their context after inlining (i.e., the parameter numbering // is a form of parameter rewriting). See issue 4326 for an example and test case. 
- if numbered { + if forceNumberedParams || numbered { if !strings.Contains(name, "·") && f.Nname != nil && f.Nname.Name != nil && f.Nname.Name.Vargen > 0 { name = fmt.Sprintf("%s·%d", name, f.Nname.Name.Vargen) // append Vargen } diff --git a/src/go/internal/gcimporter/bimport.go b/src/go/internal/gcimporter/bimport.go index f2080ffe59..5ba9af1b02 100644 --- a/src/go/internal/gcimporter/bimport.go +++ b/src/go/internal/gcimporter/bimport.go @@ -11,6 +11,7 @@ import ( "go/token" "go/types" "sort" + "strings" "unicode" "unicode/utf8" ) @@ -504,6 +505,9 @@ func (p *importer) param(named bool) (*types.Var, bool) { if name == "" { panic("expected named parameter") } + if i := strings.Index(name, "·"); i > 0 { + name = name[:i] // cut off gc-specific parameter numbering + } pkg = p.pkg() } -- cgit v1.3 From d78c84c419b0ecdd70e85aad22951798c1707f50 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Mon, 25 Apr 2016 15:59:42 -0700 Subject: cmd/compile: sort import strings for canonical obj files This is not necessary for reproduceability but it removes differences due to imported package order between compiles using textual vs binary export format. The packages list tends to be very short, so it's ok doing it always for now. Guarded with a documented (const) flag so it's trivial to disable and remove eventually. Also, use the same flag now to enforce parameter numbering. 
Change-Id: Ie05d2490df770239696ecbecc07532ed62ccd5c0 Reviewed-on: https://go-review.googlesource.com/22445 Run-TryBot: Robert Griesemer Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/bexport.go | 13 ++++++------- src/cmd/compile/internal/gc/reflect.go | 11 +++++++++++ 2 files changed, 17 insertions(+), 7 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 512da43d51..0dc61374f1 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -123,12 +123,11 @@ const posInfoFormat = false // TODO(gri) remove eventually const forceNewExport = false // force new export format - DO NOT SUBMIT with this flag set -// forceNumberedParams keeps parameter numbering in exported parameter names -// even where we don't really need it (because the parameter names are not used -// elsewhere). Leave it enabled for now to remove this difference in generated -// object files so we can more easily compare old and new format. -// TODO(gri) remove once we switched to new format -const forceNumberedParams = true +// forceObjFileStability enforces additional constraints in export data +// and other parts of the compiler to eliminate object file differences +// only due to the choice of export format. +// TODO(gri) disable and remove once there is only one export format again +const forceObjFileStability = true const exportVersion = "v0" @@ -882,7 +881,7 @@ func parName(f *Field, numbered bool) string { // Functions that can be inlined use numbered parameters so we can distingish them // from other names in their context after inlining (i.e., the parameter numbering // is a form of parameter rewriting). See issue 4326 for an example and test case. 
- if forceNumberedParams || numbered { + if forceObjFileStability || numbered { if !strings.Contains(name, "·") && f.Nname != nil && f.Nname.Name != nil && f.Nname.Name.Vargen > 0 { name = fmt.Sprintf("%s·%d", name, f.Nname.Name.Vargen) // append Vargen } diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index a578820256..ceed55a2a5 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -1391,6 +1391,11 @@ func dumptypestructs() { } // generate import strings for imported packages + if forceObjFileStability { + // Sorting the packages is not necessary but to compare binaries created + // using textual and binary format we sort by path to reduce differences. + sort.Sort(pkgByPath(pkgs)) + } for _, p := range pkgs { if p.Direct { dimportpath(p) @@ -1429,6 +1434,12 @@ func dumptypestructs() { } } +type pkgByPath []*Pkg + +func (a pkgByPath) Len() int { return len(a) } +func (a pkgByPath) Less(i, j int) bool { return a[i].Path < a[j].Path } +func (a pkgByPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + func dalgsym(t *Type) *Sym { var s *Sym var hashfunc *Sym -- cgit v1.3 From 0b6332eb54767f916926ae39516ddaed87b26edb Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 25 Apr 2016 16:24:11 -0400 Subject: cmd/compile: fix another bug in dominator computation Here, "fix" means "replace". The new dominator computation is the "simple" algorithm from Lengauer and Tarjan's TOPLAS paper, with minimal changes. Also included is a test that tweaks the fixed error. 
Change-Id: I0abdf53d5d64df1e67e4e62f55e88957045cd63b Reviewed-on: https://go-review.googlesource.com/22401 Run-TryBot: David Chase TryBot-Result: Gobot Gobot Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/dom.go | 279 ++++++++++++------------------- src/cmd/compile/internal/ssa/dom_test.go | 165 ++++++++++++++---- 2 files changed, 241 insertions(+), 203 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/dom.go b/src/cmd/compile/internal/ssa/dom.go index 86b170080a..c0a4bb4188 100644 --- a/src/cmd/compile/internal/ssa/dom.go +++ b/src/cmd/compile/internal/ssa/dom.go @@ -20,6 +20,9 @@ const ( // postorder computes a postorder traversal ordering for the // basic blocks in f. Unreachable blocks will not appear. func postorder(f *Func) []*Block { + return postorderWithNumbering(f, []int{}) +} +func postorderWithNumbering(f *Func, ponums []int) []*Block { mark := make([]markKind, f.NumBlocks()) // result ordering @@ -36,6 +39,9 @@ func postorder(f *Func) []*Block { // Children have all been visited. Pop & output block. s = s[:len(s)-1] mark[b.ID] = done + if len(ponums) > 0 { + ponums[b.ID] = len(order) + } order = append(order, b) case notExplored: // Children have not been visited yet. Mark as explored @@ -56,14 +62,14 @@ func postorder(f *Func) []*Block { type linkedBlocks func(*Block) []*Block -const nscratchslices = 8 +const nscratchslices = 7 // experimentally, functions with 512 or fewer blocks account // for 75% of memory (size) allocation for dominator computation // in make.bash. 
const minscratchblocks = 512 -func (cfg *Config) scratchBlocksForDom(maxBlockID int) (a, b, c, d, e, f, g, h []ID) { +func (cfg *Config) scratchBlocksForDom(maxBlockID int) (a, b, c, d, e, f, g []ID) { tot := maxBlockID * nscratchslices scratch := cfg.domblockstore if len(scratch) < tot { @@ -90,216 +96,143 @@ func (cfg *Config) scratchBlocksForDom(maxBlockID int) (a, b, c, d, e, f, g, h [ e = scratch[4*maxBlockID : 5*maxBlockID] f = scratch[5*maxBlockID : 6*maxBlockID] g = scratch[6*maxBlockID : 7*maxBlockID] - h = scratch[7*maxBlockID : 8*maxBlockID] - - return -} - -// dfs performs a depth first search over the blocks starting at the set of -// blocks in the entries list (in arbitrary order). dfnum contains a mapping -// from block id to an int indicating the order the block was reached or -// 0 if the block was not reached. order contains a mapping from dfnum -// to block. -func (f *Func) dfs(entries []*Block, succFn linkedBlocks, dfnum, order, parent []ID) (fromID []*Block) { - maxBlockID := entries[0].Func.NumBlocks() - - fromID = make([]*Block, maxBlockID) - - for _, entry := range entries[0].Func.Blocks { - eid := entry.ID - if fromID[eid] != nil { - panic("Colliding entry IDs") - } - fromID[eid] = entry - } - - n := ID(0) - s := make([]*Block, 0, 256) - for _, entry := range entries { - if dfnum[entry.ID] != 0 { - continue // already found from a previous entry - } - s = append(s, entry) - parent[entry.ID] = entry.ID - for len(s) > 0 { - node := s[len(s)-1] - s = s[:len(s)-1] - if dfnum[node.ID] != 0 { - continue // already found from a previous entry - } - n++ - dfnum[node.ID] = n - order[n] = node.ID - for _, w := range succFn(node) { - // if it has a dfnum, we've already visited it - if dfnum[w.ID] == 0 { - s = append(s, w) - parent[w.ID] = node.ID // keep overwriting this till it is visited. - } - } - } - } return } -// dominators computes the dominator tree for f. It returns a slice -// which maps block ID to the immediate dominator of that block. 
-// Unreachable blocks map to nil. The entry block maps to nil. func dominators(f *Func) []*Block { preds := func(b *Block) []*Block { return b.Preds } succs := func(b *Block) []*Block { return b.Succs } //TODO: benchmark and try to find criteria for swapping between // dominatorsSimple and dominatorsLT - return f.dominatorsLT([]*Block{f.Entry}, preds, succs) + return f.dominatorsLTOrig(f.Entry, preds, succs) } -// postDominators computes the post-dominator tree for f. -func postDominators(f *Func) []*Block { - - if len(f.Blocks) == 0 { - return nil - } - - // find the exit blocks - var exits []*Block - for _, b := range f.Blocks { - switch b.Kind { - case BlockExit, BlockRet, BlockRetJmp, BlockCall, BlockCheck: - exits = append(exits, b) - } - } - - // TODO: postdominators is not really right, and it's not used yet - preds := func(b *Block) []*Block { return b.Preds } - succs := func(b *Block) []*Block { return b.Succs } - - // infinite loop with no exit - if exits == nil { - return make([]*Block, f.NumBlocks()) - } - return f.dominatorsLT(exits, succs, preds) -} - -// dominatorsLt runs Lengauer-Tarjan to compute a dominator tree starting at +// dominatorsLTOrig runs Lengauer-Tarjan to compute a dominator tree starting at // entry and using predFn/succFn to find predecessors/successors to allow // computing both dominator and post-dominator trees. -func (f *Func) dominatorsLT(entries []*Block, predFn linkedBlocks, succFn linkedBlocks) []*Block { - // Based on Lengauer-Tarjan from Modern Compiler Implementation in C - - // Appel with optimizations from Finding Dominators in Practice - - // Georgiadis - - maxBlockID := entries[0].Func.NumBlocks() - - dfnum, vertex, parent, semi, samedom, ancestor, best, bucket := f.Config.scratchBlocksForDom(maxBlockID) - - // dfnum := make([]ID, maxBlockID) // conceptually int32, but punning for allocation purposes. 
- // vertex := make([]ID, maxBlockID) - // parent := make([]ID, maxBlockID) - - // semi := make([]ID, maxBlockID) - // samedom := make([]ID, maxBlockID) - // ancestor := make([]ID, maxBlockID) - // best := make([]ID, maxBlockID) - // bucket := make([]ID, maxBlockID) +func (f *Func) dominatorsLTOrig(entry *Block, predFn linkedBlocks, succFn linkedBlocks) []*Block { + // Adapted directly from the original TOPLAS article's "simple" algorithm + + maxBlockID := entry.Func.NumBlocks() + semi, vertex, label, parent, ancestor, bucketHead, bucketLink := f.Config.scratchBlocksForDom(maxBlockID) + + // This version uses integers for most of the computation, + // to make the work arrays smaller and pointer-free. + // fromID translates from ID to *Block where that is needed. + fromID := make([]*Block, maxBlockID) + for _, v := range f.Blocks { + fromID[v.ID] = v + } + idom := make([]*Block, maxBlockID) // Step 1. Carry out a depth first search of the problem graph. Number // the vertices from 1 to n as they are reached during the search. - fromID := f.dfs(entries, succFn, dfnum, vertex, parent) + n := f.dfsOrig(entry, succFn, semi, vertex, label, parent) - idom := make([]*Block, maxBlockID) - - // Step 2. Compute the semidominators of all vertices by applying - // Theorem 4. Carry out the computation vertex by vertex in decreasing - // order by number. - for i := maxBlockID - 1; i > 0; i-- { + for i := n; i >= 2; i-- { w := vertex[i] - if w == 0 { - continue - } - if dfnum[w] == 0 { - // skip unreachable node - continue - } - - // Step 3. Implicitly define the immediate dominator of each - // vertex by applying Corollary 1. 
(reordered) - for v := bucket[w]; v != 0; v = bucket[v] { - u := eval(v, ancestor, semi, dfnum, best) - if semi[u] == semi[v] { - idom[v] = fromID[w] // true dominator - } else { - samedom[v] = u // v has same dominator as u - } - } - - p := parent[w] - s := p // semidominator - - var sp ID - // calculate the semidominator of w + // step2 in TOPLAS paper for _, v := range predFn(fromID[w]) { - if dfnum[v.ID] == 0 { + if semi[v.ID] == 0 { // skip unreachable predecessor + // not in original, but we're using existing pred instead of building one. continue } - - if dfnum[v.ID] <= dfnum[w] { - sp = v.ID - } else { - sp = semi[eval(v.ID, ancestor, semi, dfnum, best)] - } - - if dfnum[sp] < dfnum[s] { - s = sp + u := evalOrig(v.ID, ancestor, semi, label) + if semi[u] < semi[w] { + semi[w] = semi[u] } } - // link - ancestor[w] = p - best[w] = w + // add w to bucket[vertex[semi[w]]] + // implement bucket as a linked list implemented + // in a pair of arrays. + vsw := vertex[semi[w]] + bucketLink[w] = bucketHead[vsw] + bucketHead[vsw] = w + + linkOrig(parent[w], w, ancestor) - semi[w] = s - if semi[s] != parent[s] { - bucket[w] = bucket[s] - bucket[s] = w + // step3 in TOPLAS paper + for v := bucketHead[parent[w]]; v != 0; v = bucketLink[v] { + u := evalOrig(v, ancestor, semi, label) + if semi[u] < semi[v] { + idom[v] = fromID[u] + } else { + idom[v] = fromID[parent[w]] + } } } - - // Final pass of step 3 - for v := bucket[0]; v != 0; v = bucket[v] { - idom[v] = fromID[bucket[0]] + // step 4 in toplas paper + for i := ID(2); i <= n; i++ { + w := vertex[i] + if idom[w].ID != vertex[semi[w]] { + idom[w] = idom[idom[w].ID] + } } - // Step 4. Explicitly define the immediate dominator of each vertex, - // carrying out the computation vertex by vertex in increasing order by - // number. 
- for i := 1; i < maxBlockID-1; i++ { - w := vertex[i] - if w == 0 { - continue + return idom +} + +// dfs performs a depth first search over the blocks starting at entry block +// (in arbitrary order). This is a de-recursed version of dfs from the +// original Tarjan-Lengauer TOPLAS article. It's important to return the +// same values for parent as the original algorithm. +func (f *Func) dfsOrig(entry *Block, succFn linkedBlocks, semi, vertex, label, parent []ID) ID { + n := ID(0) + s := make([]*Block, 0, 256) + s = append(s, entry) + + for len(s) > 0 { + v := s[len(s)-1] + s = s[:len(s)-1] + // recursing on v + + if semi[v.ID] != 0 { + continue // already visited } - // w has the same dominator as samedom[w] - if samedom[w] != 0 { - idom[w] = idom[samedom[w]] + n++ + semi[v.ID] = n + vertex[n] = v.ID + label[v.ID] = v.ID + // ancestor[v] already zero + for _, w := range succFn(v) { + // if it has a dfnum, we've already visited it + if semi[w.ID] == 0 { + // yes, w can be pushed multiple times. + s = append(s, w) + parent[w.ID] = v.ID // keep overwriting this till it is visited. 
+ } } } - return idom + return n } -// eval function from LT paper with path compression -func eval(v ID, ancestor []ID, semi []ID, dfnum []ID, best []ID) ID { - a := ancestor[v] - if ancestor[a] != 0 { - bid := eval(a, ancestor, semi, dfnum, best) - ancestor[v] = ancestor[a] - if dfnum[semi[bid]] < dfnum[semi[best[v]]] { - best[v] = bid +// compressOrig is the "simple" compress function from LT paper +func compressOrig(v ID, ancestor, semi, label []ID) { + if ancestor[ancestor[v]] != 0 { + compressOrig(ancestor[v], ancestor, semi, label) + if semi[label[ancestor[v]]] < semi[label[v]] { + label[v] = label[ancestor[v]] } + ancestor[v] = ancestor[ancestor[v]] } - return best[v] +} + +// evalOrig is the "simple" eval function from LT paper +func evalOrig(v ID, ancestor, semi, label []ID) ID { + if ancestor[v] == 0 { + return v + } + compressOrig(v, ancestor, semi, label) + return label[v] +} + +func linkOrig(v, w ID, ancestor []ID) { + ancestor[w] = v } // dominators computes the dominator tree for f. 
It returns a slice diff --git a/src/cmd/compile/internal/ssa/dom_test.go b/src/cmd/compile/internal/ssa/dom_test.go index 19b898596c..6ecbe923d4 100644 --- a/src/cmd/compile/internal/ssa/dom_test.go +++ b/src/cmd/compile/internal/ssa/dom_test.go @@ -372,32 +372,6 @@ func TestDominatorsMultPred(t *testing.T) { verifyDominators(t, fun, dominatorsSimple, doms) } -func TestPostDominators(t *testing.T) { - c := testConfig(t) - fun := Fun(c, "entry", - Bloc("entry", - Valu("mem", OpInitMem, TypeMem, 0, nil), - Valu("p", OpConstBool, TypeBool, 1, nil), - If("p", "a", "c")), - Bloc("a", - If("p", "b", "c")), - Bloc("b", - Goto("c")), - Bloc("c", - If("p", "b", "exit")), - Bloc("exit", - Exit("mem"))) - - doms := map[string]string{"entry": "c", - "a": "c", - "b": "c", - "c": "exit", - } - - CheckFunc(fun.f) - verifyDominators(t, fun, postDominators, doms) -} - func TestInfiniteLoop(t *testing.T) { c := testConfig(t) // note lack of an exit block @@ -415,10 +389,6 @@ func TestInfiniteLoop(t *testing.T) { doms := map[string]string{"a": "entry", "b": "a"} verifyDominators(t, fun, dominators, doms) - - // no exit block, so there are no post-dominators - postDoms := map[string]string{} - verifyDominators(t, fun, postDominators, postDoms) } func TestDomTricky(t *testing.T) { @@ -465,3 +435,138 @@ func TestDomTricky(t *testing.T) { verifyDominators(t, fun, dominatorsSimple, doms) } } + +// generateDominatorMap uses dominatorsSimple to obtain a +// reference dominator tree for testing faster algorithms. 
+func generateDominatorMap(fut fun) map[string]string { + blockNames := map[*Block]string{} + for n, b := range fut.blocks { + blockNames[b] = n + } + referenceDom := dominatorsSimple(fut.f) + doms := make(map[string]string) + for _, b := range fut.f.Blocks { + if d := referenceDom[b.ID]; d != nil { + doms[blockNames[b]] = blockNames[d] + } + } + return doms +} + +func TestDominatorsPostTricky(t *testing.T) { + c := testConfig(t) + fun := Fun(c, "b1", + Bloc("b1", + Valu("mem", OpInitMem, TypeMem, 0, nil), + Valu("p", OpConstBool, TypeBool, 1, nil), + If("p", "b3", "b2")), + Bloc("b3", + If("p", "b5", "b6")), + Bloc("b5", + Goto("b7")), + Bloc("b7", + If("p", "b8", "b11")), + Bloc("b8", + Goto("b13")), + Bloc("b13", + If("p", "b14", "b15")), + Bloc("b14", + Goto("b10")), + Bloc("b15", + Goto("b16")), + Bloc("b16", + Goto("b9")), + Bloc("b9", + Goto("b7")), + Bloc("b11", + Goto("b12")), + Bloc("b12", + If("p", "b10", "b8")), + Bloc("b10", + Goto("b6")), + Bloc("b6", + Goto("b17")), + Bloc("b17", + Goto("b18")), + Bloc("b18", + If("p", "b22", "b19")), + Bloc("b22", + Goto("b23")), + Bloc("b23", + If("p", "b21", "b19")), + Bloc("b19", + If("p", "b24", "b25")), + Bloc("b24", + Goto("b26")), + Bloc("b26", + Goto("b25")), + Bloc("b25", + If("p", "b27", "b29")), + Bloc("b27", + Goto("b30")), + Bloc("b30", + Goto("b28")), + Bloc("b29", + Goto("b31")), + Bloc("b31", + Goto("b28")), + Bloc("b28", + If("p", "b32", "b33")), + Bloc("b32", + Goto("b21")), + Bloc("b21", + Goto("b47")), + Bloc("b47", + If("p", "b45", "b46")), + Bloc("b45", + Goto("b48")), + Bloc("b48", + Goto("b49")), + Bloc("b49", + If("p", "b50", "b51")), + Bloc("b50", + Goto("b52")), + Bloc("b52", + Goto("b53")), + Bloc("b53", + Goto("b51")), + Bloc("b51", + Goto("b54")), + Bloc("b54", + Goto("b46")), + Bloc("b46", + Exit("mem")), + Bloc("b33", + Goto("b34")), + Bloc("b34", + Goto("b37")), + Bloc("b37", + If("p", "b35", "b36")), + Bloc("b35", + Goto("b38")), + Bloc("b38", + Goto("b39")), + Bloc("b39", + If("p", 
"b40", "b41")), + Bloc("b40", + Goto("b42")), + Bloc("b42", + Goto("b43")), + Bloc("b43", + Goto("b41")), + Bloc("b41", + Goto("b44")), + Bloc("b44", + Goto("b36")), + Bloc("b36", + Goto("b20")), + Bloc("b20", + Goto("b18")), + Bloc("b2", + Goto("b4")), + Bloc("b4", + Exit("mem"))) + CheckFunc(fun.f) + doms := generateDominatorMap(fun) + verifyDominators(t, fun, dominators, doms) +} -- cgit v1.3 From e4355aeedfdd6a68185c4551c889eb13823cd86d Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Wed, 20 Apr 2016 11:17:41 -0700 Subject: cmd/compile: more sanity checks on rewrite rules Make sure ops have the right number of args, set aux and auxint only if allowed, etc. Normalize error reporting format. Change-Id: Ie545fcc5990c8c7d62d40d9a0a55885f941eb645 Reviewed-on: https://go-review.googlesource.com/22320 Reviewed-by: David Chase --- src/cmd/compile/internal/ssa/check.go | 8 ++ src/cmd/compile/internal/ssa/gen/AMD64.rules | 28 +++--- src/cmd/compile/internal/ssa/gen/AMD64Ops.go | 2 +- src/cmd/compile/internal/ssa/gen/ARMOps.go | 6 +- src/cmd/compile/internal/ssa/gen/rulegen.go | 122 +++++++++++++++++------- src/cmd/compile/internal/ssa/op.go | 3 +- src/cmd/compile/internal/ssa/opGen.go | 17 ++-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 126 ++++++++++++++++++++++--- src/cmd/compile/internal/ssa/rewritegeneric.go | 42 --------- 9 files changed, 236 insertions(+), 118 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go index f1d3857f88..4a10606d3c 100644 --- a/src/cmd/compile/internal/ssa/check.go +++ b/src/cmd/compile/internal/ssa/check.go @@ -193,6 +193,8 @@ func checkFunc(f *Func) { canHaveAuxInt = true case auxInt64, auxFloat64: canHaveAuxInt = true + case auxInt128: + // AuxInt must be zero, so leave canHaveAuxInt set to false. 
case auxFloat32: canHaveAuxInt = true if !isExactFloat32(v) { @@ -203,6 +205,12 @@ func checkFunc(f *Func) { case auxSymOff, auxSymValAndOff: canHaveAuxInt = true canHaveAux = true + case auxSymInt32: + if v.AuxInt != int64(int32(v.AuxInt)) { + f.Fatalf("bad int32 AuxInt value for %v", v) + } + canHaveAuxInt = true + canHaveAux = true default: f.Fatalf("unknown aux type for %s", v.Op) } diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 9d405131c0..86123ac5c5 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -408,22 +408,22 @@ (If cond yes no) -> (NE (TESTB cond cond) yes no) -(NE (TESTB (SETL cmp)) yes no) -> (LT cmp yes no) -(NE (TESTB (SETLE cmp)) yes no) -> (LE cmp yes no) -(NE (TESTB (SETG cmp)) yes no) -> (GT cmp yes no) -(NE (TESTB (SETGE cmp)) yes no) -> (GE cmp yes no) -(NE (TESTB (SETEQ cmp)) yes no) -> (EQ cmp yes no) -(NE (TESTB (SETNE cmp)) yes no) -> (NE cmp yes no) -(NE (TESTB (SETB cmp)) yes no) -> (ULT cmp yes no) -(NE (TESTB (SETBE cmp)) yes no) -> (ULE cmp yes no) -(NE (TESTB (SETA cmp)) yes no) -> (UGT cmp yes no) -(NE (TESTB (SETAE cmp)) yes no) -> (UGE cmp yes no) +(NE (TESTB (SETL cmp) (SETL cmp)) yes no) -> (LT cmp yes no) +(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) -> (LE cmp yes no) +(NE (TESTB (SETG cmp) (SETG cmp)) yes no) -> (GT cmp yes no) +(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) -> (GE cmp yes no) +(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) -> (EQ cmp yes no) +(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) -> (NE cmp yes no) +(NE (TESTB (SETB cmp) (SETB cmp)) yes no) -> (ULT cmp yes no) +(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) -> (ULE cmp yes no) +(NE (TESTB (SETA cmp) (SETA cmp)) yes no) -> (UGT cmp yes no) +(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no) // Special case for floating point - LF/LEF not generated -(NE (TESTB (SETGF cmp)) yes no) -> (UGT cmp yes no) -(NE (TESTB (SETGEF cmp)) 
yes no) -> (UGE cmp yes no) -(NE (TESTB (SETEQF cmp)) yes no) -> (EQF cmp yes no) -(NE (TESTB (SETNEF cmp)) yes no) -> (NEF cmp yes no) +(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) -> (UGT cmp yes no) +(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) -> (UGE cmp yes no) +(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) -> (EQF cmp yes no) +(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF cmp yes no) // Disabled because it interferes with the pattern match above and makes worse code. // (SETNEF x) -> (ORQ (SETNE x) (SETNAN x)) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index 35eeb61941..b684b9ccdf 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -439,7 +439,7 @@ func init() { clobbers: buildReg("DI FLAGS"), }, }, - {name: "MOVOconst", reg: regInfo{nil, 0, []regMask{fp}}, typ: "Int128", rematerializeable: true}, + {name: "MOVOconst", reg: regInfo{nil, 0, []regMask{fp}}, typ: "Int128", aux: "Int128", rematerializeable: true}, // arg0 = address of memory to zero // arg1 = # of 8-byte words to zero diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go index a4f7b17e87..23e8f63471 100644 --- a/src/cmd/compile/internal/ssa/gen/ARMOps.go +++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go @@ -25,13 +25,13 @@ func init() { {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1 - {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW"}, // load from arg0 + auxInt + aux. arg1=mem. - {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. 
{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff"}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem // pseudo-ops - {name: "LessThan", argLength: 2, reg: flagsgp}, // bool, 1 flags encode x ... } + loc := fmt.Sprintf("%s.rules:%d", arch.name, lineno) if isBlock(op, arch) { - blockrules[op] = append(blockrules[op], Rule{rule: rule, lineno: lineno}) + blockrules[op] = append(blockrules[op], Rule{rule: rule, loc: loc}) } else { - oprules[op] = append(oprules[op], Rule{rule: rule, lineno: lineno}) + oprules[op] = append(oprules[op], Rule{rule: rule, loc: loc}) } rule = "" } @@ -128,7 +129,7 @@ func genRules(arch arch) { log.Fatalf("scanner failed: %v\n", err) } if unbalanced(rule) { - log.Fatalf("unbalanced rule at line %d: %v\n", lineno, rule) + log.Fatalf("%s.rules:%d: unbalanced rule: %v\n", arch.name, lineno, rule) } // Order all the ops. @@ -174,15 +175,15 @@ func genRules(arch arch) { fmt.Fprintf(w, "// result: %s\n", result) fmt.Fprintf(w, "for {\n") - genMatch(w, arch, match) + genMatch(w, arch, match, rule.loc) if cond != "" { fmt.Fprintf(w, "if !(%s) {\nbreak\n}\n", cond) } - genResult(w, arch, result) + genResult(w, arch, result, rule.loc) if *genLog { - fmt.Fprintf(w, "fmt.Println(\"rewrite %s.rules:%d\")\n", arch.name, rule.lineno) + fmt.Fprintf(w, "fmt.Println(\"rewrite %s\")\n", rule.loc) } fmt.Fprintf(w, "return true\n") @@ -217,7 +218,7 @@ func genRules(arch arch) { if s[1] != "nil" { fmt.Fprintf(w, "v := b.Control\n") if strings.Contains(s[1], "(") { - genMatch0(w, arch, s[1], "v", map[string]struct{}{}, false) + genMatch0(w, arch, s[1], "v", map[string]struct{}{}, false, rule.loc) } else { fmt.Fprintf(w, "%s := b.Control\n", s[1]) } @@ -266,7 +267,7 @@ func genRules(arch arch) { if t[1] == "nil" { fmt.Fprintf(w, "b.SetControl(nil)\n") } else { - fmt.Fprintf(w, "b.SetControl(%s)\n", genResult0(w, arch, t[1], new(int), false, false)) + fmt.Fprintf(w, "b.SetControl(%s)\n", genResult0(w, 
arch, t[1], new(int), false, false, rule.loc)) } if len(newsuccs) < len(succs) { fmt.Fprintf(w, "b.Succs = b.Succs[:%d]\n", len(newsuccs)) @@ -289,7 +290,7 @@ func genRules(arch arch) { } if *genLog { - fmt.Fprintf(w, "fmt.Println(\"rewrite %s.rules:%d\")\n", arch.name, rule.lineno) + fmt.Fprintf(w, "fmt.Println(\"rewrite %s\")\n", rule.loc) } fmt.Fprintf(w, "return true\n") @@ -315,11 +316,11 @@ func genRules(arch arch) { } } -func genMatch(w io.Writer, arch arch, match string) { - genMatch0(w, arch, match, "v", map[string]struct{}{}, true) +func genMatch(w io.Writer, arch arch, match string, loc string) { + genMatch0(w, arch, match, "v", map[string]struct{}{}, true, loc) } -func genMatch0(w io.Writer, arch arch, match, v string, m map[string]struct{}, top bool) { +func genMatch0(w io.Writer, arch arch, match, v string, m map[string]struct{}, top bool, loc string) { if match[0] != '(' || match[len(match)-1] != ')' { panic("non-compound expr in genMatch0: " + match) } @@ -328,6 +329,24 @@ func genMatch0(w io.Writer, arch arch, match, v string, m map[string]struct{}, t // contained in () or {}. 
s := split(match[1 : len(match)-1]) // remove parens, then split + // Find op record + var op opData + for _, x := range genericOps { + if x.name == s[0] { + op = x + break + } + } + for _, x := range arch.ops { + if x.name == s[0] { + op = x + break + } + } + if op.name == "" { + log.Fatalf("%s: unknown op %s", loc, s[0]) + } + // check op if !top { fmt.Fprintf(w, "if %s.Op != %s {\nbreak\n}\n", v, opName(s[0], arch)) @@ -354,6 +373,11 @@ func genMatch0(w io.Writer, arch arch, match, v string, m map[string]struct{}, t } } else if a[0] == '[' { // auxint restriction + switch op.aux { + case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32": + default: + log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux) + } x := a[1 : len(a)-1] // remove [] if !isVariable(x) { // code @@ -368,7 +392,12 @@ func genMatch0(w io.Writer, arch arch, match, v string, m map[string]struct{}, t } } } else if a[0] == '{' { - // auxint restriction + // aux restriction + switch op.aux { + case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32": + default: + log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux) + } x := a[1 : len(a)-1] // remove {} if !isVariable(x) { // code @@ -412,30 +441,18 @@ func genMatch0(w io.Writer, arch arch, match, v string, m map[string]struct{}, t argname = fmt.Sprintf("%s_%d", v, argnum) } fmt.Fprintf(w, "%s := %s.Args[%d]\n", argname, v, argnum) - genMatch0(w, arch, a, argname, m, false) + genMatch0(w, arch, a, argname, m, false, loc) argnum++ } } - - variableLength := false - for _, op := range genericOps { - if op.name == s[0] && op.argLength == -1 { - variableLength = true - break - } - } - for _, op := range arch.ops { - if op.name == s[0] && op.argLength == -1 { - variableLength = true - break - } - } - if variableLength { + if op.argLength == -1 { fmt.Fprintf(w, "if len(%s.Args) != %d {\nbreak\n}\n", v, argnum) + } else if int(op.argLength) != argnum { + 
log.Fatalf("%s: op %s should have %d args, has %d", loc, op.name, op.argLength, argnum) } } -func genResult(w io.Writer, arch arch, result string) { +func genResult(w io.Writer, arch arch, result string, loc string) { move := false if result[0] == '@' { // parse @block directive @@ -444,9 +461,9 @@ func genResult(w io.Writer, arch arch, result string) { result = s[1] move = true } - genResult0(w, arch, result, new(int), true, move) + genResult0(w, arch, result, new(int), true, move, loc) } -func genResult0(w io.Writer, arch arch, result string, alloc *int, top, move bool) string { +func genResult0(w io.Writer, arch arch, result string, alloc *int, top, move bool, loc string) string { // TODO: when generating a constant result, use f.constVal to avoid // introducing copies just to clean them up again. if result[0] != '(' { @@ -464,6 +481,24 @@ func genResult0(w io.Writer, arch arch, result string, alloc *int, top, move boo s := split(result[1 : len(result)-1]) // remove parens, then split + // Find op record + var op opData + for _, x := range genericOps { + if x.name == s[0] { + op = x + break + } + } + for _, x := range arch.ops { + if x.name == s[0] { + op = x + break + } + } + if op.name == "" { + log.Fatalf("%s: unknown op %s", loc, s[0]) + } + // Find the type of the variable. 
var opType string var typeOverride bool @@ -512,23 +547,38 @@ func genResult0(w io.Writer, arch arch, result string, alloc *int, top, move boo fmt.Fprintf(w, "v.AddArg(%s)\n", v) } } + argnum := 0 for _, a := range s[1:] { if a[0] == '<' { // type restriction, handled above } else if a[0] == '[' { // auxint restriction + switch op.aux { + case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", "SymOff", "SymValAndOff", "SymInt32": + default: + log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux) + } x := a[1 : len(a)-1] // remove [] fmt.Fprintf(w, "%s.AuxInt = %s\n", v, x) } else if a[0] == '{' { // aux restriction + switch op.aux { + case "String", "Sym", "SymOff", "SymValAndOff", "SymInt32": + default: + log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux) + } x := a[1 : len(a)-1] // remove {} fmt.Fprintf(w, "%s.Aux = %s\n", v, x) } else { // regular argument (sexpr or variable) - x := genResult0(w, arch, a, alloc, false, move) + x := genResult0(w, arch, a, alloc, false, move, loc) fmt.Fprintf(w, "%s.AddArg(%s)\n", v, x) + argnum++ } } + if op.argLength != -1 && int(op.argLength) != argnum { + log.Fatalf("%s: op %s should have %d args, has %d", loc, op.name, op.argLength, argnum) + } return v } diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index 64807ec106..cadbc7cd7a 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -49,9 +49,10 @@ const ( auxInt16 // auxInt is a 16-bit integer auxInt32 // auxInt is a 32-bit integer auxInt64 // auxInt is a 64-bit integer + auxInt128 // auxInt represents a 128-bit integer. Always 0. 
auxFloat32 // auxInt is a float32 (encoded with math.Float64bits) auxFloat64 // auxInt is a float64 (encoded with math.Float64bits) - auxString // auxInt is a string + auxString // aux is a string auxSym // aux is a symbol auxSymOff // aux is a symbol, auxInt is an offset auxSymValAndOff // aux is a symbol, auxInt is a ValAndOff diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index a53899ec52..9ab9be769c 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -3635,6 +3635,7 @@ var opcodeTable = [...]opInfo{ }, { name: "MOVOconst", + auxType: auxInt128, argLen: 0, rematerializeable: true, reg: regInfo{ @@ -3854,9 +3855,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWload", - argLen: 2, - asm: arm.AMOVW, + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ {0, 31}, // R0 R1 R2 R3 SP @@ -3867,9 +3869,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWstore", - argLen: 3, - asm: arm.AMOVW, + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + asm: arm.AMOVW, reg: regInfo{ inputs: []inputInfo{ {0, 31}, // R0 R1 R2 R3 SP @@ -3887,7 +3890,7 @@ var opcodeTable = [...]opInfo{ }, { name: "LessThan", - argLen: 2, + argLen: 1, reg: regInfo{ inputs: []inputInfo{ {0, 32}, // FLAGS diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index f8cefb7eab..c26aeb0bd0 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -18326,7 +18326,7 @@ func rewriteBlockAMD64(b *Block) bool { return true } case BlockAMD64NE: - // match: (NE (TESTB (SETL cmp)) yes no) + // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) // cond: // result: (LT cmp yes no) for { @@ -18339,6 +18339,13 @@ func rewriteBlockAMD64(b *Block) bool { break } cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETL { + break + } + if cmp != 
v_1.Args[0] { + break + } yes := b.Succs[0] no := b.Succs[1] b.Kind = BlockAMD64LT @@ -18347,7 +18354,7 @@ func rewriteBlockAMD64(b *Block) bool { b.Succs[1] = no return true } - // match: (NE (TESTB (SETLE cmp)) yes no) + // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) // cond: // result: (LE cmp yes no) for { @@ -18360,6 +18367,13 @@ func rewriteBlockAMD64(b *Block) bool { break } cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETLE { + break + } + if cmp != v_1.Args[0] { + break + } yes := b.Succs[0] no := b.Succs[1] b.Kind = BlockAMD64LE @@ -18368,7 +18382,7 @@ func rewriteBlockAMD64(b *Block) bool { b.Succs[1] = no return true } - // match: (NE (TESTB (SETG cmp)) yes no) + // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) // cond: // result: (GT cmp yes no) for { @@ -18381,6 +18395,13 @@ func rewriteBlockAMD64(b *Block) bool { break } cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETG { + break + } + if cmp != v_1.Args[0] { + break + } yes := b.Succs[0] no := b.Succs[1] b.Kind = BlockAMD64GT @@ -18389,7 +18410,7 @@ func rewriteBlockAMD64(b *Block) bool { b.Succs[1] = no return true } - // match: (NE (TESTB (SETGE cmp)) yes no) + // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) // cond: // result: (GE cmp yes no) for { @@ -18402,6 +18423,13 @@ func rewriteBlockAMD64(b *Block) bool { break } cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETGE { + break + } + if cmp != v_1.Args[0] { + break + } yes := b.Succs[0] no := b.Succs[1] b.Kind = BlockAMD64GE @@ -18410,7 +18438,7 @@ func rewriteBlockAMD64(b *Block) bool { b.Succs[1] = no return true } - // match: (NE (TESTB (SETEQ cmp)) yes no) + // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) // cond: // result: (EQ cmp yes no) for { @@ -18423,6 +18451,13 @@ func rewriteBlockAMD64(b *Block) bool { break } cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETEQ { + break + } + if cmp != v_1.Args[0] { + break + } yes := b.Succs[0] no := b.Succs[1] b.Kind 
= BlockAMD64EQ @@ -18431,7 +18466,7 @@ func rewriteBlockAMD64(b *Block) bool { b.Succs[1] = no return true } - // match: (NE (TESTB (SETNE cmp)) yes no) + // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) // cond: // result: (NE cmp yes no) for { @@ -18444,6 +18479,13 @@ func rewriteBlockAMD64(b *Block) bool { break } cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETNE { + break + } + if cmp != v_1.Args[0] { + break + } yes := b.Succs[0] no := b.Succs[1] b.Kind = BlockAMD64NE @@ -18452,7 +18494,7 @@ func rewriteBlockAMD64(b *Block) bool { b.Succs[1] = no return true } - // match: (NE (TESTB (SETB cmp)) yes no) + // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) // cond: // result: (ULT cmp yes no) for { @@ -18465,6 +18507,13 @@ func rewriteBlockAMD64(b *Block) bool { break } cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETB { + break + } + if cmp != v_1.Args[0] { + break + } yes := b.Succs[0] no := b.Succs[1] b.Kind = BlockAMD64ULT @@ -18473,7 +18522,7 @@ func rewriteBlockAMD64(b *Block) bool { b.Succs[1] = no return true } - // match: (NE (TESTB (SETBE cmp)) yes no) + // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) // cond: // result: (ULE cmp yes no) for { @@ -18486,6 +18535,13 @@ func rewriteBlockAMD64(b *Block) bool { break } cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETBE { + break + } + if cmp != v_1.Args[0] { + break + } yes := b.Succs[0] no := b.Succs[1] b.Kind = BlockAMD64ULE @@ -18494,7 +18550,7 @@ func rewriteBlockAMD64(b *Block) bool { b.Succs[1] = no return true } - // match: (NE (TESTB (SETA cmp)) yes no) + // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) // cond: // result: (UGT cmp yes no) for { @@ -18507,6 +18563,13 @@ func rewriteBlockAMD64(b *Block) bool { break } cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETA { + break + } + if cmp != v_1.Args[0] { + break + } yes := b.Succs[0] no := b.Succs[1] b.Kind = BlockAMD64UGT @@ -18515,7 +18578,7 @@ func rewriteBlockAMD64(b 
*Block) bool { b.Succs[1] = no return true } - // match: (NE (TESTB (SETAE cmp)) yes no) + // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) // cond: // result: (UGE cmp yes no) for { @@ -18528,6 +18591,13 @@ func rewriteBlockAMD64(b *Block) bool { break } cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETAE { + break + } + if cmp != v_1.Args[0] { + break + } yes := b.Succs[0] no := b.Succs[1] b.Kind = BlockAMD64UGE @@ -18536,7 +18606,7 @@ func rewriteBlockAMD64(b *Block) bool { b.Succs[1] = no return true } - // match: (NE (TESTB (SETGF cmp)) yes no) + // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) // cond: // result: (UGT cmp yes no) for { @@ -18549,6 +18619,13 @@ func rewriteBlockAMD64(b *Block) bool { break } cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETGF { + break + } + if cmp != v_1.Args[0] { + break + } yes := b.Succs[0] no := b.Succs[1] b.Kind = BlockAMD64UGT @@ -18557,7 +18634,7 @@ func rewriteBlockAMD64(b *Block) bool { b.Succs[1] = no return true } - // match: (NE (TESTB (SETGEF cmp)) yes no) + // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) // cond: // result: (UGE cmp yes no) for { @@ -18570,6 +18647,13 @@ func rewriteBlockAMD64(b *Block) bool { break } cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETGEF { + break + } + if cmp != v_1.Args[0] { + break + } yes := b.Succs[0] no := b.Succs[1] b.Kind = BlockAMD64UGE @@ -18578,7 +18662,7 @@ func rewriteBlockAMD64(b *Block) bool { b.Succs[1] = no return true } - // match: (NE (TESTB (SETEQF cmp)) yes no) + // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) // cond: // result: (EQF cmp yes no) for { @@ -18591,6 +18675,13 @@ func rewriteBlockAMD64(b *Block) bool { break } cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETEQF { + break + } + if cmp != v_1.Args[0] { + break + } yes := b.Succs[0] no := b.Succs[1] b.Kind = BlockAMD64EQF @@ -18599,7 +18690,7 @@ func rewriteBlockAMD64(b *Block) bool { b.Succs[1] = no return true } - 
// match: (NE (TESTB (SETNEF cmp)) yes no) + // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) // cond: // result: (NEF cmp yes no) for { @@ -18612,6 +18703,13 @@ func rewriteBlockAMD64(b *Block) bool { break } cmp := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SETNEF { + break + } + if cmp != v_1.Args[0] { + break + } yes := b.Succs[0] no := b.Succs[1] b.Kind = BlockAMD64NEF diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index eb8f704124..43e87c3bf6 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -2403,27 +2403,6 @@ func rewriteValuegeneric_OpEq8(v *Value, config *Config) bool { v.AddArg(x) return true } - // match: (Eq8 x (ConstBool [c])) - // cond: x.Op != OpConstBool - // result: (Eq8 (ConstBool [c]) x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConstBool { - break - } - t := v_1.Type - c := v_1.AuxInt - if !(x.Op != OpConstBool) { - break - } - v.reset(OpEq8) - v0 := b.NewValue0(v.Line, OpConstBool, t) - v0.AuxInt = c - v.AddArg(v0) - v.AddArg(x) - return true - } // match: (Eq8 (Const8 [c]) (Const8 [d])) // cond: // result: (ConstBool [b2i(c == d)]) @@ -5767,27 +5746,6 @@ func rewriteValuegeneric_OpNeq8(v *Value, config *Config) bool { v.AddArg(x) return true } - // match: (Neq8 x (ConstBool [c])) - // cond: x.Op != OpConstBool - // result: (Neq8 (ConstBool [c]) x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConstBool { - break - } - t := v_1.Type - c := v_1.AuxInt - if !(x.Op != OpConstBool) { - break - } - v.reset(OpNeq8) - v0 := b.NewValue0(v.Line, OpConstBool, t) - v0.AuxInt = c - v.AddArg(v0) - v.AddArg(x) - return true - } // match: (Neq8 (Const8 [c]) (Const8 [d])) // cond: // result: (ConstBool [b2i(c != d)]) -- cgit v1.3 From 01d5e63faa7cbfe10c6c45a788cd9859da2dfcdb Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 26 Apr 2016 13:18:14 -0400 Subject: 
cmd/compile/internal/gc: rewrite comment to avoid automated meaning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The comment says 'DΟ NΟT SUBMIT', and that text being in a file can cause automated errors or warnings when trying to check the Go sources into other source control systems. (We reject that string in CL commit messages, which I've avoided here by changing the O's to Ο's above.) Change-Id: I6cdd57a8612ded5208f05a8bd6b137f44424a030 Reviewed-on: https://go-review.googlesource.com/22434 Run-TryBot: Russ Cox TryBot-Result: Gobot Gobot Reviewed-by: Robert Griesemer --- src/cmd/compile/internal/gc/bexport.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 0dc61374f1..b44eb5e05a 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -121,7 +121,7 @@ const debugFormat = false // default: false const posInfoFormat = false // TODO(gri) remove eventually -const forceNewExport = false // force new export format - DO NOT SUBMIT with this flag set +const forceNewExport = false // force new export format - do NOT submit with this flag set // forceObjFileStability enforces additional constraints in export data // and other parts of the compiler to eliminate object file differences -- cgit v1.3 From 17db07f9b5034f22851f32f7700649ac61c44e8f Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Tue, 26 Apr 2016 14:11:38 -0700 Subject: cmd/compile: don't discard inlineable but empty functions with binary export format Change-Id: I0f016fa000f949d27847d645b4cdebe68a8abf20 Reviewed-on: https://go-review.googlesource.com/22474 Run-TryBot: Robert Griesemer Reviewed-by: Matthew Dempsky TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/bimport.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'src/cmd/compile') diff --git 
a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index 7fed8b1342..6fe30cdba9 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -145,7 +145,16 @@ func Import(in *bufio.Reader) { if f := p.funcList[i]; f != nil { // function not yet imported - read body and set it funchdr(f) - f.Func.Inl.Set(p.stmtList()) + body := p.stmtList() + if body == nil { + // Make sure empty body is not interpreted as + // no inlineable body (see also parser.fnbody) + // (not doing so can cause significant performance + // degradation due to unnecessary calls to empty + // functions). + body = []*Node{Nod(OEMPTY, nil, nil)} + } + f.Func.Inl.Set(body) funcbody(f) } else { // function already imported - read body but discard declarations -- cgit v1.3 From 6e4a8615f652a2020471622354be6d890404020c Mon Sep 17 00:00:00 2001 From: Alan Donovan Date: Mon, 25 Apr 2016 18:31:36 -0400 Subject: gc: use AbsFileLine for deterministic binary export data This version of the file name honors the -trimprefix flag, which strips off variable parts like $WORK or $PWD. The TestCgoConsistentResults test now passes. Change-Id: If93980b054f9b13582dd314f9d082c26eaac4f41 Reviewed-on: https://go-review.googlesource.com/22444 Reviewed-by: Robert Griesemer --- src/cmd/compile/internal/gc/bexport.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index b44eb5e05a..5618012c77 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -113,12 +113,8 @@ import ( const debugFormat = false // default: false // If posInfoFormat is set, position information (file, lineno) is written -// for each exported object, including methods and struct fields. 
Currently -// disabled because it may lead to different object files depending on which -// directory they are built under, which causes tests checking for hermetic -// builds to fail (e.g. TestCgoConsistentResults for cmd/go). -// TODO(gri) determine what to do here. -const posInfoFormat = false +// for each exported object, including methods and struct fields. +const posInfoFormat = true // default: true // TODO(gri) remove eventually const forceNewExport = false // force new export format - do NOT submit with this flag set @@ -517,7 +513,7 @@ func (p *exporter) pos(n *Node) { var file string var line int if n != nil { - file, line = Ctxt.LineHist.FileLine(int(n.Lineno)) + file, line = Ctxt.LineHist.AbsFileLine(int(n.Lineno)) } if file == p.prevFile && line != p.prevLine { -- cgit v1.3 From 8d075beeef137455b9dc40f1c724b495f3ceda26 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 26 Apr 2016 10:55:32 -0700 Subject: cmd/compile: lazily initialize litbuf MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of eagerly creating strings like "literal 2.01" for every lexed number in case we need to mention it in an error message, defer this work to (*parser).syntax_error. 
name old allocs/op new allocs/op delta Template 482k ± 0% 482k ± 0% -0.12% (p=0.000 n=9+10) GoTypes 1.35M ± 0% 1.35M ± 0% -0.04% (p=0.015 n=10+10) Compiler 5.45M ± 0% 5.44M ± 0% -0.12% (p=0.000 n=9+8) Change-Id: I333b3c80e583864914412fb38f8c0b7f1d8c8821 Reviewed-on: https://go-review.googlesource.com/22480 Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot Reviewed-by: Brad Fitzpatrick --- src/cmd/compile/internal/gc/lex.go | 2 +- src/cmd/compile/internal/gc/parser.go | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/lex.go b/src/cmd/compile/internal/gc/lex.go index 09fed98985..8608a6229c 100644 --- a/src/cmd/compile/internal/gc/lex.go +++ b/src/cmd/compile/internal/gc/lex.go @@ -755,7 +755,7 @@ func (l *lexer) number(c rune) { } done: - litbuf = "literal " + str + litbuf = "" // lazily initialized in (*parser).syntax_error l.nlsemi = true l.tok = LLITERAL } diff --git a/src/cmd/compile/internal/gc/parser.go b/src/cmd/compile/internal/gc/parser.go index 97a18497ff..55f352590b 100644 --- a/src/cmd/compile/internal/gc/parser.go +++ b/src/cmd/compile/internal/gc/parser.go @@ -102,6 +102,9 @@ func (p *parser) syntax_error(msg string) { tok = "name" } case LLITERAL: + if litbuf == "" { + litbuf = "literal " + lexbuf.String() + } tok = litbuf case LOPER: tok = goopnames[p.op] -- cgit v1.3 From 3b0efa689ec7a32de30cbda2221452f57abb2532 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 26 Apr 2016 14:09:58 -0700 Subject: cmd/compile: a rule's line number is at its -> Let's define the line number of a multiline rule as the line number on which the -> appears. This helps make the rule cover analysis look a bit nicer. 
Change-Id: I4ac4c09f2240285976590ecfd416bc4c05e78946 Reviewed-on: https://go-review.googlesource.com/22473 Reviewed-by: Josh Bleecher Snyder --- src/cmd/compile/internal/ssa/gen/rulegen.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go index 02a5da2a5a..5f7d1cf984 100644 --- a/src/cmd/compile/internal/ssa/gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/gen/rulegen.go @@ -91,6 +91,7 @@ func genRules(arch arch) { scanner := bufio.NewScanner(text) rule := "" var lineno int + var ruleLineno int // line number of "->" for scanner.Scan() { lineno++ line := scanner.Text() @@ -107,6 +108,9 @@ func genRules(arch arch) { if !strings.Contains(rule, "->") { continue } + if ruleLineno == 0 { + ruleLineno = lineno + } if strings.HasSuffix(rule, "->") { continue } @@ -117,13 +121,14 @@ func genRules(arch arch) { if op[len(op)-1] == ')' { op = op[:len(op)-1] // rule has only opcode, e.g. (ConstNil) -> ... 
} - loc := fmt.Sprintf("%s.rules:%d", arch.name, lineno) + loc := fmt.Sprintf("%s.rules:%d", arch.name, ruleLineno) if isBlock(op, arch) { blockrules[op] = append(blockrules[op], Rule{rule: rule, loc: loc}) } else { oprules[op] = append(oprules[op], Rule{rule: rule, loc: loc}) } rule = "" + ruleLineno = 0 } if err := scanner.Err(); err != nil { log.Fatalf("scanner failed: %v\n", err) -- cgit v1.3 From 707aed0363c31bfef761a86464a09ecf0817267e Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Tue, 26 Apr 2016 19:11:53 -0700 Subject: cmd/compile: fix opnames Change-Id: Ief4707747338912216a8509b1adbf655c8ffac56 Reviewed-on: https://go-review.googlesource.com/22495 Run-TryBot: Josh Bleecher Snyder TryBot-Result: Gobot Gobot Reviewed-by: Brad Fitzpatrick --- src/cmd/compile/internal/gc/opnames.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/opnames.go b/src/cmd/compile/internal/gc/opnames.go index df0d8cb7fb..015baa2376 100644 --- a/src/cmd/compile/internal/gc/opnames.go +++ b/src/cmd/compile/internal/gc/opnames.go @@ -160,9 +160,9 @@ var opnames = []string{ OLROT: "LROT", ORROTC: "RROTC", ORETJMP: "RETJMP", - OPS: "OPS", - OPC: "OPC", - OSQRT: "OSQRT", - OGETG: "OGETG", + OPS: "PS", + OPC: "PC", + OSQRT: "SQRT", + OGETG: "GETG", OEND: "END", } -- cgit v1.3 From 8f2e780e8ac29e47466103998484c0a73df34d51 Mon Sep 17 00:00:00 2001 From: Dave Cheney Date: Wed, 27 Apr 2016 14:46:09 +1000 Subject: cmd/compile/internal: unexport gc.Oconv Updates #15462 Semi automatic change with gofmt -r and hand fixups for callers outside internal/gc. All the uses of gc.Oconv outside cmd/compile/internal/gc were for the Oconv(op, 0) form, which is already handled the Op.String method. Replace the use of gc.Oconv(op, 0) with op itself, which will call Op.String via the %v or %s verb. Unexport Oconv. 
Change-Id: I84da2a2e4381b35f52efce427b2d6a3bccdf2526 Reviewed-on: https://go-review.googlesource.com/22496 Run-TryBot: Dave Cheney TryBot-Result: Gobot Gobot Reviewed-by: Josh Bleecher Snyder --- src/cmd/compile/internal/amd64/gsubr.go | 2 +- src/cmd/compile/internal/arm/cgen64.go | 6 ++-- src/cmd/compile/internal/arm/gsubr.go | 4 +-- src/cmd/compile/internal/arm64/gsubr.go | 6 ++-- src/cmd/compile/internal/gc/bexport.go | 2 +- src/cmd/compile/internal/gc/cgen.go | 4 +-- src/cmd/compile/internal/gc/const.go | 4 +-- src/cmd/compile/internal/gc/cplx.go | 4 +-- src/cmd/compile/internal/gc/dcl.go | 8 ++--- src/cmd/compile/internal/gc/esc.go | 6 ++-- src/cmd/compile/internal/gc/export.go | 2 +- src/cmd/compile/internal/gc/fmt.go | 58 ++++++++++++++++---------------- src/cmd/compile/internal/gc/gen.go | 2 +- src/cmd/compile/internal/gc/gsubr.go | 6 ++-- src/cmd/compile/internal/gc/order.go | 12 +++---- src/cmd/compile/internal/gc/pgen.go | 2 +- src/cmd/compile/internal/gc/racewalk.go | 6 ++-- src/cmd/compile/internal/gc/select.go | 8 ++--- src/cmd/compile/internal/gc/ssa.go | 4 +-- src/cmd/compile/internal/gc/subr.go | 10 +++--- src/cmd/compile/internal/gc/swt.go | 2 +- src/cmd/compile/internal/gc/typecheck.go | 30 ++++++++--------- src/cmd/compile/internal/gc/unsafe.go | 2 +- src/cmd/compile/internal/gc/walk.go | 16 ++++----- src/cmd/compile/internal/mips64/gsubr.go | 6 ++-- src/cmd/compile/internal/ppc64/gsubr.go | 2 +- src/cmd/compile/internal/s390x/gsubr.go | 2 +- src/cmd/compile/internal/x86/cgen64.go | 6 ++-- src/cmd/compile/internal/x86/ggen.go | 2 +- src/cmd/compile/internal/x86/gsubr.go | 6 ++-- 30 files changed, 115 insertions(+), 115 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/amd64/gsubr.go b/src/cmd/compile/internal/amd64/gsubr.go index 456fa7cbae..e3535f3244 100644 --- a/src/cmd/compile/internal/amd64/gsubr.go +++ b/src/cmd/compile/internal/amd64/gsubr.go @@ -722,7 +722,7 @@ func optoas(op gc.Op, t *gc.Type) obj.As { a 
:= obj.AXXX switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { default: - gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(op, 0), t) + gc.Fatalf("optoas: no entry %v-%v", op, t) case OADDR_ | gc.TPTR32: a = x86.ALEAL diff --git a/src/cmd/compile/internal/arm/cgen64.go b/src/cmd/compile/internal/arm/cgen64.go index 337bf03179..33e840615c 100644 --- a/src/cmd/compile/internal/arm/cgen64.go +++ b/src/cmd/compile/internal/arm/cgen64.go @@ -19,7 +19,7 @@ func cgen64(n *gc.Node, res *gc.Node) { if res.Op != gc.OINDREG && res.Op != gc.ONAME { gc.Dump("n", n) gc.Dump("res", res) - gc.Fatalf("cgen64 %v of %v", gc.Oconv(n.Op, 0), gc.Oconv(res.Op, 0)) + gc.Fatalf("cgen64 %v of %v", n.Op, res.Op) } l := n.Left @@ -35,7 +35,7 @@ func cgen64(n *gc.Node, res *gc.Node) { split64(l, &lo1, &hi1) switch n.Op { default: - gc.Fatalf("cgen64 %v", gc.Oconv(n.Op, 0)) + gc.Fatalf("cgen64 %v", n.Op) case gc.OMINUS: var lo2 gc.Node @@ -793,7 +793,7 @@ func cmp64(nl *gc.Node, nr *gc.Node, op gc.Op, likely int, to *obj.Prog) { var br *obj.Prog switch op { default: - gc.Fatalf("cmp64 %v %v", gc.Oconv(op, 0), t) + gc.Fatalf("cmp64 %v %v", op, t) // cmp hi // bne L diff --git a/src/cmd/compile/internal/arm/gsubr.go b/src/cmd/compile/internal/arm/gsubr.go index 26da2e2081..73905f18ce 100644 --- a/src/cmd/compile/internal/arm/gsubr.go +++ b/src/cmd/compile/internal/arm/gsubr.go @@ -719,7 +719,7 @@ func raddr(n *gc.Node, p *obj.Prog) { gc.Naddr(&a, n) if a.Type != obj.TYPE_REG { if n != nil { - gc.Fatalf("bad in raddr: %v", gc.Oconv(n.Op, 0)) + gc.Fatalf("bad in raddr: %v", n.Op) } else { gc.Fatalf("bad in raddr: ") } @@ -790,7 +790,7 @@ func optoas(op gc.Op, t *gc.Type) obj.As { a := obj.AXXX switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { default: - gc.Fatalf("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(op, 0), t, gc.Types[t.Etype], gc.Types[gc.Simtype[t.Etype]]) + gc.Fatalf("optoas: no entry %v-%v etype %v simtype %v", op, t, gc.Types[t.Etype], gc.Types[gc.Simtype[t.Etype]]) /* 
case CASE(OADDR, TPTR32): a = ALEAL; diff --git a/src/cmd/compile/internal/arm64/gsubr.go b/src/cmd/compile/internal/arm64/gsubr.go index 4d64e790af..efa66a09d3 100644 --- a/src/cmd/compile/internal/arm64/gsubr.go +++ b/src/cmd/compile/internal/arm64/gsubr.go @@ -567,7 +567,7 @@ func raddr(n *gc.Node, p *obj.Prog) { gc.Naddr(&a, n) if a.Type != obj.TYPE_REG { if n != nil { - gc.Fatalf("bad in raddr: %v", gc.Oconv(n.Op, 0)) + gc.Fatalf("bad in raddr: %v", n.Op) } else { gc.Fatalf("bad in raddr: ") } @@ -579,7 +579,7 @@ func raddr(n *gc.Node, p *obj.Prog) { func gcmp(as obj.As, lhs *gc.Node, rhs *gc.Node) *obj.Prog { if lhs.Op != gc.OREGISTER { - gc.Fatalf("bad operands to gcmp: %v %v", gc.Oconv(lhs.Op, 0), gc.Oconv(rhs.Op, 0)) + gc.Fatalf("bad operands to gcmp: %v %v", lhs.Op, rhs.Op) } p := rawgins(as, rhs, nil) @@ -622,7 +622,7 @@ func optoas(op gc.Op, t *gc.Type) obj.As { a := obj.AXXX switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { default: - gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(op, 0), t) + gc.Fatalf("optoas: no entry for op=%v type=%v", op, t) case OEQ_ | gc.TBOOL, OEQ_ | gc.TINT8, diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 5618012c77..5c9a2734d4 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -501,7 +501,7 @@ func (p *exporter) obj(sym *Sym) { } default: - Fatalf("exporter: unexpected export symbol: %v %v", Oconv(n.Op, 0), sym) + Fatalf("exporter: unexpected export symbol: %v %v", oconv(n.Op, 0), sym) } } diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go index 3d3600a079..bb7487c958 100644 --- a/src/cmd/compile/internal/gc/cgen.go +++ b/src/cmd/compile/internal/gc/cgen.go @@ -1807,7 +1807,7 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) { } if !n.Type.IsBoolean() { - Fatalf("bgen: bad type %v for %v", n.Type, Oconv(n.Op, 0)) + Fatalf("bgen: bad type %v for %v", n.Type, 
oconv(n.Op, 0)) } for n.Op == OCONVNOP { @@ -2454,7 +2454,7 @@ func Ginscall(f *Node, proc int) { func cgen_callinter(n *Node, res *Node, proc int) { i := n.Left if i.Op != ODOTINTER { - Fatalf("cgen_callinter: not ODOTINTER %v", Oconv(i.Op, 0)) + Fatalf("cgen_callinter: not ODOTINTER %v", oconv(i.Op, 0)) } i = i.Left // interface diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 99b48f5ffe..5a7e9f34dd 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -695,7 +695,7 @@ func evconst(n *Node) { switch uint32(n.Op)<<16 | uint32(v.Ctype()) { default: if n.Diag == 0 { - Yyerror("illegal constant expression %v %v", Oconv(n.Op, 0), nl.Type) + Yyerror("illegal constant expression %v %v", oconv(n.Op, 0), nl.Type) n.Diag = 1 } return @@ -1179,7 +1179,7 @@ setfalse: illegal: if n.Diag == 0 { - Yyerror("illegal constant expression: %v %v %v", nl.Type, Oconv(n.Op, 0), nr.Type) + Yyerror("illegal constant expression: %v %v %v", nl.Type, oconv(n.Op, 0), nr.Type) n.Diag = 1 } } diff --git a/src/cmd/compile/internal/gc/cplx.go b/src/cmd/compile/internal/gc/cplx.go index 34fd0b96d9..4218117711 100644 --- a/src/cmd/compile/internal/gc/cplx.go +++ b/src/cmd/compile/internal/gc/cplx.go @@ -399,7 +399,7 @@ func Complexgen(n *Node, res *Node) { switch n.Op { default: Dump("complexgen: unknown op", n) - Fatalf("complexgen: unknown op %v", Oconv(n.Op, 0)) + Fatalf("complexgen: unknown op %v", oconv(n.Op, 0)) case ODOT, ODOTPTR, @@ -458,7 +458,7 @@ func Complexgen(n *Node, res *Node) { switch n.Op { default: - Fatalf("complexgen: unknown op %v", Oconv(n.Op, 0)) + Fatalf("complexgen: unknown op %v", oconv(n.Op, 0)) case OCONV: Complexmove(nl, res) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index ca9caf69d7..53d4ad4d10 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -551,7 +551,7 @@ func funchdr(n *Node) { func 
funcargs(nt *Node) { if nt.Op != OTFUNC { - Fatalf("funcargs %v", Oconv(nt.Op, 0)) + Fatalf("funcargs %v", oconv(nt.Op, 0)) } // re-start the variable generation number @@ -565,7 +565,7 @@ func funcargs(nt *Node) { if nt.Left != nil { n := nt.Left if n.Op != ODCLFIELD { - Fatalf("funcargs receiver %v", Oconv(n.Op, 0)) + Fatalf("funcargs receiver %v", oconv(n.Op, 0)) } if n.Left != nil { n.Left.Op = ONAME @@ -580,7 +580,7 @@ func funcargs(nt *Node) { for _, n := range nt.List.Slice() { if n.Op != ODCLFIELD { - Fatalf("funcargs in %v", Oconv(n.Op, 0)) + Fatalf("funcargs in %v", oconv(n.Op, 0)) } if n.Left != nil { n.Left.Op = ONAME @@ -598,7 +598,7 @@ func funcargs(nt *Node) { var i int = 0 for _, n := range nt.Rlist.Slice() { if n.Op != ODCLFIELD { - Fatalf("funcargs out %v", Oconv(n.Op, 0)) + Fatalf("funcargs out %v", oconv(n.Op, 0)) } if n.Left == nil { diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index 795e688090..a7bc88e5c1 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -998,8 +998,8 @@ func escassign(e *EscState, dst, src *Node, step *EscStep) { if Debug['m'] > 2 { fmt.Printf("%v:[%d] %v escassign: %v(%v)[%v] = %v(%v)[%v]\n", linestr(lineno), e.loopdepth, funcSym(Curfn), - Nconv(dst, FmtShort), Jconv(dst, FmtShort), Oconv(dst.Op, 0), - Nconv(src, FmtShort), Jconv(src, FmtShort), Oconv(src.Op, 0)) + Nconv(dst, FmtShort), Jconv(dst, FmtShort), oconv(dst.Op, 0), + Nconv(src, FmtShort), Jconv(src, FmtShort), oconv(src.Op, 0)) } setlineno(dst) @@ -1741,7 +1741,7 @@ func escwalkBody(e *EscState, level Level, dst *Node, src *Node, step *EscStep, if Debug['m'] > 2 { fmt.Printf("escwalk: level:%d depth:%d %.*s op=%v %v(%v) scope:%v[%d] extraloopdepth=%v\n", - level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", Oconv(src.Op, 0), Nconv(src, FmtShort), Jconv(src, FmtShort), e.curfnSym(src), srcE.Escloopdepth, extraloopdepth) + level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", oconv(src.Op, 
0), Nconv(src, FmtShort), Jconv(src, FmtShort), e.curfnSym(src), srcE.Escloopdepth, extraloopdepth) } e.pdepth++ diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index a275377598..b6280ab30b 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -354,7 +354,7 @@ func dumpsym(s *Sym) { switch s.Def.Op { default: - Yyerror("unexpected export symbol: %v %v", Oconv(s.Def.Op, 0), s) + Yyerror("unexpected export symbol: %v %v", oconv(s.Def.Op, 0), s) case OLITERAL: dumpexportconst(s) diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index 3bd3874845..ee12e35975 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -193,7 +193,7 @@ var goopnames = []string{ } // Fmt "%O": Node opcodes -func Oconv(o Op, flag FmtFlag) string { +func oconv(o Op, flag FmtFlag) string { if (flag&FmtSharp != 0) || fmtmode != FDbg { if o >= 0 && int(o) < len(goopnames) && goopnames[o] != "" { return goopnames[o] @@ -454,7 +454,7 @@ func (e EType) String() string { } func (o Op) String() string { - return Oconv(o, 0) + return oconv(o, 0) } // Fmt "%S": syms @@ -840,7 +840,7 @@ func stmtfmt(n *Node) string { break } - f += fmt.Sprintf("%v %v= %v", n.Left, Oconv(Op(n.Etype), FmtSharp), n.Right) + f += fmt.Sprintf("%v %v= %v", n.Left, oconv(Op(n.Etype), FmtSharp), n.Right) case OAS2: if n.Colas && !complexinit { @@ -914,11 +914,11 @@ func stmtfmt(n *Node) string { case OSELECT, OSWITCH: if fmtmode == FErr { - f += fmt.Sprintf("%v statement", Oconv(n.Op, 0)) + f += fmt.Sprintf("%v statement", oconv(n.Op, 0)) break } - f += Oconv(n.Op, FmtSharp) + f += oconv(n.Op, FmtSharp) if simpleinit { f += fmt.Sprintf(" %v;", n.Ninit.First()) } @@ -941,9 +941,9 @@ func stmtfmt(n *Node) string { OFALL, OXFALL: if n.Left != nil { - f += fmt.Sprintf("%v %v", Oconv(n.Op, FmtSharp), n.Left) + f += fmt.Sprintf("%v %v", oconv(n.Op, FmtSharp), n.Left) } else { - f 
+= Oconv(n.Op, FmtSharp) + f += oconv(n.Op, FmtSharp) } case OEMPTY: @@ -1337,7 +1337,7 @@ func exprfmt(n *Node, prec int) string { return buf.String() case OCOPY, OCOMPLEX: - return fmt.Sprintf("%v(%v, %v)", Oconv(n.Op, FmtSharp), n.Left, n.Right) + return fmt.Sprintf("%v(%v, %v)", oconv(n.Op, FmtSharp), n.Left, n.Right) case OCONV, OCONVIFACE, @@ -1369,12 +1369,12 @@ func exprfmt(n *Node, prec int) string { OPRINT, OPRINTN: if n.Left != nil { - return fmt.Sprintf("%v(%v)", Oconv(n.Op, FmtSharp), n.Left) + return fmt.Sprintf("%v(%v)", oconv(n.Op, FmtSharp), n.Left) } if n.Isddd { - return fmt.Sprintf("%v(%v...)", Oconv(n.Op, FmtSharp), Hconv(n.List, FmtComma)) + return fmt.Sprintf("%v(%v...)", oconv(n.Op, FmtSharp), Hconv(n.List, FmtComma)) } - return fmt.Sprintf("%v(%v)", Oconv(n.Op, FmtSharp), Hconv(n.List, FmtComma)) + return fmt.Sprintf("%v(%v)", oconv(n.Op, FmtSharp), Hconv(n.List, FmtComma)) case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG: var f string @@ -1408,9 +1408,9 @@ func exprfmt(n *Node, prec int) string { ORECV: var f string if n.Left.Op == n.Op { - f += fmt.Sprintf("%v ", Oconv(n.Op, FmtSharp)) + f += fmt.Sprintf("%v ", oconv(n.Op, FmtSharp)) } else { - f += Oconv(n.Op, FmtSharp) + f += oconv(n.Op, FmtSharp) } f += exprfmt(n.Left, nprec+1) return f @@ -1439,7 +1439,7 @@ func exprfmt(n *Node, prec int) string { var f string f += exprfmt(n.Left, nprec) - f += fmt.Sprintf(" %v ", Oconv(n.Op, FmtSharp)) + f += fmt.Sprintf(" %v ", oconv(n.Op, FmtSharp)) f += exprfmt(n.Right, nprec+1) return f @@ -1460,7 +1460,7 @@ func exprfmt(n *Node, prec int) string { var f string f += exprfmt(n.Left, nprec) // TODO(marvin): Fix Node.EType type union. 
- f += fmt.Sprintf(" %v ", Oconv(Op(n.Etype), FmtSharp)) + f += fmt.Sprintf(" %v ", oconv(Op(n.Etype), FmtSharp)) f += exprfmt(n.Right, nprec+1) return f @@ -1472,7 +1472,7 @@ func exprfmt(n *Node, prec int) string { } } - return fmt.Sprintf("", Oconv(n.Op, 0)) + return fmt.Sprintf("", oconv(n.Op, 0)) } func nodefmt(n *Node, flag FmtFlag) string { @@ -1527,40 +1527,40 @@ func nodedump(n *Node, flag FmtFlag) string { } if n.Ninit.Len() != 0 { - fmt.Fprintf(&buf, "%v-init%v", Oconv(n.Op, 0), n.Ninit) + fmt.Fprintf(&buf, "%v-init%v", oconv(n.Op, 0), n.Ninit) indent(&buf) } } switch n.Op { default: - fmt.Fprintf(&buf, "%v%v", Oconv(n.Op, 0), Jconv(n, 0)) + fmt.Fprintf(&buf, "%v%v", oconv(n.Op, 0), Jconv(n, 0)) case OREGISTER, OINDREG: - fmt.Fprintf(&buf, "%v-%v%v", Oconv(n.Op, 0), obj.Rconv(int(n.Reg)), Jconv(n, 0)) + fmt.Fprintf(&buf, "%v-%v%v", oconv(n.Op, 0), obj.Rconv(int(n.Reg)), Jconv(n, 0)) case OLITERAL: - fmt.Fprintf(&buf, "%v-%v%v", Oconv(n.Op, 0), Vconv(n.Val(), 0), Jconv(n, 0)) + fmt.Fprintf(&buf, "%v-%v%v", oconv(n.Op, 0), Vconv(n.Val(), 0), Jconv(n, 0)) case ONAME, ONONAME: if n.Sym != nil { - fmt.Fprintf(&buf, "%v-%v%v", Oconv(n.Op, 0), n.Sym, Jconv(n, 0)) + fmt.Fprintf(&buf, "%v-%v%v", oconv(n.Op, 0), n.Sym, Jconv(n, 0)) } else { - fmt.Fprintf(&buf, "%v%v", Oconv(n.Op, 0), Jconv(n, 0)) + fmt.Fprintf(&buf, "%v%v", oconv(n.Op, 0), Jconv(n, 0)) } if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil { indent(&buf) - fmt.Fprintf(&buf, "%v-ntype%v", Oconv(n.Op, 0), n.Name.Param.Ntype) + fmt.Fprintf(&buf, "%v-ntype%v", oconv(n.Op, 0), n.Name.Param.Ntype) } case OASOP: - fmt.Fprintf(&buf, "%v-%v%v", Oconv(n.Op, 0), Oconv(Op(n.Etype), 0), Jconv(n, 0)) + fmt.Fprintf(&buf, "%v-%v%v", oconv(n.Op, 0), oconv(Op(n.Etype), 0), Jconv(n, 0)) case OTYPE: - fmt.Fprintf(&buf, "%v %v%v type=%v", Oconv(n.Op, 0), n.Sym, Jconv(n, 0), n.Type) + fmt.Fprintf(&buf, "%v %v%v type=%v", oconv(n.Op, 0), n.Sym, Jconv(n, 0), n.Type) if recur && 
n.Type == nil && n.Name.Param.Ntype != nil { indent(&buf) - fmt.Fprintf(&buf, "%v-ntype%v", Oconv(n.Op, 0), n.Name.Param.Ntype) + fmt.Fprintf(&buf, "%v-ntype%v", oconv(n.Op, 0), n.Name.Param.Ntype) } } @@ -1581,17 +1581,17 @@ func nodedump(n *Node, flag FmtFlag) string { } if n.List.Len() != 0 { indent(&buf) - fmt.Fprintf(&buf, "%v-list%v", Oconv(n.Op, 0), n.List) + fmt.Fprintf(&buf, "%v-list%v", oconv(n.Op, 0), n.List) } if n.Rlist.Len() != 0 { indent(&buf) - fmt.Fprintf(&buf, "%v-rlist%v", Oconv(n.Op, 0), n.Rlist) + fmt.Fprintf(&buf, "%v-rlist%v", oconv(n.Op, 0), n.Rlist) } if n.Nbody.Len() != 0 { indent(&buf) - fmt.Fprintf(&buf, "%v-body%v", Oconv(n.Op, 0), n.Nbody) + fmt.Fprintf(&buf, "%v-body%v", oconv(n.Op, 0), n.Nbody) } } diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index d16c4fa992..6fb27cf8e1 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -218,7 +218,7 @@ func Genlist(l Nodes) { func cgen_proc(n *Node, proc int) { switch n.Left.Op { default: - Fatalf("cgen_proc: unknown call %v", Oconv(n.Left.Op, 0)) + Fatalf("cgen_proc: unknown call %v", oconv(n.Left.Op, 0)) case OCALLMETH: cgen_callmeth(n.Left, proc) diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index ff6fbe42fb..3d9ab626f6 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -327,7 +327,7 @@ func Naddr(a *obj.Addr, n *Node) { a := a // copy to let escape into Ctxt.Dconv Debug['h'] = 1 Dump("naddr", n) - Fatalf("naddr: bad %v %v", Oconv(n.Op, 0), Ctxt.Dconv(a)) + Fatalf("naddr: bad %v %v", oconv(n.Op, 0), Ctxt.Dconv(a)) case OREGISTER: a.Type = obj.TYPE_REG @@ -422,7 +422,7 @@ func Naddr(a *obj.Addr, n *Node) { if !n.Left.Type.IsStruct() || n.Left.Type.Field(0).Sym != n.Sym { Debug['h'] = 1 Dump("naddr", n) - Fatalf("naddr: bad %v %v", Oconv(n.Op, 0), Ctxt.Dconv(a)) + Fatalf("naddr: bad %v %v", oconv(n.Op, 0), Ctxt.Dconv(a)) } Naddr(a, 
n.Left) @@ -465,7 +465,7 @@ func Naddr(a *obj.Addr, n *Node) { } if a.Type != obj.TYPE_MEM { a := a // copy to let escape into Ctxt.Dconv - Fatalf("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(n.Left.Op, 0)) + Fatalf("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), oconv(n.Left.Op, 0)) } a.Type = obj.TYPE_ADDR diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index 7e7bda466d..aea05caee2 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -165,7 +165,7 @@ func ordersafeexpr(n *Node, order *Order) *Node { a.Right = r return typecheck(&a, Erv) default: - Fatalf("ordersafeexpr %v", Oconv(n.Op, 0)) + Fatalf("ordersafeexpr %v", oconv(n.Op, 0)) return nil // not reached } } @@ -416,7 +416,7 @@ func ordercall(n *Node, order *Order) { func ordermapassign(n *Node, order *Order) { switch n.Op { default: - Fatalf("ordermapassign %v", Oconv(n.Op, 0)) + Fatalf("ordermapassign %v", oconv(n.Op, 0)) case OAS: order.out = append(order.out, n) @@ -478,7 +478,7 @@ func orderstmt(n *Node, order *Order) { switch n.Op { default: - Fatalf("orderstmt %v", Oconv(n.Op, 0)) + Fatalf("orderstmt %v", oconv(n.Op, 0)) case OVARKILL, OVARLIVE: order.out = append(order.out, n) @@ -790,7 +790,7 @@ func orderstmt(n *Node, order *Order) { var r *Node for _, n2 := range n.List.Slice() { if n2.Op != OXCASE { - Fatalf("order select case %v", Oconv(n2.Op, 0)) + Fatalf("order select case %v", oconv(n2.Op, 0)) } r = n2.Left setlineno(n2) @@ -803,7 +803,7 @@ func orderstmt(n *Node, order *Order) { if r != nil { switch r.Op { default: - Yyerror("unknown op in select %v", Oconv(r.Op, 0)) + Yyerror("unknown op in select %v", oconv(r.Op, 0)) Dump("select case", r) // If this is case x := <-ch or case x, y := <-ch, the case has @@ -943,7 +943,7 @@ func orderstmt(n *Node, order *Order) { n.Left = orderexpr(n.Left, order, nil) for _, n4 := range n.List.Slice() { if n4.Op != OXCASE { - Fatalf("order switch case %v", Oconv(n4.Op, 0)) + 
Fatalf("order switch case %v", oconv(n4.Op, 0)) } orderexprlistinplace(n4.List, order) orderblockNodes(&n4.Nbody) diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index bba4ff5e48..984d468bc6 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -91,7 +91,7 @@ func gvardefx(n *Node, as obj.As) { Fatalf("gvardef nil") } if n.Op != ONAME { - Yyerror("gvardef %v; %v", Oconv(n.Op, FmtSharp), n) + Yyerror("gvardef %v; %v", oconv(n.Op, FmtSharp), n) return } diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index 620bcb34a3..dc7d0068a1 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -132,7 +132,7 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { switch n.Op { default: - Fatalf("instrument: unknown node type %v", Oconv(n.Op, 0)) + Fatalf("instrument: unknown node type %v", oconv(n.Op, 0)) case OAS, OASWB, OAS2FUNC: instrumentnode(&n.Left, init, 1, 0) @@ -374,13 +374,13 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { OAS2RECV, OAS2MAPR, OASOP: - Yyerror("instrument: %v must be lowered by now", Oconv(n.Op, 0)) + Yyerror("instrument: %v must be lowered by now", oconv(n.Op, 0)) goto ret // impossible nodes: only appear in backend. 
case ORROTC, OEXTEND: - Yyerror("instrument: %v cannot exist now", Oconv(n.Op, 0)) + Yyerror("instrument: %v cannot exist now", oconv(n.Op, 0)) goto ret case OGETG: diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 22c716f9ce..c197fdd57d 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -18,7 +18,7 @@ func typecheckselect(sel *Node) { ncase = n1 setlineno(ncase) if ncase.Op != OXCASE { - Fatalf("typecheckselect %v", Oconv(ncase.Op, 0)) + Fatalf("typecheckselect %v", oconv(ncase.Op, 0)) } if ncase.List.Len() == 0 { @@ -120,7 +120,7 @@ func walkselect(sel *Node) { var ch *Node switch n.Op { default: - Fatalf("select %v", Oconv(n.Op, 0)) + Fatalf("select %v", oconv(n.Op, 0)) // ok already case OSEND: @@ -218,7 +218,7 @@ func walkselect(sel *Node) { r.Ninit.Set(cas.Ninit.Slice()) switch n.Op { default: - Fatalf("select %v", Oconv(n.Op, 0)) + Fatalf("select %v", oconv(n.Op, 0)) // if selectnbsend(c, v) { body } else { default body } case OSEND: @@ -282,7 +282,7 @@ func walkselect(sel *Node) { } else { switch n.Op { default: - Fatalf("select %v", Oconv(n.Op, 0)) + Fatalf("select %v", oconv(n.Op, 0)) // selectsend(sel *byte, hchan *chan any, elem *any) (selected bool); case OSEND: diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 7763b18ce2..758f29d098 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -2563,7 +2563,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { // want to set it here. 
case OCALLINTER: if fn.Op != ODOTINTER { - Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", Oconv(fn.Op, 0)) + Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", oconv(fn.Op, 0)) } i := s.expr(fn.Left) itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i) @@ -2787,7 +2787,7 @@ func (s *state) addr(n *Node, bounded bool) *ssa.Value { return s.call(n, callNormal) default: - s.Unimplementedf("unhandled addr %v", Oconv(n.Op, 0)) + s.Unimplementedf("unhandled addr %v", oconv(n.Op, 0)) return nil } } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index f2f2a70446..ff491f5dc2 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1203,9 +1203,9 @@ func printframenode(n *Node) { } switch n.Op { case ONAME: - fmt.Printf("%v %v G%d %v width=%d\n", Oconv(n.Op, 0), n.Sym, n.Name.Vargen, n.Type, w) + fmt.Printf("%v %v G%d %v width=%d\n", oconv(n.Op, 0), n.Sym, n.Name.Vargen, n.Type, w) case OTYPE: - fmt.Printf("%v %v width=%d\n", Oconv(n.Op, 0), n.Type, w) + fmt.Printf("%v %v width=%d\n", oconv(n.Op, 0), n.Type, w) } } @@ -1286,7 +1286,7 @@ func badtype(op Op, tl *Type, tr *Type) { } s := fmt_ - Yyerror("illegal types for operand: %v%s", Oconv(op, 0), s) + Yyerror("illegal types for operand: %v%s", oconv(op, 0), s) } // Brcom returns !(op). 
@@ -1306,7 +1306,7 @@ func Brcom(op Op) Op { case OGE: return OLT } - Fatalf("brcom: no com for %v\n", Oconv(op, 0)) + Fatalf("brcom: no com for %v\n", oconv(op, 0)) return op } @@ -1327,7 +1327,7 @@ func Brrev(op Op) Op { case OGE: return OLE } - Fatalf("brrev: no rev for %v\n", Oconv(op, 0)) + Fatalf("brrev: no rev for %v\n", oconv(op, 0)) return op } diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 3b08b13508..a8e6e15e0a 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -350,7 +350,7 @@ func casebody(sw *Node, typeswvar *Node) { for i, n := range sw.List.Slice() { setlineno(n) if n.Op != OXCASE { - Fatalf("casebody %v", Oconv(n.Op, 0)) + Fatalf("casebody %v", oconv(n.Op, 0)) } n.Op = OCASE needvar := n.List.Len() != 1 || n.List.First().Op == OLITERAL diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 8860c5d803..8c51802ac6 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -278,7 +278,7 @@ OpSwitch: default: Dump("typecheck", n) - Fatalf("typecheck %v", Oconv(n.Op, 0)) + Fatalf("typecheck %v", oconv(n.Op, 0)) // names case OLITERAL: @@ -611,7 +611,7 @@ OpSwitch: aop = assignop(l.Type, r.Type, nil) if aop != 0 { if r.Type.IsInterface() && !l.Type.IsInterface() && !l.Type.IsComparable() { - Yyerror("invalid operation: %v (operator %v not defined on %s)", n, Oconv(op, 0), typekind(l.Type)) + Yyerror("invalid operation: %v (operator %v not defined on %s)", n, oconv(op, 0), typekind(l.Type)) n.Type = nil return n } @@ -633,7 +633,7 @@ OpSwitch: aop = assignop(r.Type, l.Type, nil) if aop != 0 { if l.Type.IsInterface() && !r.Type.IsInterface() && !r.Type.IsComparable() { - Yyerror("invalid operation: %v (operator %v not defined on %s)", n, Oconv(op, 0), typekind(r.Type)) + Yyerror("invalid operation: %v (operator %v not defined on %s)", n, oconv(op, 0), typekind(r.Type)) n.Type = nil 
return n } @@ -664,7 +664,7 @@ OpSwitch: } if !okfor[op][et] { - Yyerror("invalid operation: %v (operator %v not defined on %s)", n, Oconv(op, 0), typekind(t)) + Yyerror("invalid operation: %v (operator %v not defined on %s)", n, oconv(op, 0), typekind(t)) n.Type = nil return n } @@ -774,7 +774,7 @@ OpSwitch: return n } if !okfor[n.Op][t.Etype] { - Yyerror("invalid operation: %v %v", Oconv(n.Op, 0), t) + Yyerror("invalid operation: %v %v", oconv(n.Op, 0), t) n.Type = nil return n } @@ -1314,7 +1314,7 @@ OpSwitch: case OCAP, OLEN, OREAL, OIMAG: ok |= Erv - if !onearg(n, "%v", Oconv(n.Op, 0)) { + if !onearg(n, "%v", oconv(n.Op, 0)) { n.Type = nil return n } @@ -1380,7 +1380,7 @@ OpSwitch: break OpSwitch badcall1: - Yyerror("invalid argument %v for %v", Nconv(n.Left, FmtLong), Oconv(n.Op, 0)) + Yyerror("invalid argument %v for %v", Nconv(n.Left, FmtLong), oconv(n.Op, 0)) n.Type = nil return n @@ -1463,7 +1463,7 @@ OpSwitch: break OpSwitch case OCLOSE: - if !onearg(n, "%v", Oconv(n.Op, 0)) { + if !onearg(n, "%v", oconv(n.Op, 0)) { n.Type = nil return n } @@ -2284,19 +2284,19 @@ func twoarg(n *Node) bool { return true } if n.List.Len() == 0 { - Yyerror("missing argument to %v - %v", Oconv(n.Op, 0), n) + Yyerror("missing argument to %v - %v", oconv(n.Op, 0), n) return false } n.Left = n.List.First() if n.List.Len() == 1 { - Yyerror("missing argument to %v - %v", Oconv(n.Op, 0), n) + Yyerror("missing argument to %v - %v", oconv(n.Op, 0), n) n.List.Set(nil) return false } if n.List.Len() > 2 { - Yyerror("too many arguments to %v - %v", Oconv(n.Op, 0), n) + Yyerror("too many arguments to %v - %v", oconv(n.Op, 0), n) n.List.Set(nil) return false } @@ -2662,7 +2662,7 @@ func typecheckaste(op Op, call *Node, isddd bool, tstruct *Type, nl Nodes, desc if call != nil { Yyerror("invalid use of ... in call to %v", call) } else { - Yyerror("invalid use of ... in %v", Oconv(op, 0)) + Yyerror("invalid use of ... 
in %v", oconv(op, 0)) } } @@ -2682,7 +2682,7 @@ notenough: Yyerror("not enough arguments in call to %v", call) } } else { - Yyerror("not enough arguments to %v", Oconv(op, 0)) + Yyerror("not enough arguments to %v", oconv(op, 0)) } if n != nil { n.Diag = 1 @@ -2695,7 +2695,7 @@ toomany: if call != nil { Yyerror("too many arguments in call to %v", call) } else { - Yyerror("too many arguments to %v", Oconv(op, 0)) + Yyerror("too many arguments to %v", oconv(op, 0)) } goto out } @@ -3606,7 +3606,7 @@ func typecheckdef(n *Node) *Node { switch n.Op { default: - Fatalf("typecheckdef %v", Oconv(n.Op, 0)) + Fatalf("typecheckdef %v", oconv(n.Op, 0)) // not really syms case OGOTO, OLABEL: diff --git a/src/cmd/compile/internal/gc/unsafe.go b/src/cmd/compile/internal/gc/unsafe.go index e1d3b40098..5935cd98ff 100644 --- a/src/cmd/compile/internal/gc/unsafe.go +++ b/src/cmd/compile/internal/gc/unsafe.go @@ -82,7 +82,7 @@ func unsafenmagic(nn *Node) *Node { v += r1.Xoffset default: Dump("unsafenmagic", r) - Fatalf("impossible %v node after dot insertion", Oconv(r1.Op, FmtSharp)) + Fatalf("impossible %v node after dot insertion", oconv(r1.Op, FmtSharp)) goto bad } } diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index 0c7c5fa7aa..bce34374e8 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -159,7 +159,7 @@ func walkstmt(n *Node) *Node { if n.Op == ONAME { Yyerror("%v is not a top level statement", n.Sym) } else { - Yyerror("%v is not a top level statement", Oconv(n.Op, 0)) + Yyerror("%v is not a top level statement", oconv(n.Op, 0)) } Dump("nottop", n) @@ -1505,7 +1505,7 @@ opswitch: // ifaceeq(i1 any-1, i2 any-2) (ret bool); case OCMPIFACE: if !Eqtype(n.Left.Type, n.Right.Type) { - Fatalf("ifaceeq %v %v %v", Oconv(n.Op, 0), n.Left.Type, n.Right.Type) + Fatalf("ifaceeq %v %v %v", oconv(n.Op, 0), n.Left.Type, n.Right.Type) } var fn *Node if n.Left.Type.IsEmptyInterface() { @@ -1651,7 +1651,7 @@ func 
ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node { var nln, nrn Nodes nln.Set(nl) nrn.Set(nr) - Yyerror("error in shape across %v %v %v / %d %d [%s]", Hconv(nln, FmtSign), Oconv(op, 0), Hconv(nrn, FmtSign), len(nl), len(nr), Curfn.Func.Nname.Sym.Name) + Yyerror("error in shape across %v %v %v / %d %d [%s]", Hconv(nln, FmtSign), oconv(op, 0), Hconv(nrn, FmtSign), len(nl), len(nr), Curfn.Func.Nname.Sym.Name) } return nn } @@ -1866,9 +1866,9 @@ func ascompatte(op Op, call *Node, isddd bool, nl *Type, lr []*Node, fp int, ini l1 := dumptypes(nl, "expected") l2 := dumpnodetypes(lr0, "given") if l != nil { - Yyerror("not enough arguments to %v\n\t%s\n\t%s", Oconv(op, 0), l1, l2) + Yyerror("not enough arguments to %v\n\t%s\n\t%s", oconv(op, 0), l1, l2) } else { - Yyerror("too many arguments to %v\n\t%s\n\t%s", Oconv(op, 0), l1, l2) + Yyerror("too many arguments to %v\n\t%s\n\t%s", oconv(op, 0), l1, l2) } } @@ -2142,7 +2142,7 @@ func applywritebarrier(n *Node) *Node { func convas(n *Node, init *Nodes) *Node { if n.Op != OAS { - Fatalf("convas: not OAS %v", Oconv(n.Op, 0)) + Fatalf("convas: not OAS %v", oconv(n.Op, 0)) } n.Typecheck = 1 @@ -2285,7 +2285,7 @@ func reorder3(all []*Node) []*Node { switch l.Op { default: - Fatalf("reorder3 unexpected lvalue %v", Oconv(l.Op, FmtSharp)) + Fatalf("reorder3 unexpected lvalue %v", oconv(l.Op, FmtSharp)) case ONAME: break @@ -3788,7 +3788,7 @@ func usefield(n *Node) { switch n.Op { default: - Fatalf("usefield %v", Oconv(n.Op, 0)) + Fatalf("usefield %v", oconv(n.Op, 0)) case ODOT, ODOTPTR: break diff --git a/src/cmd/compile/internal/mips64/gsubr.go b/src/cmd/compile/internal/mips64/gsubr.go index 60805270af..a2bff29ecc 100644 --- a/src/cmd/compile/internal/mips64/gsubr.go +++ b/src/cmd/compile/internal/mips64/gsubr.go @@ -148,7 +148,7 @@ func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog { case gc.TFLOAT32: switch op { default: - gc.Fatalf("ginscmp: no entry for op=%v type=%v", gc.Oconv(op, 0), t) + 
gc.Fatalf("ginscmp: no entry for op=%s type=%v", op, t) case gc.OEQ, gc.ONE: @@ -165,7 +165,7 @@ func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog { case gc.TFLOAT64: switch op { default: - gc.Fatalf("ginscmp: no entry for op=%v type=%v", gc.Oconv(op, 0), t) + gc.Fatalf("ginscmp: no entry for op=%s type=%v", op, t) case gc.OEQ, gc.ONE: @@ -715,7 +715,7 @@ func optoas(op gc.Op, t *gc.Type) obj.As { a := obj.AXXX switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { default: - gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(op, 0), t) + gc.Fatalf("optoas: no entry for op=%s type=%v", op, t) case OEQ_ | gc.TBOOL, OEQ_ | gc.TINT8, diff --git a/src/cmd/compile/internal/ppc64/gsubr.go b/src/cmd/compile/internal/ppc64/gsubr.go index eb6cd2c5e9..1137c50678 100644 --- a/src/cmd/compile/internal/ppc64/gsubr.go +++ b/src/cmd/compile/internal/ppc64/gsubr.go @@ -712,7 +712,7 @@ func optoas(op gc.Op, t *gc.Type) obj.As { a := obj.AXXX switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { default: - gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(op, 0), t) + gc.Fatalf("optoas: no entry for op=%v type=%v", op, t) case OEQ_ | gc.TBOOL, OEQ_ | gc.TINT8, diff --git a/src/cmd/compile/internal/s390x/gsubr.go b/src/cmd/compile/internal/s390x/gsubr.go index e9cfd23e42..3e8782f5e6 100644 --- a/src/cmd/compile/internal/s390x/gsubr.go +++ b/src/cmd/compile/internal/s390x/gsubr.go @@ -639,7 +639,7 @@ func optoas(op gc.Op, t *gc.Type) obj.As { a := obj.AXXX switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { default: - gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(op, 0), t) + gc.Fatalf("optoas: no entry for op=%v type=%v", op, t) case OEQ_ | gc.TBOOL, OEQ_ | gc.TINT8, diff --git a/src/cmd/compile/internal/x86/cgen64.go b/src/cmd/compile/internal/x86/cgen64.go index 61e191f87c..ea52d6951a 100644 --- a/src/cmd/compile/internal/x86/cgen64.go +++ b/src/cmd/compile/internal/x86/cgen64.go @@ -19,12 +19,12 @@ func cgen64(n *gc.Node, res 
*gc.Node) { if res.Op != gc.OINDREG && res.Op != gc.ONAME { gc.Dump("n", n) gc.Dump("res", res) - gc.Fatalf("cgen64 %v of %v", gc.Oconv(n.Op, 0), gc.Oconv(res.Op, 0)) + gc.Fatalf("cgen64 %v of %v", n.Op, res.Op) } switch n.Op { default: - gc.Fatalf("cgen64 %v", gc.Oconv(n.Op, 0)) + gc.Fatalf("cgen64 %v", n.Op) case gc.OMINUS: gc.Cgen(n.Left, res) @@ -531,7 +531,7 @@ func cmp64(nl *gc.Node, nr *gc.Node, op gc.Op, likely int, to *obj.Prog) { var br *obj.Prog switch op { default: - gc.Fatalf("cmp64 %v %v", gc.Oconv(op, 0), t) + gc.Fatalf("cmp64 %v %v", op, t) // cmp hi // jne L diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go index 38c3f8fc0e..21d989c98d 100644 --- a/src/cmd/compile/internal/x86/ggen.go +++ b/src/cmd/compile/internal/x86/ggen.go @@ -661,7 +661,7 @@ func cgen_floatsse(n *gc.Node, res *gc.Node) { switch n.Op { default: gc.Dump("cgen_floatsse", n) - gc.Fatalf("cgen_floatsse %v", gc.Oconv(n.Op, 0)) + gc.Fatalf("cgen_floatsse %v", n.Op) return case gc.OMINUS, diff --git a/src/cmd/compile/internal/x86/gsubr.go b/src/cmd/compile/internal/x86/gsubr.go index 91c009116c..569ffd11bd 100644 --- a/src/cmd/compile/internal/x86/gsubr.go +++ b/src/cmd/compile/internal/x86/gsubr.go @@ -91,7 +91,7 @@ func optoas(op gc.Op, t *gc.Type) obj.As { a := obj.AXXX switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) { default: - gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(op, 0), t) + gc.Fatalf("optoas: no entry %v-%v", op, t) case OADDR_ | gc.TPTR32: a = x86.ALEAL @@ -454,7 +454,7 @@ func foptoas(op gc.Op, t *gc.Type, flg int) obj.As { if !gc.Thearch.Use387 { switch uint32(op)<<16 | uint32(et) { default: - gc.Fatalf("foptoas-sse: no entry %v-%v", gc.Oconv(op, 0), t) + gc.Fatalf("foptoas-sse: no entry %v-%v", op, t) case OCMP_ | gc.TFLOAT32: a = x86.AUCOMISS @@ -587,7 +587,7 @@ func foptoas(op gc.Op, t *gc.Type, flg int) obj.As { return x86.AFCHS } - gc.Fatalf("foptoas %v %v %#x", gc.Oconv(op, 0), t, flg) + gc.Fatalf("foptoas %v %v %#x", 
op, t, flg) return 0 } -- cgit v1.3 From a19e60b2c3c388abd3503da3fc2659bef1e76b46 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Tue, 26 Apr 2016 15:22:33 -0700 Subject: cmd/compile: don't use line numbers from ONAME and named OLITERALs The line numbers of ONAMEs are the location of their declaration, not their use. The line numbers of named OLITERALs are also the location of their declaration. Ignore both of these. Instead, we will inherit the line number from the containing syntactic item. Fixes #14742 Fixes #15430 Change-Id: Ie43b5b9f6321cbf8cead56e37ccc9364d0702f2f Reviewed-on: https://go-review.googlesource.com/22479 Reviewed-by: Robert Griesemer Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/ssa.go | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 758f29d098..1006fcd40e 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -1414,8 +1414,12 @@ func (s *state) ssaRotateOp(op Op, t *Type) ssa.Op { // expr converts the expression n to ssa, adds it to s and returns the ssa result. func (s *state) expr(n *Node) *ssa.Value { - s.pushLine(n.Lineno) - defer s.popLine() + if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) { + // ONAMEs and named OLITERALs have the line number + // of the decl, not the use. See issue 14742. + s.pushLine(n.Lineno) + defer s.popLine() + } s.stmtList(n.Ninit) switch n.Op { @@ -1463,14 +1467,7 @@ func (s *state) expr(n *Node) *ssa.Value { } return s.entryNewValue0A(ssa.OpConstString, n.Type, u) case bool: - v := s.constBool(u) - // For some reason the frontend gets the line numbers of - // CTBOOL literals totally wrong. Fix it here by grabbing - // the line number of the enclosing AST node. 
- if len(s.line) >= 2 { - v.Line = s.line[len(s.line)-2] - } - return v + return s.constBool(u) case *NilVal: t := n.Type switch { -- cgit v1.3 From 86c93c989e73e823e9e66f3d3e319b616544c320 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Tue, 26 Apr 2016 22:31:02 -0700 Subject: cmd/compile: don't write pos info for builtin packages TestBuiltin will fail if run on Windows and builtin.go was generated on a non-Windows machine (or vice versa) because path names have different separators. Avoid problem altogether by not writing pos info for builtin packages. It's not needed. Affects -newexport only. Change-Id: I8944f343452faebaea9a08b5fb62829bed77c148 Reviewed-on: https://go-review.googlesource.com/22498 Run-TryBot: Robert Griesemer TryBot-Result: Gobot Gobot Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/bexport.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 5c9a2734d4..b3ee9b8054 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -112,10 +112,6 @@ import ( // (suspected) format errors, and whenever a change is made to the format. const debugFormat = false // default: false -// If posInfoFormat is set, position information (file, lineno) is written -// for each exported object, including methods and struct fields. 
-const posInfoFormat = true // default: true - // TODO(gri) remove eventually const forceNewExport = false // force new export format - do NOT submit with this flag set @@ -144,8 +140,9 @@ type exporter struct { funcList []*Func // position encoding - prevFile string - prevLine int + posInfoFormat bool + prevFile string + prevLine int // debugging support written int // bytes written @@ -160,7 +157,11 @@ func export(out *bufio.Writer, trace bool) int { strIndex: map[string]int{"": 0}, // empty string is mapped to 0 pkgIndex: make(map[*Pkg]int), typIndex: make(map[*Type]int), - trace: trace, + // don't emit pos info for builtin packages + // (not needed and avoids path name diffs in builtin.go between + // Windows and non-Windows machines, exposed via builtin_test.go) + posInfoFormat: Debug['A'] == 0, + trace: trace, } // first byte indicates low-level encoding format @@ -171,7 +172,7 @@ func export(out *bufio.Writer, trace bool) int { p.rawByte(format) // posInfo exported or not? - p.bool(posInfoFormat) + p.bool(p.posInfoFormat) // --- generic export data --- @@ -506,7 +507,7 @@ func (p *exporter) obj(sym *Sym) { } func (p *exporter) pos(n *Node) { - if !posInfoFormat { + if !p.posInfoFormat { return } -- cgit v1.3 From 7538b1db8ec0d82a623847fe5987f1988fe16448 Mon Sep 17 00:00:00 2001 From: Robert Griesemer Date: Fri, 15 Apr 2016 14:14:04 -0700 Subject: cmd/compile: switch to compact export format by default builtin.go was auto-generated via go generate; all other changes were manual. The new format reduces the export data size by ~65% on average for the std library packages (and there is still quite a bit of room for improvement). The average time to write export data is reduced by (at least) 62% as measured in one run over the std lib, it is likely more. The average time to read import data is reduced by (at least) 37% as measured in one run over the std lib, it is likely more. There is also room to improve this time. 
The compiler transparently handles both packages using the old and the new format. Comparing the -S output of the go build for each package via the cmp.bash script (added) shows identical assembly code for all packages, but 6 files show file:line differences: The following files have differences because they use cgo and cgo uses different temp. directories for different builds. Harmless. src/crypto/x509 src/net src/os/user src/runtime/cgo The following files have file:line differences that are not yet fully explained; however the differences exist w/ and w/o new export format (pre-existing condition). See issue #15453. src/go/internal/gccgoimporter src/go/internal/gcimporter In summary, switching to the new export format produces the same package files as before for all practical purposes. How can you tell which one you have (if you care): Open a package (.a) file in an editor. Textual export data starts with a $$ after the header and is more or less legible; binary export data starts with a $$B after the header and is mostly unreadable. A stand-alone decoder (for debugging) is in the works. In case of a problem, please first try reverting back to the old textual format to determine if the cause is the new export format: For a stand-alone compiler invocation: - go tool compile -newexport=0 For a single package: - go build -gcflags="-newexport=0" For make/all.bash: - (export GO_GCFLAGS="-newexport=0"; sh make.bash) Fixes #13241. 
Change-Id: I2588cb463be80af22446bf80c225e92ab79878b8 Reviewed-on: https://go-review.googlesource.com/22123 Reviewed-by: Brad Fitzpatrick Run-TryBot: Robert Griesemer Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/bexport.go | 3 - src/cmd/compile/internal/gc/builtin.go | 233 ++++++++++++++------------------- src/cmd/compile/internal/gc/export.go | 2 +- src/cmd/compile/internal/gc/main.go | 2 +- src/cmp.bash | 61 +++++++++ 5 files changed, 164 insertions(+), 137 deletions(-) create mode 100644 src/cmp.bash (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index b3ee9b8054..5ab7fdc0e9 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -112,9 +112,6 @@ import ( // (suspected) format errors, and whenever a change is made to the format. const debugFormat = false // default: false -// TODO(gri) remove eventually -const forceNewExport = false // force new export format - do NOT submit with this flag set - // forceObjFileStability enforces additional constraints in export data // and other parts of the compiler to eliminate object file differences // only due to the choice of export format. diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go index b593d11296..cc64e73f25 100644 --- a/src/cmd/compile/internal/gc/builtin.go +++ b/src/cmd/compile/internal/gc/builtin.go @@ -3,137 +3,106 @@ package gc const runtimeimport = "" + - "package runtime safe\n" + - "func @\"\".newobject (@\"\".typ·2 *byte) (? *any)\n" + - "func @\"\".panicindex ()\n" + - "func @\"\".panicslice ()\n" + - "func @\"\".panicdivide ()\n" + - "func @\"\".throwreturn ()\n" + - "func @\"\".throwinit ()\n" + - "func @\"\".panicwrap (? string, ? string, ? string)\n" + - "func @\"\".gopanic (? interface {})\n" + - "func @\"\".gorecover (? *int32) (? interface {})\n" + - "func @\"\".printbool (? bool)\n" + - "func @\"\".printfloat (? 
float64)\n" + - "func @\"\".printint (? int64)\n" + - "func @\"\".printhex (? uint64)\n" + - "func @\"\".printuint (? uint64)\n" + - "func @\"\".printcomplex (? complex128)\n" + - "func @\"\".printstring (? string)\n" + - "func @\"\".printpointer (? any)\n" + - "func @\"\".printiface (? any)\n" + - "func @\"\".printeface (? any)\n" + - "func @\"\".printslice (? any)\n" + - "func @\"\".printnl ()\n" + - "func @\"\".printsp ()\n" + - "func @\"\".printlock ()\n" + - "func @\"\".printunlock ()\n" + - "func @\"\".concatstring2 (? *[32]byte, ? string, ? string) (? string)\n" + - "func @\"\".concatstring3 (? *[32]byte, ? string, ? string, ? string) (? string)\n" + - "func @\"\".concatstring4 (? *[32]byte, ? string, ? string, ? string, ? string) (? string)\n" + - "func @\"\".concatstring5 (? *[32]byte, ? string, ? string, ? string, ? string, ? string) (? string)\n" + - "func @\"\".concatstrings (? *[32]byte, ? []string) (? string)\n" + - "func @\"\".cmpstring (? string, ? string) (? int)\n" + - "func @\"\".eqstring (? string, ? string) (? bool)\n" + - "func @\"\".intstring (? *[4]byte, ? int64) (? string)\n" + - "func @\"\".slicebytetostring (? *[32]byte, ? []byte) (? string)\n" + - "func @\"\".slicebytetostringtmp (? []byte) (? string)\n" + - "func @\"\".slicerunetostring (? *[32]byte, ? []rune) (? string)\n" + - "func @\"\".stringtoslicebyte (? *[32]byte, ? string) (? []byte)\n" + - "func @\"\".stringtoslicebytetmp (? string) (? []byte)\n" + - "func @\"\".stringtoslicerune (? *[32]rune, ? string) (? []rune)\n" + - "func @\"\".stringiter (? string, ? int) (? int)\n" + - "func @\"\".stringiter2 (? string, ? int) (@\"\".retk·1 int, @\"\".retv·2 rune)\n" + - "func @\"\".slicecopy (@\"\".to·2 any, @\"\".fr·3 any, @\"\".wid·4 uintptr \"unsafe-uintptr\") (? int)\n" + - "func @\"\".slicestringcopy (@\"\".to·2 any, @\"\".fr·3 any) (? 
int)\n" + - "func @\"\".convI2E (@\"\".elem·2 any) (@\"\".ret·1 any)\n" + - "func @\"\".convI2I (@\"\".typ·2 *byte, @\"\".elem·3 any) (@\"\".ret·1 any)\n" + - "func @\"\".convT2E (@\"\".typ·2 *byte, @\"\".elem·3 *any, @\"\".buf·4 *any) (@\"\".ret·1 any)\n" + - "func @\"\".convT2I (@\"\".tab·2 *byte, @\"\".elem·3 *any, @\"\".buf·4 *any) (@\"\".ret·1 any)\n" + - "func @\"\".assertE2E (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + - "func @\"\".assertE2E2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + - "func @\"\".assertE2I (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + - "func @\"\".assertE2I2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + - "func @\"\".assertE2T (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + - "func @\"\".assertE2T2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + - "func @\"\".assertI2E (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + - "func @\"\".assertI2E2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + - "func @\"\".assertI2I (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + - "func @\"\".assertI2I2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? bool)\n" + - "func @\"\".assertI2T (@\"\".typ·1 *byte, @\"\".iface·2 any, @\"\".ret·3 *any)\n" + - "func @\"\".assertI2T2 (@\"\".typ·2 *byte, @\"\".iface·3 any, @\"\".ret·4 *any) (? 
bool)\n" + - "func @\"\".panicdottype (@\"\".have·1 *byte, @\"\".want·2 *byte, @\"\".iface·3 *byte)\n" + - "func @\"\".ifaceeq (@\"\".i1·2 any, @\"\".i2·3 any) (@\"\".ret·1 bool)\n" + - "func @\"\".efaceeq (@\"\".i1·2 any, @\"\".i2·3 any) (@\"\".ret·1 bool)\n" + - "func @\"\".makemap (@\"\".mapType·2 *byte, @\"\".hint·3 int64, @\"\".mapbuf·4 *any, @\"\".bucketbuf·5 *any) (@\"\".hmap·1 map[any]any)\n" + - "func @\"\".mapaccess1 (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 *any) (@\"\".val·1 *any)\n" + - "func @\"\".mapaccess1_fast32 (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" + - "func @\"\".mapaccess1_fast64 (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" + - "func @\"\".mapaccess1_faststr (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 any) (@\"\".val·1 *any)\n" + - "func @\"\".mapaccess1_fat (@\"\".mapType·2 *byte, @\"\".hmap·3 map[any]any, @\"\".key·4 *any, @\"\".zero·5 *byte) (@\"\".val·1 *any)\n" + - "func @\"\".mapaccess2 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 *any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + - "func @\"\".mapaccess2_fast32 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + - "func @\"\".mapaccess2_fast64 (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + - "func @\"\".mapaccess2_faststr (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 any) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + - "func @\"\".mapaccess2_fat (@\"\".mapType·3 *byte, @\"\".hmap·4 map[any]any, @\"\".key·5 *any, @\"\".zero·6 *byte) (@\"\".val·1 *any, @\"\".pres·2 bool)\n" + - "func @\"\".mapassign1 (@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".key·3 *any, @\"\".val·4 *any)\n" + - "func @\"\".mapiterinit (@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".hiter·3 *any)\n" + - "func @\"\".mapdelete 
(@\"\".mapType·1 *byte, @\"\".hmap·2 map[any]any, @\"\".key·3 *any)\n" + - "func @\"\".mapiternext (@\"\".hiter·1 *any)\n" + - "func @\"\".makechan (@\"\".chanType·2 *byte, @\"\".hint·3 int64) (@\"\".hchan·1 chan any)\n" + - "func @\"\".chanrecv1 (@\"\".chanType·1 *byte, @\"\".hchan·2 <-chan any, @\"\".elem·3 *any)\n" + - "func @\"\".chanrecv2 (@\"\".chanType·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any) (? bool)\n" + - "func @\"\".chansend1 (@\"\".chanType·1 *byte, @\"\".hchan·2 chan<- any, @\"\".elem·3 *any)\n" + - "func @\"\".closechan (@\"\".hchan·1 any)\n" + - "var @\"\".writeBarrier struct { @\"\".enabled bool; @\"\".needed bool; @\"\".cgo bool }\n" + - "func @\"\".writebarrierptr (@\"\".dst·1 *any, @\"\".src·2 any)\n" + - "func @\"\".typedmemmove (@\"\".typ·1 *byte, @\"\".dst·2 *any, @\"\".src·3 *any)\n" + - "func @\"\".typedslicecopy (@\"\".typ·2 *byte, @\"\".dst·3 any, @\"\".src·4 any) (? int)\n" + - "func @\"\".selectnbsend (@\"\".chanType·2 *byte, @\"\".hchan·3 chan<- any, @\"\".elem·4 *any) (? bool)\n" + - "func @\"\".selectnbrecv (@\"\".chanType·2 *byte, @\"\".elem·3 *any, @\"\".hchan·4 <-chan any) (? bool)\n" + - "func @\"\".selectnbrecv2 (@\"\".chanType·2 *byte, @\"\".elem·3 *any, @\"\".received·4 *bool, @\"\".hchan·5 <-chan any) (? 
bool)\n" + - "func @\"\".newselect (@\"\".sel·1 *byte, @\"\".selsize·2 int64, @\"\".size·3 int32)\n" + - "func @\"\".selectsend (@\"\".sel·2 *byte, @\"\".hchan·3 chan<- any, @\"\".elem·4 *any) (@\"\".selected·1 bool)\n" + - "func @\"\".selectrecv (@\"\".sel·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any) (@\"\".selected·1 bool)\n" + - "func @\"\".selectrecv2 (@\"\".sel·2 *byte, @\"\".hchan·3 <-chan any, @\"\".elem·4 *any, @\"\".received·5 *bool) (@\"\".selected·1 bool)\n" + - "func @\"\".selectdefault (@\"\".sel·2 *byte) (@\"\".selected·1 bool)\n" + - "func @\"\".selectgo (@\"\".sel·1 *byte)\n" + - "func @\"\".block ()\n" + - "func @\"\".makeslice (@\"\".typ·2 *byte, @\"\".nel·3 int64, @\"\".cap·4 int64) (@\"\".ary·1 []any)\n" + - "func @\"\".growslice (@\"\".typ·2 *byte, @\"\".old·3 []any, @\"\".cap·4 int) (@\"\".ary·1 []any)\n" + - "func @\"\".memmove (@\"\".to·1 *any, @\"\".frm·2 *any, @\"\".length·3 uintptr \"unsafe-uintptr\")\n" + - "func @\"\".memclr (@\"\".ptr·1 *byte, @\"\".length·2 uintptr \"unsafe-uintptr\")\n" + - "func @\"\".memequal (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr \"unsafe-uintptr\") (? bool)\n" + - "func @\"\".memequal8 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + - "func @\"\".memequal16 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + - "func @\"\".memequal32 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + - "func @\"\".memequal64 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + - "func @\"\".memequal128 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n" + - "func @\"\".int64div (? int64, ? int64) (? int64)\n" + - "func @\"\".uint64div (? uint64, ? uint64) (? uint64)\n" + - "func @\"\".int64mod (? int64, ? int64) (? int64)\n" + - "func @\"\".uint64mod (? uint64, ? uint64) (? uint64)\n" + - "func @\"\".float64toint64 (? float64) (? int64)\n" + - "func @\"\".float64touint64 (? float64) (? uint64)\n" + - "func @\"\".int64tofloat64 (? int64) (? float64)\n" + - "func @\"\".uint64tofloat64 (? uint64) (? 
float64)\n" + - "func @\"\".complex128div (@\"\".num·2 complex128, @\"\".den·3 complex128) (@\"\".quo·1 complex128)\n" + - "func @\"\".racefuncenter (? uintptr \"unsafe-uintptr\")\n" + - "func @\"\".racefuncexit ()\n" + - "func @\"\".raceread (? uintptr \"unsafe-uintptr\")\n" + - "func @\"\".racewrite (? uintptr \"unsafe-uintptr\")\n" + - "func @\"\".racereadrange (@\"\".addr·1 uintptr \"unsafe-uintptr\", @\"\".size·2 uintptr \"unsafe-uintptr\")\n" + - "func @\"\".racewriterange (@\"\".addr·1 uintptr \"unsafe-uintptr\", @\"\".size·2 uintptr \"unsafe-uintptr\")\n" + - "func @\"\".msanread (@\"\".addr·1 uintptr \"unsafe-uintptr\", @\"\".size·2 uintptr \"unsafe-uintptr\")\n" + - "func @\"\".msanwrite (@\"\".addr·1 uintptr \"unsafe-uintptr\", @\"\".size·2 uintptr \"unsafe-uintptr\")\n" + - "\n" + - "$$\n" + "c\x00\x03v0\x01\rruntime\x00\t\x11newobject\x00\x02\x17\"\vtyp·2\x00\x00\x01" + + "\x17:\x00\t\x13panicindex\x00\x00\x00\t\x13panicslice\x00\x00\x00\t\x15panic" + + "divide\x00\x00\x00\t\x15throwreturn\x00\x00\x00\t\x11throwinit\x00\x00\x00\t" + + "\x11panicwrap\x00\x05 \x00 \x00 \x00\x00\t\rgopanic\x00\x01\x1b\x00\x00\x00\x00\t\x11gor" + + "ecover\x00\x01\x17\b\x00\x01\x1b\x00\x00\x00\t\x11printbool\x00\x01\x00\x00\x00\t\x13printf" + + "loat\x00\x01\x1a\x00\x00\t\x0fprintint\x00\x01\n\x00\x00\t\x0fprinthex\x00\x01\x14\x00\x00\t" + + "\x11printuint\x00\x01\x14\x00\x00\t\x17printcomplex\x00\x01\x1e\x00\x00\t\x15prin" + + "tstring\x00\x01 \x00\x00\t\x17printpointer\x00\x01:\x00\x00\t\x13printif" + + "ace\x00\x01:\x00\x00\t\x13printeface\x00\x01:\x00\x00\t\x13printslice\x00\x01:" + + "\x00\x00\t\rprintnl\x00\x00\x00\t\rprintsp\x00\x00\x00\t\x11printlock\x00\x00\x00" + + "\t\x15printunlock\x00\x00\x00\t\x19concatstring2\x00\x05\x17\x0f@\"\x00 \x00" + + " \x00\x01 \x00\t\x19concatstring3\x00\a\x17\x0f@\"\x00 \x00 \x00 \x00\x01 \x00\t\x19co" + + "ncatstring4\x00\t\x17\x0f@\"\x00 \x00 \x00 \x00 \x00\x01 \x00\t\x19concatstr" + + "ing5\x00\v\x17\x0f@\"\x00 \x00 \x00 \x00 
\x00 \x00\x01 \x00\t\x19concatstrings\x00" + + "\x03\x17\x0f@\"\x00\x11 \x00\x01 \x00\t\x11cmpstring\x00\x03 \x00 \x00\x01\x02\x00\t\x0feqstri" + + "ng\x00\x03 \x00 \x00\x01\x00\x00\t\x11intstring\x00\x03\x17\x0f\b\"\x00\n\x00\x01 \x00\t!slic" + + "ebytetostring\x00\x03\x17\x0f@\"\x00\x11\"\x00\x01 \x00\t'slicebytetos" + + "tringtmp\x00\x01\x11\"\x00\x01 \x00\t!slicerunetostring\x00\x03\x17\x0f@" + + "\"\x00\x11|S\x00\x01 \x00\t!stringtoslicebyte\x00\x03\x17\x0f@\"\x00 \x00\x01\x11\"" + + "\x00\t'stringtoslicebytetmp\x00\x01 \x00\x01\x11\"\x00\t!stringt" + + "oslicerune\x00\x03\x17\x0f@|S\x00 \x00\x01\x11|S\x00\t\x13stringiter\x00\x03 " + + "\x00\x02\x00\x01\x02\x00\t\x15stringiter2\x00\x03 \x00\x02\x00\x04\x02\rretk·1\x00\x00|S\r" + + "retv·2\x00\x00\t\x11slicecopy\x00\x06:\tto·2\x00\x00:\tfr·3\x00\x00" + + "\x16\vwid·4\x00\x1bunsafe-uintptr\x01\x02\x00\t\x1dslicestring" + + "copy\x00\x04:^\x00\x00:`\x00\x00\x01\x02\x00\t\rconvI2E\x00\x02:\relem·2\x00\x00\x02" + + ":\vret·1\x00\x00\t\rconvI2I\x00\x04\x17\"\b\x00\x00:\relem·3\x00\x00\x02:l" + + "\x00\x00\t\rconvT2E\x00\x06\x17\"\b\x00\x00>p\x00\x00>\vbuf·4\x00\x00\x02:l\x00\x00\t\rc" + + "onvT2I\x00\x06\x17\"\vtab·2\x00\x00>p\x00\x00>t\x00\x00\x02:l\x00\x00\t\x11assert" + + "E2E\x00\x06\x17\"\vtyp·1\x00\x00:\x0fiface·2\x00\x00>\vret·3\x00\x00\x00\t" + + "\x13assertE2E2\x00\x06\x17\"\b\x00\x00:\x0fiface·3\x00\x00>\vret·4\x00\x00" + + "\x01\x00\x00\t\x11assertE2I\x00\x06\x17\"||\x00\x00:~\x00\x00>\x80\x01\x00\x00\x00\t\x13assert" + + "E2I2\x00\x06\x17\"\b\x00\x00:\x84\x01\x00\x00>\x86\x01\x00\x00\x01\x00\x00\t\x11assertE2T\x00\x06\x17\"|" + + "|\x00\x00:~\x00\x00>\x80\x01\x00\x00\x00\t\x13assertE2T2\x00\x06\x17\"\b\x00\x00:\x84\x01\x00\x00>\x86\x01" + + "\x00\x00\x01\x00\x00\t\x11assertI2E\x00\x06\x17\"||\x00\x00:~\x00\x00>\x80\x01\x00\x00\x00\t\x13asse" + + "rtI2E2\x00\x06\x17\"\b\x00\x00:\x84\x01\x00\x00>\x86\x01\x00\x00\x01\x00\x00\t\x11assertI2I\x00\x06\x17" + + 
"\"||\x00\x00:~\x00\x00>\x80\x01\x00\x00\x00\t\x13assertI2I2\x00\x06\x17\"\b\x00\x00:\x84\x01\x00\x00>" + + "\x86\x01\x00\x00\x01\x00\x00\t\x11assertI2T\x00\x06\x17\"||\x00\x00:~\x00\x00>\x80\x01\x00\x00\x00\t\x13as" + + "sertI2T2\x00\x06\x17\"\b\x00\x00:\x84\x01\x00\x00>\x86\x01\x00\x00\x01\x00\x00\t\x17panicdotty" + + "pe\x00\x06\x17\"\rhave·1\x00\x00\x9a\x01\rwant·2\x00\x00\x9a\x01\x84\x01\x00\x00\x00\t\rifa" + + "ceeq\x00\x04:\ti1·2\x00\x00:\ti2·3\x00\x00\x02\x00l\x00\x00\t\refaceeq\x00\x04" + + ":\xa4\x01\x00\x00:\xa6\x01\x00\x00\x02\x00l\x00\x00\t\rmakemap\x00\b\x17\"\x13mapType·2\x00" + + "\x00\n\rhint·3\x00\x00>\x11mapbuf·4\x00\x00>\x17bucketbuf·5\x00" + + "\x00\x02\x1d::\rhmap·1\x00\x00\t\x13mapaccess1\x00\x06\x17\"\xac\x01\x00\x00\x1d::\rh" + + "map·3\x00\x00>\vkey·4\x00\x00\x02>\vval·1\x00\x00\t!mapaccess" + + "1_fast32\x00\x06\x17\"\xac\x01\x00\x00\x1d::\xb8\x01\x00\x00:\xba\x01\x00\x00\x02>\xbc\x01\x00\x00\t!mapa" + + "ccess1_fast64\x00\x06\x17\"\xac\x01\x00\x00\x1d::\xb8\x01\x00\x00:\xba\x01\x00\x00\x02>\xbc\x01\x00\x00\t" + + "#mapaccess1_faststr\x00\x06\x17\"\xac\x01\x00\x00\x1d::\xb8\x01\x00\x00:\xba\x01\x00\x00\x02" + + ">\xbc\x01\x00\x00\t\x1bmapaccess1_fat\x00\b\x17\"\xac\x01\x00\x00\x1d::\xb8\x01\x00\x00>\xba\x01\x00" + + "\x00\x17\"\rzero·5\x00\x00\x02>\xbc\x01\x00\x00\t\x13mapaccess2\x00\x06\x17\"\x13mapT" + + "ype·3\x00\x00\x1d::\rhmap·4\x00\x00>\vkey·5\x00\x00\x04>\xbc\x01\x00\x00\x00\rp" + + "res·2\x00\x00\t!mapaccess2_fast32\x00\x06\x17\"\xca\x01\x00\x00\x1d::\xcc\x01" + + "\x00\x00:\xce\x01\x00\x00\x04>\xbc\x01\x00\x00\x00\xd0\x01\x00\x00\t!mapaccess2_fast64\x00\x06\x17" + + "\"\xca\x01\x00\x00\x1d::\xcc\x01\x00\x00:\xce\x01\x00\x00\x04>\xbc\x01\x00\x00\x00\xd0\x01\x00\x00\t#mapaccess2" + + "_faststr\x00\x06\x17\"\xca\x01\x00\x00\x1d::\xcc\x01\x00\x00:\xce\x01\x00\x00\x04>\xbc\x01\x00\x00\x00\xd0\x01\x00\x00\t" + + "\x1bmapaccess2_fat\x00\b\x17\"\xca\x01\x00\x00\x1d::\xcc\x01\x00\x00>\xce\x01\x00\x00\x17\"\rze" + + 
"ro·6\x00\x00\x04>\xbc\x01\x00\x00\x00\xd0\x01\x00\x00\t\x13mapassign1\x00\b\x17\"\x13mapTy" + + "pe·1\x00\x00\x1d::\rhmap·2\x00\x00>\vkey·3\x00\x00>\vval·4\x00\x00" + + "\x00\t\x15mapiterinit\x00\x06\x17\"\xde\x01\x00\x00\x1d::\xe0\x01\x00\x00>\x0fhiter·3\x00" + + "\x00\x00\t\x11mapdelete\x00\x06\x17\"\xde\x01\x00\x00\x1d::\xe0\x01\x00\x00>\xe2\x01\x00\x00\x00\t\x15mapi" + + "ternext\x00\x02>\x0fhiter·1\x00\x00\x00\t\x0fmakechan\x00\x04\x17\"\x15cha" + + "nType·2\x00\x00\n\xae\x01\x00\x00\x02\x1f\x06:\x0fhchan·1\x00\x00\t\x11chanrecv" + + "1\x00\x06\x17\"\x15chanType·1\x00\x00\x1f\x02:\x0fhchan·2\x00\x00>p\x00\x00\x00\t\x11" + + "chanrecv2\x00\x06\x17\"\xf2\x01\x00\x00\x1f\x02:\x0fhchan·3\x00\x00>\relem·4" + + "\x00\x00\x01\x00\x00\t\x11chansend1\x00\x06\x17\"\xf8\x01\x00\x00\x1f\x04:\xfa\x01\x00\x00>p\x00\x00\x00\t\x11cl" + + "osechan\x00\x02:\xf4\x01\x00\x00\x00\a\x17writeBarrier\x00\x15\x06\renabled" + + "\x00\x00\x00\vneeded\x00\x00\x00\x05cgo\x00\x00\x00\t\x1dwritebarrierptr\x00\x04>" + + "\vdst·1\x00\x00:\vsrc·2\x00\x00\x00\t\x17typedmemmove\x00\x06\x17\"||" + + "\x00\x00>\vdst·2\x00\x00>\vsrc·3\x00\x00\x00\t\x1btypedslicecopy\x00" + + "\x06\x17\"\b\x00\x00:\vdst·3\x00\x00:\vsrc·4\x00\x00\x01\x02\x00\t\x17selectnbs" + + "end\x00\x06\x17\"\xf2\x01\x00\x00\x1f\x04:\xfe\x01\x00\x00>\x80\x02\x00\x00\x01\x00\x00\t\x17selectnbrecv" + + "\x00\x06\x17\"\xf2\x01\x00\x00>p\x00\x00\x1f\x02:\x0fhchan·4\x00\x00\x01\x00\x00\t\x19selectnbr" + + "ecv2\x00\b\x17\"\xf2\x01\x00\x00>p\x00\x00\x17\x00\x15received·4\x00\x00\x1f\x02:\x0fhcha" + + "n·5\x00\x00\x01\x00\x00\t\x11newselect\x00\x06\x17\"\vsel·1\x00\x00\n\x13selsi" + + "ze·2\x00\x00\b\rsize·3\x00\x00\x00\t\x13selectsend\x00\x06\x17\"\vsel\xc2" + + "\xb72\x00\x00\x1f\x04:\xfe\x01\x00\x00>\x80\x02\x00\x00\x02\x00\x15selected·1\x00\x00\t\x13select" + + "recv\x00\x06\x17\"\xb6\x02\x00\x00\x1f\x02:\xfe\x01\x00\x00>\x80\x02\x00\x00\x02\x00\xb8\x02\x00\x00\t\x15selectre" + + 
"cv2\x00\b\x17\"\xb6\x02\x00\x00\x1f\x02:\xfe\x01\x00\x00>\x80\x02\x00\x00\xf8\x01\x15received·5\x00\x00\x02" + + "\x00\xb8\x02\x00\x00\t\x19selectdefault\x00\x02\x17\"\xb6\x02\x00\x00\x02\x00\xb8\x02\x00\x00\t\x0fsele" + + "ctgo\x00\x02\x17\"\xae\x02\x00\x00\x00\t\tblock\x00\x00\x00\t\x11makeslice\x00\x06\x17\"\b\x00" + + "\x00\n\vnel·3\x00\x00\n\vcap·4\x00\x00\x02\x11:\vary·1\x00\x00\t\x11grows" + + "lice\x00\x06\x17\"\b\x00\x00\x11:\vold·3\x00\x00\x02\xca\x02\x00\x00\x02\x11:\xcc\x02\x00\x00\t\rmemm" + + "ove\x00\x06>\tto·1\x00\x00>\vfrm·2\x00\x00\x16\x11length·3\x00d\x00\t\v" + + "memclr\x00\x04\x17\"\vptr·1\x00\x00\x16\x11length·2\x00d\x00\t\x0fmemeq" + + "ual\x00\x06>\ax·2\x00\x00>\ay·3\x00\x00\x16\rsize·4\x00d\x01\x00\x00\t\x11mem" + + "equal8\x00\x04>\xe2\x02\x00\x00>\xe4\x02\x00\x00\x01\x00\x00\t\x13memequal16\x00\x04>\xe2\x02\x00\x00" + + ">\xe4\x02\x00\x00\x01\x00\x00\t\x13memequal32\x00\x04>\xe2\x02\x00\x00>\xe4\x02\x00\x00\x01\x00\x00\t\x13mem" + + "equal64\x00\x04>\xe2\x02\x00\x00>\xe4\x02\x00\x00\x01\x00\x00\t\x15memequal128\x00\x04>\xe2\x02" + + "\x00\x00>\xe4\x02\x00\x00\x01\x00\x00\t\x0fint64div\x00\x03\n\x00\n\x00\x01\n\x00\t\x11uint64div" + + "\x00\x03\x14\x00\x14\x00\x01\x14\x00\t\x0fint64mod\x00\x03\n\x00\n\x00\x01\n\x00\t\x11uint64mod\x00" + + "\x03\x14\x00\x14\x00\x01\x14\x00\t\x1bfloat64toint64\x00\x01\x1a\x00\x01\n\x00\t\x1dfloat64" + + "touint64\x00\x01\x1a\x00\x01\x14\x00\t\x1bint64tofloat64\x00\x01\n\x00\x01\x1a\x00\t\x1d" + + "uint64tofloat64\x00\x01\x14\x00\x01\x1a\x00\t\x19complex128div\x00\x04\x1e" + + "\vnum·2\x00\x00\x1e\vden·3\x00\x00\x02\x1e\vquo·1\x00\x00\t\x19racefunc" + + "enter\x00\x01\x16d\x00\t\x17racefuncexit\x00\x00\x00\t\x0fraceread\x00\x01\x16" + + "d\x00\t\x11racewrite\x00\x01\x16d\x00\t\x19racereadrange\x00\x04\x16\radd" + + "r·1\x00d\x16\rsize·2\x00d\x00\t\x1bracewriterange\x00\x04\x16\x94\x03\x00" + + "d\x16\x96\x03\x00d\x00\t\x0fmsanread\x00\x04\x16\x94\x03\x00d\x16\x96\x03\x00d\x00\t\x11msanwrit" + + 
"e\x00\x04\x16\x94\x03\x00d\x16\x96\x03\x00d\x00\v\xf4\x01\x02\v\x00\x01\x00\n$$\n" const unsafeimport = "" + - "package unsafe\n" + - "type @\"\".Pointer uintptr\n" + - "func @\"\".Offsetof (? any) (? uintptr)\n" + - "func @\"\".Sizeof (? any) (? uintptr)\n" + - "func @\"\".Alignof (? any) (? uintptr)\n" + - "\n" + - "$$\n" + "c\x00\x03v0\x01\vunsafe\x00\x05\r\rPointer\x00\x16\x00\t\x0fOffsetof\x00\x01:" + + "\x00\x01\x16\x00\t\vSizeof\x00\x01:\x00\x01\x16\x00\t\rAlignof\x00\x01:\x00\x01\x16\x00\v\b\x00\v\x00" + + "\x01\x00\n$$\n" diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index b6280ab30b..9bb01056e4 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -377,7 +377,7 @@ func dumpexport() { } size := 0 // size of export section without enclosing markers - if forceNewExport || newexport { + if newexport { // binary export // The linker also looks for the $$ marker - use char after $$ to distinguish format. 
exportf("\n$$B\n") // indicate binary format diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index c3a0481ffd..54211e4892 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -181,7 +181,7 @@ func Main() { obj.Flagcount("live", "debug liveness analysis", &debuglive) obj.Flagcount("m", "print optimization decisions", &Debug['m']) flag.BoolVar(&flag_msan, "msan", false, "build code compatible with C/C++ memory sanitizer") - flag.BoolVar(&newexport, "newexport", false, "use new export format") // TODO(gri) remove eventually (issue 13241) + flag.BoolVar(&newexport, "newexport", true, "use new export format") // TODO(gri) remove eventually (issue 15323) flag.BoolVar(&nolocalimports, "nolocalimports", false, "reject local (relative) imports") flag.StringVar(&outfile, "o", "", "write output to `file`") flag.StringVar(&myimportpath, "p", "", "set expected package import `path`") diff --git a/src/cmp.bash b/src/cmp.bash new file mode 100644 index 0000000000..68086c31f2 --- /dev/null +++ b/src/cmp.bash @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +# Copyright 2016 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# A simple script to compare differences between +# assembly listings for packages built with different +# compiler flags. It is useful to inspect the impact +# of a compiler change across all std lib packages. +# +# The script builds the std library (make.bash) once +# with FLAGS1 and once with FLAGS2 and compares the +# "go build " assembly output for each package +# and lists the packages with differences. +# +# It leaves and old.txt and new.txt file in the package +# directories for the packages with differences. 
+ +FLAGS1="-newexport=0" +FLAGS2="-newexport=1" + +echo +echo +echo "1a) clean build using $FLAGS1" +(export GO_GCFLAGS="$FLAGS1"; sh make.bash) + +echo +echo +echo "1b) save go build output for all packages" +for pkg in `go list std`; do + echo $pkg + DIR=$GOROOT/src/$pkg + go build -gcflags "$FLAGS1 -S" -o /dev/null $pkg &> $DIR/old.txt +done + +echo +echo +echo "2a) clean build using $FLAGS2" +(export GO_GCFLAGS="$FLAGS2"; sh make.bash) + +echo +echo +echo "2b) save go build output for all packages" +for pkg in `go list std`; do + echo $pkg + DIR=$GOROOT/src/$pkg + go build -gcflags "$FLAGS2 -S" -o /dev/null $pkg &> $DIR/new.txt +done + +echo +echo +echo "3) compare assembly files" +for pkg in `go list std`; do + DIR=$GOROOT/src/$pkg + + if cmp $DIR/old.txt $DIR/new.txt &> /dev/null + then rm $DIR/old.txt $DIR/new.txt + else echo "==> $DIR" + fi +done -- cgit v1.3 From 74a9bad63899ffb02b747678c2c181ffb13983b9 Mon Sep 17 00:00:00 2001 From: Zhongwei Yao Date: Mon, 25 Apr 2016 11:08:38 +0800 Subject: cmd/compile: enable const division for arm64 performance: benchmark old ns/op new ns/op delta BenchmarkDivconstI64-8 8.28 2.70 -67.39% BenchmarkDivconstU64-8 8.28 4.69 -43.36% BenchmarkDivconstI32-8 8.28 6.39 -22.83% BenchmarkDivconstU32-8 8.28 4.43 -46.50% BenchmarkDivconstI16-8 5.17 5.17 +0.00% BenchmarkDivconstU16-8 5.33 5.34 +0.19% BenchmarkDivconstI8-8 3.50 3.50 +0.00% BenchmarkDivconstU8-8 3.51 3.50 -0.28% Fixes #15382 Change-Id: Ibce7b28f0586d593b33c4d4ecc5d5e7e7c905d13 Reviewed-on: https://go-review.googlesource.com/22292 Reviewed-by: Michael Munday Reviewed-by: David Chase --- src/cmd/compile/internal/arm64/galign.go | 2 + src/cmd/compile/internal/arm64/ggen.go | 47 ++++++++++++++++++++ src/cmd/compile/internal/arm64/gsubr.go | 12 ------ src/cmd/compile/internal/arm64/peep.go | 3 ++ src/cmd/compile/internal/arm64/prog.go | 3 ++ src/cmd/compile/internal/gc/cgen.go | 49 +++++++++++++++++---- src/cmd/compile/internal/gc/go.go | 36 ++++++++-------- 
src/cmd/compile/internal/gc/walk.go | 12 +++++- src/cmd/internal/obj/arm64/asm7.go | 4 ++ src/runtime/vlrt.go | 1 - test/bench/go1/divconst_test.go | 73 ++++++++++++++++++++++++++++++++ 11 files changed, 202 insertions(+), 40 deletions(-) create mode 100644 test/bench/go1/divconst_test.go (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go index 17c851cb14..7acc4e08eb 100644 --- a/src/cmd/compile/internal/arm64/galign.go +++ b/src/cmd/compile/internal/arm64/galign.go @@ -29,6 +29,8 @@ func Main() { gc.Thearch.Betypeinit = betypeinit gc.Thearch.Cgen_hmul = cgen_hmul + gc.Thearch.AddSetCarry = AddSetCarry + gc.Thearch.RightShiftWithCarry = RightShiftWithCarry gc.Thearch.Cgen_shift = cgen_shift gc.Thearch.Clearfat = clearfat gc.Thearch.Defframe = defframe diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go index 9abd901d7a..bddfed631a 100644 --- a/src/cmd/compile/internal/arm64/ggen.go +++ b/src/cmd/compile/internal/arm64/ggen.go @@ -252,6 +252,53 @@ func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) { } } +// RightShiftWithCarry generates a constant unsigned +// right shift with carry. +// +// res = n >> shift // with carry +func RightShiftWithCarry(n *gc.Node, shift uint, res *gc.Node) { + // Extra 1 is for carry bit. + maxshift := uint(n.Type.Width*8 + 1) + if shift == 0 { + gmove(n, res) + } else if shift < maxshift { + // 1. clear rightmost bit of target + var n1 gc.Node + gc.Nodconst(&n1, n.Type, 1) + gins(optoas(gc.ORSH, n.Type), &n1, n) + gins(optoas(gc.OLSH, n.Type), &n1, n) + // 2. add carry flag to target + var n2 gc.Node + gc.Nodconst(&n1, n.Type, 0) + gc.Regalloc(&n2, n.Type, nil) + gins(optoas(gc.OAS, n.Type), &n1, &n2) + gins(arm64.AADC, &n2, n) + // 3. right rotate 1 bit + gc.Nodconst(&n1, n.Type, 1) + gins(arm64.AROR, &n1, n) + + // ARM64 backend doesn't eliminate shifts by 0. It is manually checked here. 
+ if shift > 1 { + var n3 gc.Node + gc.Nodconst(&n3, n.Type, int64(shift-1)) + cgen_shift(gc.ORSH, true, n, &n3, res) + } else { + gmove(n, res) + } + gc.Regfree(&n2) + } else { + gc.Fatalf("RightShiftWithCarry: shift(%v) is bigger than max size(%v)", shift, maxshift) + } +} + +// AddSetCarry generates add and set carry. +// +// res = nl + nr // with carry flag set +func AddSetCarry(nl *gc.Node, nr *gc.Node, res *gc.Node) { + gins(arm64.AADDS, nl, nr) + gmove(nr, res) +} + /* * generate high multiply: * res = (nl*nr) >> width diff --git a/src/cmd/compile/internal/arm64/gsubr.go b/src/cmd/compile/internal/arm64/gsubr.go index efa66a09d3..f193291d01 100644 --- a/src/cmd/compile/internal/arm64/gsubr.go +++ b/src/cmd/compile/internal/arm64/gsubr.go @@ -890,18 +890,6 @@ func optoas(op gc.Op, t *gc.Type) obj.As { ORSH_ | gc.TINT64: a = arm64.AASR - // TODO(minux): handle rotates - //case CASE(ORROTC, TINT8): - //case CASE(ORROTC, TUINT8): - //case CASE(ORROTC, TINT16): - //case CASE(ORROTC, TUINT16): - //case CASE(ORROTC, TINT32): - //case CASE(ORROTC, TUINT32): - //case CASE(ORROTC, TINT64): - //case CASE(ORROTC, TUINT64): - // a = 0//??? RLDC?? 
- // break; - case OHMUL_ | gc.TINT64: a = arm64.ASMULH diff --git a/src/cmd/compile/internal/arm64/peep.go b/src/cmd/compile/internal/arm64/peep.go index 887353c889..22be1afebc 100644 --- a/src/cmd/compile/internal/arm64/peep.go +++ b/src/cmd/compile/internal/arm64/peep.go @@ -534,10 +534,13 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int { return 0 case arm64.AADD, /* read p->from, read p->reg, write p->to */ + arm64.AADDS, arm64.ASUB, + arm64.AADC, arm64.AAND, arm64.AORR, arm64.AEOR, + arm64.AROR, arm64.AMUL, arm64.ASMULL, arm64.AUMULL, diff --git a/src/cmd/compile/internal/arm64/prog.go b/src/cmd/compile/internal/arm64/prog.go index 3091c4a840..d504d0f0ee 100644 --- a/src/cmd/compile/internal/arm64/prog.go +++ b/src/cmd/compile/internal/arm64/prog.go @@ -59,6 +59,9 @@ var progtable = [arm64.ALAST & obj.AMask]obj.ProgInfo{ arm64.ALSR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, arm64.AASR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, arm64.ACMP & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead}, + arm64.AADC & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite | gc.UseCarry}, + arm64.AROR & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite}, + arm64.AADDS & obj.AMask: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite | gc.SetCarry}, // Floating point. arm64.AFADDD & obj.AMask: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite}, diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go index bb7487c958..8db752ec51 100644 --- a/src/cmd/compile/internal/gc/cgen.go +++ b/src/cmd/compile/internal/gc/cgen.go @@ -2642,9 +2642,9 @@ func cgen_ret(n *Node) { // signed and unsigned high multiplication (OHMUL). 
func hasHMUL64() bool { switch Ctxt.Arch.Family { - case sys.AMD64, sys.S390X: + case sys.AMD64, sys.S390X, sys.ARM64: return true - case sys.ARM, sys.ARM64, sys.I386, sys.MIPS64, sys.PPC64: + case sys.ARM, sys.I386, sys.MIPS64, sys.PPC64: return false } Fatalf("unknown architecture") @@ -2664,6 +2664,28 @@ func hasRROTC64() bool { return false } +func hasRightShiftWithCarry() bool { + switch Ctxt.Arch.Family { + case sys.ARM64: + return true + case sys.AMD64, sys.ARM, sys.I386, sys.MIPS64, sys.PPC64, sys.S390X: + return false + } + Fatalf("unknown architecture") + return false +} + +func hasAddSetCarry() bool { + switch Ctxt.Arch.Family { + case sys.ARM64: + return true + case sys.AMD64, sys.ARM, sys.I386, sys.MIPS64, sys.PPC64, sys.S390X: + return false + } + Fatalf("unknown architecture") + return false +} + // generate division according to op, one of: // res = nl / nr // res = nl % nr @@ -2699,8 +2721,9 @@ func cgen_div(op Op, nl *Node, nr *Node, res *Node) { // the MSB. For now this needs the RROTC instruction. // TODO(mundaym): Hacker's Delight 2nd ed. chapter 10 proposes // an alternative sequence of instructions for architectures - // that do not have a shift right with carry instruction. - if m.Ua != 0 && !hasRROTC64() { + // (TODO: MIPS64, PPC64, S390X) that do not have a shift + // right with carry instruction. + if m.Ua != 0 && !hasRROTC64() && !hasRightShiftWithCarry() { goto longdiv } if op == OMOD { @@ -2717,12 +2740,20 @@ func cgen_div(op Op, nl *Node, nr *Node, res *Node) { if m.Ua != 0 { // Need to add numerator accounting for overflow. 
- Thearch.Gins(Thearch.Optoas(OADD, nl.Type), &n1, &n3) + if hasAddSetCarry() { + Thearch.AddSetCarry(&n1, &n3, &n3) + } else { + Thearch.Gins(Thearch.Optoas(OADD, nl.Type), &n1, &n3) + } - Nodconst(&n2, nl.Type, 1) - Thearch.Gins(Thearch.Optoas(ORROTC, nl.Type), &n2, &n3) - Nodconst(&n2, nl.Type, int64(m.S)-1) - Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3) + if !hasRROTC64() { + Thearch.RightShiftWithCarry(&n3, uint(m.S), &n3) + } else { + Nodconst(&n2, nl.Type, 1) + Thearch.Gins(Thearch.Optoas(ORROTC, nl.Type), &n2, &n3) + Nodconst(&n2, nl.Type, int64(m.S)-1) + Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3) + } } else { Nodconst(&n2, nl.Type, int64(m.S)) Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3) // shift dx diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index 87b6121c8e..f9a372dcce 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -378,23 +378,25 @@ type Arch struct { MAXWIDTH int64 ReservedRegs []int - AddIndex func(*Node, int64, *Node) bool // optional - Betypeinit func() - Bgen_float func(*Node, bool, int, *obj.Prog) // optional - Cgen64 func(*Node, *Node) // only on 32-bit systems - Cgenindex func(*Node, *Node, bool) *obj.Prog - Cgen_bmul func(Op, *Node, *Node, *Node) bool - Cgen_float func(*Node, *Node) // optional - Cgen_hmul func(*Node, *Node, *Node) - Cgen_shift func(Op, bool, *Node, *Node, *Node) - Clearfat func(*Node) - Cmp64 func(*Node, *Node, Op, int, *obj.Prog) // only on 32-bit systems - Defframe func(*obj.Prog) - Dodiv func(Op, *Node, *Node, *Node) - Excise func(*Flow) - Expandchecks func(*obj.Prog) - Getg func(*Node) - Gins func(obj.As, *Node, *Node) *obj.Prog + AddIndex func(*Node, int64, *Node) bool // optional + Betypeinit func() + Bgen_float func(*Node, bool, int, *obj.Prog) // optional + Cgen64 func(*Node, *Node) // only on 32-bit systems + Cgenindex func(*Node, *Node, bool) *obj.Prog + Cgen_bmul func(Op, *Node, *Node, *Node) bool + 
Cgen_float func(*Node, *Node) // optional + Cgen_hmul func(*Node, *Node, *Node) + RightShiftWithCarry func(*Node, uint, *Node) // only on systems without RROTC instruction + AddSetCarry func(*Node, *Node, *Node) // only on systems when ADD does not update carry flag + Cgen_shift func(Op, bool, *Node, *Node, *Node) + Clearfat func(*Node) + Cmp64 func(*Node, *Node, Op, int, *obj.Prog) // only on 32-bit systems + Defframe func(*obj.Prog) + Dodiv func(Op, *Node, *Node, *Node) + Excise func(*Flow) + Expandchecks func(*obj.Prog) + Getg func(*Node) + Gins func(obj.As, *Node, *Node) *obj.Prog // Ginscmp generates code comparing n1 to n2 and jumping away if op is satisfied. // The returned prog should be Patch'ed with the jump target. diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index bce34374e8..cc9a50e6a8 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -3424,7 +3424,7 @@ func walkdiv(n *Node, init *Nodes) *Node { // if >= 0, nr is 1< Date: Wed, 27 Apr 2016 15:10:10 +1000 Subject: cmd/compile/internal/gc: remove oconv(op, 0) calls Updates #15462 Automatic refactor with sed -e. Replace all oconv(op, 0) to string conversion with the raw op value which fmt's %v verb can print directly. The remaining oconv(op, FmtSharp) will be replaced with op.GoString and %#v in the next CL. 
Change-Id: I5e2f7ee0bd35caa65c6dd6cb1a866b5e4519e641 Reviewed-on: https://go-review.googlesource.com/22499 Run-TryBot: Dave Cheney TryBot-Result: Gobot Gobot Reviewed-by: Brad Fitzpatrick --- src/cmd/compile/internal/gc/bexport.go | 2 +- src/cmd/compile/internal/gc/cgen.go | 4 ++-- src/cmd/compile/internal/gc/const.go | 4 ++-- src/cmd/compile/internal/gc/cplx.go | 4 ++-- src/cmd/compile/internal/gc/dcl.go | 8 ++++---- src/cmd/compile/internal/gc/esc.go | 6 +++--- src/cmd/compile/internal/gc/export.go | 2 +- src/cmd/compile/internal/gc/fmt.go | 30 +++++++++++++++--------------- src/cmd/compile/internal/gc/gen.go | 2 +- src/cmd/compile/internal/gc/gsubr.go | 6 +++--- src/cmd/compile/internal/gc/order.go | 12 ++++++------ src/cmd/compile/internal/gc/racewalk.go | 6 +++--- src/cmd/compile/internal/gc/select.go | 8 ++++---- src/cmd/compile/internal/gc/ssa.go | 4 ++-- src/cmd/compile/internal/gc/subr.go | 10 +++++----- src/cmd/compile/internal/gc/swt.go | 2 +- src/cmd/compile/internal/gc/typecheck.go | 30 +++++++++++++++--------------- src/cmd/compile/internal/gc/walk.go | 14 +++++++------- 18 files changed, 77 insertions(+), 77 deletions(-) (limited to 'src/cmd/compile') diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 5ab7fdc0e9..53662620aa 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -499,7 +499,7 @@ func (p *exporter) obj(sym *Sym) { } default: - Fatalf("exporter: unexpected export symbol: %v %v", oconv(n.Op, 0), sym) + Fatalf("exporter: unexpected export symbol: %v %v", n.Op, sym) } } diff --git a/src/cmd/compile/internal/gc/cgen.go b/src/cmd/compile/internal/gc/cgen.go index 8db752ec51..fd57fbd4a7 100644 --- a/src/cmd/compile/internal/gc/cgen.go +++ b/src/cmd/compile/internal/gc/cgen.go @@ -1807,7 +1807,7 @@ func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) { } if !n.Type.IsBoolean() { - Fatalf("bgen: bad type %v for %v", n.Type, oconv(n.Op, 0)) 
+ Fatalf("bgen: bad type %v for %v", n.Type, n.Op) } for n.Op == OCONVNOP { @@ -2454,7 +2454,7 @@ func Ginscall(f *Node, proc int) { func cgen_callinter(n *Node, res *Node, proc int) { i := n.Left if i.Op != ODOTINTER { - Fatalf("cgen_callinter: not ODOTINTER %v", oconv(i.Op, 0)) + Fatalf("cgen_callinter: not ODOTINTER %v", i.Op) } i = i.Left // interface diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index 5a7e9f34dd..e0f5e977fe 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -695,7 +695,7 @@ func evconst(n *Node) { switch uint32(n.Op)<<16 | uint32(v.Ctype()) { default: if n.Diag == 0 { - Yyerror("illegal constant expression %v %v", oconv(n.Op, 0), nl.Type) + Yyerror("illegal constant expression %v %v", n.Op, nl.Type) n.Diag = 1 } return @@ -1179,7 +1179,7 @@ setfalse: illegal: if n.Diag == 0 { - Yyerror("illegal constant expression: %v %v %v", nl.Type, oconv(n.Op, 0), nr.Type) + Yyerror("illegal constant expression: %v %v %v", nl.Type, n.Op, nr.Type) n.Diag = 1 } } diff --git a/src/cmd/compile/internal/gc/cplx.go b/src/cmd/compile/internal/gc/cplx.go index 4218117711..9bb2027520 100644 --- a/src/cmd/compile/internal/gc/cplx.go +++ b/src/cmd/compile/internal/gc/cplx.go @@ -399,7 +399,7 @@ func Complexgen(n *Node, res *Node) { switch n.Op { default: Dump("complexgen: unknown op", n) - Fatalf("complexgen: unknown op %v", oconv(n.Op, 0)) + Fatalf("complexgen: unknown op %v", n.Op) case ODOT, ODOTPTR, @@ -458,7 +458,7 @@ func Complexgen(n *Node, res *Node) { switch n.Op { default: - Fatalf("complexgen: unknown op %v", oconv(n.Op, 0)) + Fatalf("complexgen: unknown op %v", n.Op) case OCONV: Complexmove(nl, res) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index 53d4ad4d10..6d46d9a73c 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -551,7 +551,7 @@ func funchdr(n *Node) { func funcargs(nt *Node) { if 
nt.Op != OTFUNC { - Fatalf("funcargs %v", oconv(nt.Op, 0)) + Fatalf("funcargs %v", nt.Op) } // re-start the variable generation number @@ -565,7 +565,7 @@ func funcargs(nt *Node) { if nt.Left != nil { n := nt.Left if n.Op != ODCLFIELD { - Fatalf("funcargs receiver %v", oconv(n.Op, 0)) + Fatalf("funcargs receiver %v", n.Op) } if n.Left != nil { n.Left.Op = ONAME @@ -580,7 +580,7 @@ func funcargs(nt *Node) { for _, n := range nt.List.Slice() { if n.Op != ODCLFIELD { - Fatalf("funcargs in %v", oconv(n.Op, 0)) + Fatalf("funcargs in %v", n.Op) } if n.Left != nil { n.Left.Op = ONAME @@ -598,7 +598,7 @@ func funcargs(nt *Node) { var i int = 0 for _, n := range nt.Rlist.Slice() { if n.Op != ODCLFIELD { - Fatalf("funcargs out %v", oconv(n.Op, 0)) + Fatalf("funcargs out %v", n.Op) } if n.Left == nil { diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index a7bc88e5c1..52c09e47f9 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -998,8 +998,8 @@ func escassign(e *EscState, dst, src *Node, step *EscStep) { if Debug['m'] > 2 { fmt.Printf("%v:[%d] %v escassign: %v(%v)[%v] = %v(%v)[%v]\n", linestr(lineno), e.loopdepth, funcSym(Curfn), - Nconv(dst, FmtShort), Jconv(dst, FmtShort), oconv(dst.Op, 0), - Nconv(src, FmtShort), Jconv(src, FmtShort), oconv(src.Op, 0)) + Nconv(dst, FmtShort), Jconv(dst, FmtShort), dst.Op, + Nconv(src, FmtShort), Jconv(src, FmtShort), src.Op) } setlineno(dst) @@ -1741,7 +1741,7 @@ func escwalkBody(e *EscState, level Level, dst *Node, src *Node, step *EscStep, if Debug['m'] > 2 { fmt.Printf("escwalk: level:%d depth:%d %.*s op=%v %v(%v) scope:%v[%d] extraloopdepth=%v\n", - level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", oconv(src.Op, 0), Nconv(src, FmtShort), Jconv(src, FmtShort), e.curfnSym(src), srcE.Escloopdepth, extraloopdepth) + level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", src.Op, Nconv(src, FmtShort), Jconv(src, FmtShort), e.curfnSym(src), srcE.Escloopdepth, 
extraloopdepth) } e.pdepth++ diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index 9bb01056e4..4b48c53b91 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -354,7 +354,7 @@ func dumpsym(s *Sym) { switch s.Def.Op { default: - Yyerror("unexpected export symbol: %v %v", oconv(s.Def.Op, 0), s) + Yyerror("unexpected export symbol: %v %v", s.Def.Op, s) case OLITERAL: dumpexportconst(s) diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index ee12e35975..2c3afb0ecc 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -914,7 +914,7 @@ func stmtfmt(n *Node) string { case OSELECT, OSWITCH: if fmtmode == FErr { - f += fmt.Sprintf("%v statement", oconv(n.Op, 0)) + f += fmt.Sprintf("%v statement", n.Op) break } @@ -1472,7 +1472,7 @@ func exprfmt(n *Node, prec int) string { } } - return fmt.Sprintf("", oconv(n.Op, 0)) + return fmt.Sprintf("", n.Op) } func nodefmt(n *Node, flag FmtFlag) string { @@ -1527,40 +1527,40 @@ func nodedump(n *Node, flag FmtFlag) string { } if n.Ninit.Len() != 0 { - fmt.Fprintf(&buf, "%v-init%v", oconv(n.Op, 0), n.Ninit) + fmt.Fprintf(&buf, "%v-init%v", n.Op, n.Ninit) indent(&buf) } } switch n.Op { default: - fmt.Fprintf(&buf, "%v%v", oconv(n.Op, 0), Jconv(n, 0)) + fmt.Fprintf(&buf, "%v%v", n.Op, Jconv(n, 0)) case OREGISTER, OINDREG: - fmt.Fprintf(&buf, "%v-%v%v", oconv(n.Op, 0), obj.Rconv(int(n.Reg)), Jconv(n, 0)) + fmt.Fprintf(&buf, "%v-%v%v", n.Op, obj.Rconv(int(n.Reg)), Jconv(n, 0)) case OLITERAL: - fmt.Fprintf(&buf, "%v-%v%v", oconv(n.Op, 0), Vconv(n.Val(), 0), Jconv(n, 0)) + fmt.Fprintf(&buf, "%v-%v%v", n.Op, Vconv(n.Val(), 0), Jconv(n, 0)) case ONAME, ONONAME: if n.Sym != nil { - fmt.Fprintf(&buf, "%v-%v%v", oconv(n.Op, 0), n.Sym, Jconv(n, 0)) + fmt.Fprintf(&buf, "%v-%v%v", n.Op, n.Sym, Jconv(n, 0)) } else { - fmt.Fprintf(&buf, "%v%v", oconv(n.Op, 0), Jconv(n, 0)) + fmt.Fprintf(&buf, 
"%v%v", n.Op, Jconv(n, 0)) } if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil { indent(&buf) - fmt.Fprintf(&buf, "%v-ntype%v", oconv(n.Op, 0), n.Name.Param.Ntype) + fmt.Fprintf(&buf, "%v-ntype%v", n.Op, n.Name.Param.Ntype) } case OASOP: - fmt.Fprintf(&buf, "%v-%v%v", oconv(n.Op, 0), oconv(Op(n.Etype), 0), Jconv(n, 0)) + fmt.Fprintf(&buf, "%v-%v%v", n.Op, Op(n.Etype), Jconv(n, 0)) case OTYPE: - fmt.Fprintf(&buf, "%v %v%v type=%v", oconv(n.Op, 0), n.Sym, Jconv(n, 0), n.Type) + fmt.Fprintf(&buf, "%v %v%v type=%v", n.Op, n.Sym, Jconv(n, 0), n.Type) if recur && n.Type == nil && n.Name.Param.Ntype != nil { indent(&buf) - fmt.Fprintf(&buf, "%v-ntype%v", oconv(n.Op, 0), n.Name.Param.Ntype) + fmt.Fprintf(&buf, "%v-ntype%v", n.Op, n.Name.Param.Ntype) } } @@ -1581,17 +1581,17 @@ func nodedump(n *Node, flag FmtFlag) string { } if n.List.Len() != 0 { indent(&buf) - fmt.Fprintf(&buf, "%v-list%v", oconv(n.Op, 0), n.List) + fmt.Fprintf(&buf, "%v-list%v", n.Op, n.List) } if n.Rlist.Len() != 0 { indent(&buf) - fmt.Fprintf(&buf, "%v-rlist%v", oconv(n.Op, 0), n.Rlist) + fmt.Fprintf(&buf, "%v-rlist%v", n.Op, n.Rlist) } if n.Nbody.Len() != 0 { indent(&buf) - fmt.Fprintf(&buf, "%v-body%v", oconv(n.Op, 0), n.Nbody) + fmt.Fprintf(&buf, "%v-body%v", n.Op, n.Nbody) } } diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index 6fb27cf8e1..275e6a7507 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -218,7 +218,7 @@ func Genlist(l Nodes) { func cgen_proc(n *Node, proc int) { switch n.Left.Op { default: - Fatalf("cgen_proc: unknown call %v", oconv(n.Left.Op, 0)) + Fatalf("cgen_proc: unknown call %v", n.Left.Op) case OCALLMETH: cgen_callmeth(n.Left, proc) diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index 3d9ab626f6..7e64194957 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -327,7 
+327,7 @@ func Naddr(a *obj.Addr, n *Node) { a := a // copy to let escape into Ctxt.Dconv Debug['h'] = 1 Dump("naddr", n) - Fatalf("naddr: bad %v %v", oconv(n.Op, 0), Ctxt.Dconv(a)) + Fatalf("naddr: bad %v %v", n.Op, Ctxt.Dconv(a)) case OREGISTER: a.Type = obj.TYPE_REG @@ -422,7 +422,7 @@ func Naddr(a *obj.Addr, n *Node) { if !n.Left.Type.IsStruct() || n.Left.Type.Field(0).Sym != n.Sym { Debug['h'] = 1 Dump("naddr", n) - Fatalf("naddr: bad %v %v", oconv(n.Op, 0), Ctxt.Dconv(a)) + Fatalf("naddr: bad %v %v", n.Op, Ctxt.Dconv(a)) } Naddr(a, n.Left) @@ -465,7 +465,7 @@ func Naddr(a *obj.Addr, n *Node) { } if a.Type != obj.TYPE_MEM { a := a // copy to let escape into Ctxt.Dconv - Fatalf("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), oconv(n.Left.Op, 0)) + Fatalf("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), n.Left.Op) } a.Type = obj.TYPE_ADDR diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index aea05caee2..7026ad79ef 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -165,7 +165,7 @@ func ordersafeexpr(n *Node, order *Order) *Node { a.Right = r return typecheck(&a, Erv) default: - Fatalf("ordersafeexpr %v", oconv(n.Op, 0)) + Fatalf("ordersafeexpr %v", n.Op) return nil // not reached } } @@ -416,7 +416,7 @@ func ordercall(n *Node, order *Order) { func ordermapassign(n *Node, order *Order) { switch n.Op { default: - Fatalf("ordermapassign %v", oconv(n.Op, 0)) + Fatalf("ordermapassign %v", n.Op) case OAS: order.out = append(order.out, n) @@ -478,7 +478,7 @@ func orderstmt(n *Node, order *Order) { switch n.Op { default: - Fatalf("orderstmt %v", oconv(n.Op, 0)) + Fatalf("orderstmt %v", n.Op) case OVARKILL, OVARLIVE: order.out = append(order.out, n) @@ -790,7 +790,7 @@ func orderstmt(n *Node, order *Order) { var r *Node for _, n2 := range n.List.Slice() { if n2.Op != OXCASE { - Fatalf("order select case %v", oconv(n2.Op, 0)) + Fatalf("order select case %v", n2.Op) } r = n2.Left setlineno(n2) @@ 
-803,7 +803,7 @@ func orderstmt(n *Node, order *Order) { if r != nil { switch r.Op { default: - Yyerror("unknown op in select %v", oconv(r.Op, 0)) + Yyerror("unknown op in select %v", r.Op) Dump("select case", r) // If this is case x := <-ch or case x, y := <-ch, the case has @@ -943,7 +943,7 @@ func orderstmt(n *Node, order *Order) { n.Left = orderexpr(n.Left, order, nil) for _, n4 := range n.List.Slice() { if n4.Op != OXCASE { - Fatalf("order switch case %v", oconv(n4.Op, 0)) + Fatalf("order switch case %v", n4.Op) } orderexprlistinplace(n4.List, order) orderblockNodes(&n4.Nbody) diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index dc7d0068a1..5bcaf89d50 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -132,7 +132,7 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { switch n.Op { default: - Fatalf("instrument: unknown node type %v", oconv(n.Op, 0)) + Fatalf("instrument: unknown node type %v", n.Op) case OAS, OASWB, OAS2FUNC: instrumentnode(&n.Left, init, 1, 0) @@ -374,13 +374,13 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { OAS2RECV, OAS2MAPR, OASOP: - Yyerror("instrument: %v must be lowered by now", oconv(n.Op, 0)) + Yyerror("instrument: %v must be lowered by now", n.Op) goto ret // impossible nodes: only appear in backend. 
case ORROTC, OEXTEND: - Yyerror("instrument: %v cannot exist now", oconv(n.Op, 0)) + Yyerror("instrument: %v cannot exist now", n.Op) goto ret case OGETG: diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index c197fdd57d..120a9b8cf1 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -18,7 +18,7 @@ func typecheckselect(sel *Node) { ncase = n1 setlineno(ncase) if ncase.Op != OXCASE { - Fatalf("typecheckselect %v", oconv(ncase.Op, 0)) + Fatalf("typecheckselect %v", ncase.Op) } if ncase.List.Len() == 0 { @@ -120,7 +120,7 @@ func walkselect(sel *Node) { var ch *Node switch n.Op { default: - Fatalf("select %v", oconv(n.Op, 0)) + Fatalf("select %v", n.Op) // ok already case OSEND: @@ -218,7 +218,7 @@ func walkselect(sel *Node) { r.Ninit.Set(cas.Ninit.Slice()) switch n.Op { default: - Fatalf("select %v", oconv(n.Op, 0)) + Fatalf("select %v", n.Op) // if selectnbsend(c, v) { body } else { default body } case OSEND: @@ -282,7 +282,7 @@ func walkselect(sel *Node) { } else { switch n.Op { default: - Fatalf("select %v", oconv(n.Op, 0)) + Fatalf("select %v", n.Op) // selectsend(sel *byte, hchan *chan any, elem *any) (selected bool); case OSEND: diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 1006fcd40e..b31cd878cd 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -2560,7 +2560,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { // want to set it here. 
case OCALLINTER: if fn.Op != ODOTINTER { - Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", oconv(fn.Op, 0)) + Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op) } i := s.expr(fn.Left) itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i) @@ -2784,7 +2784,7 @@ func (s *state) addr(n *Node, bounded bool) *ssa.Value { return s.call(n, callNormal) default: - s.Unimplementedf("unhandled addr %v", oconv(n.Op, 0)) + s.Unimplementedf("unhandled addr %v", n.Op) return nil } } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index ff491f5dc2..6f2ed6a839 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -1203,9 +1203,9 @@ func printframenode(n *Node) { } switch n.Op { case ONAME: - fmt.Printf("%v %v G%d %v width=%d\n", oconv(n.Op, 0), n.Sym, n.Name.Vargen, n.Type, w) + fmt.Printf("%v %v G%d %v width=%d\n", n.Op, n.Sym, n.Name.Vargen, n.Type, w) case OTYPE: - fmt.Printf("%v %v width=%d\n", oconv(n.Op, 0), n.Type, w) + fmt.Printf("%v %v width=%d\n", n.Op, n.Type, w) } } @@ -1286,7 +1286,7 @@ func badtype(op Op, tl *Type, tr *Type) { } s := fmt_ - Yyerror("illegal types for operand: %v%s", oconv(op, 0), s) + Yyerror("illegal types for operand: %v%s", op, s) } // Brcom returns !(op). 
@@ -1306,7 +1306,7 @@ func Brcom(op Op) Op { case OGE: return OLT } - Fatalf("brcom: no com for %v\n", oconv(op, 0)) + Fatalf("brcom: no com for %v\n", op) return op } @@ -1327,7 +1327,7 @@ func Brrev(op Op) Op { case OGE: return OLE } - Fatalf("brrev: no rev for %v\n", oconv(op, 0)) + Fatalf("brrev: no rev for %v\n", op) return op } diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index a8e6e15e0a..aac92fd311 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -350,7 +350,7 @@ func casebody(sw *Node, typeswvar *Node) { for i, n := range sw.List.Slice() { setlineno(n) if n.Op != OXCASE { - Fatalf("casebody %v", oconv(n.Op, 0)) + Fatalf("casebody %v", n.Op) } n.Op = OCASE needvar := n.List.Len() != 1 || n.List.First().Op == OLITERAL diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 8c51802ac6..cf44ac8678 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -278,7 +278,7 @@ OpSwitch: default: Dump("typecheck", n) - Fatalf("typecheck %v", oconv(n.Op, 0)) + Fatalf("typecheck %v", n.Op) // names case OLITERAL: @@ -611,7 +611,7 @@ OpSwitch: aop = assignop(l.Type, r.Type, nil) if aop != 0 { if r.Type.IsInterface() && !l.Type.IsInterface() && !l.Type.IsComparable() { - Yyerror("invalid operation: %v (operator %v not defined on %s)", n, oconv(op, 0), typekind(l.Type)) + Yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type)) n.Type = nil return n } @@ -633,7 +633,7 @@ OpSwitch: aop = assignop(r.Type, l.Type, nil) if aop != 0 { if l.Type.IsInterface() && !r.Type.IsInterface() && !r.Type.IsComparable() { - Yyerror("invalid operation: %v (operator %v not defined on %s)", n, oconv(op, 0), typekind(r.Type)) + Yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type)) n.Type = nil return n } @@ -664,7 +664,7 @@ OpSwitch: } if 
!okfor[op][et] { - Yyerror("invalid operation: %v (operator %v not defined on %s)", n, oconv(op, 0), typekind(t)) + Yyerror("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t)) n.Type = nil return n } @@ -774,7 +774,7 @@ OpSwitch: return n } if !okfor[n.Op][t.Etype] { - Yyerror("invalid operation: %v %v", oconv(n.Op, 0), t) + Yyerror("invalid operation: %v %v", n.Op, t) n.Type = nil return n } @@ -1314,7 +1314,7 @@ OpSwitch: case OCAP, OLEN, OREAL, OIMAG: ok |= Erv - if !onearg(n, "%v", oconv(n.Op, 0)) { + if !onearg(n, "%v", n.Op) { n.Type = nil return n } @@ -1380,7 +1380,7 @@ OpSwitch: break OpSwitch badcall1: - Yyerror("invalid argument %v for %v", Nconv(n.Left, FmtLong), oconv(n.Op, 0)) + Yyerror("invalid argument %v for %v", Nconv(n.Left, FmtLong), n.Op) n.Type = nil return n @@ -1463,7 +1463,7 @@ OpSwitch: break OpSwitch case OCLOSE: - if !onearg(n, "%v", oconv(n.Op, 0)) { + if !onearg(n, "%v", n.Op) { n.Type = nil return n } @@ -2284,19 +2284,19 @@ func twoarg(n *Node) bool { return true } if n.List.Len() == 0 { - Yyerror("missing argument to %v - %v", oconv(n.Op, 0), n) + Yyerror("missing argument to %v - %v", n.Op, n) return false } n.Left = n.List.First() if n.List.Len() == 1 { - Yyerror("missing argument to %v - %v", oconv(n.Op, 0), n) + Yyerror("missing argument to %v - %v", n.Op, n) n.List.Set(nil) return false } if n.List.Len() > 2 { - Yyerror("too many arguments to %v - %v", oconv(n.Op, 0), n) + Yyerror("too many arguments to %v - %v", n.Op, n) n.List.Set(nil) return false } @@ -2662,7 +2662,7 @@ func typecheckaste(op Op, call *Node, isddd bool, tstruct *Type, nl Nodes, desc if call != nil { Yyerror("invalid use of ... in call to %v", call) } else { - Yyerror("invalid use of ... in %v", oconv(op, 0)) + Yyerror("invalid use of ... 
in %v", op) } } @@ -2682,7 +2682,7 @@ notenough: Yyerror("not enough arguments in call to %v", call) } } else { - Yyerror("not enough arguments to %v", oconv(op, 0)) + Yyerror("not enough arguments to %v", op) } if n != nil { n.Diag = 1 @@ -2695,7 +2695,7 @@ toomany: if call != nil { Yyerror("too many arguments in call to %v", call) } else { - Yyerror("too many arguments to %v", oconv(op, 0)) + Yyerror("too many arguments to %v", op) } goto out } @@ -3606,7 +3606,7 @@ func typecheckdef(n *Node) *Node { switch n.Op { default: - Fatalf("typecheckdef %v", oconv(n.Op, 0)) + Fatalf("typecheckdef %v", n.Op) // not really syms case OGOTO, OLABEL: diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go index cc9a50e6a8..6ec06453ef 100644 --- a/src/cmd/compile/internal/gc/walk.go +++ b/src/cmd/compile/internal/gc/walk.go @@ -159,7 +159,7 @@ func walkstmt(n *Node) *Node { if n.Op == ONAME { Yyerror("%v is not a top level statement", n.Sym) } else { - Yyerror("%v is not a top level statement", oconv(n.Op, 0)) + Yyerror("%v is not a top level statement", n.Op) } Dump("nottop", n) @@ -1505,7 +1505,7 @@ opswitch: // ifaceeq(i1 any-1, i2 any-2) (ret bool); case OCMPIFACE: if !Eqtype(n.Left.Type, n.Right.Type) { - Fatalf("ifaceeq %v %v %v", oconv(n.Op, 0), n.Left.Type, n.Right.Type) + Fatalf("ifaceeq %v %v %v", n.Op, n.Left.Type, n.Right.Type) } var fn *Node if n.Left.Type.IsEmptyInterface() { @@ -1651,7 +1651,7 @@ func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node { var nln, nrn Nodes nln.Set(nl) nrn.Set(nr) - Yyerror("error in shape across %v %v %v / %d %d [%s]", Hconv(nln, FmtSign), oconv(op, 0), Hconv(nrn, FmtSign), len(nl), len(nr), Curfn.Func.Nname.Sym.Name) + Yyerror("error in shape across %v %v %v / %d %d [%s]", Hconv(nln, FmtSign), op, Hconv(nrn, FmtSign), len(nl), len(nr), Curfn.Func.Nname.Sym.Name) } return nn } @@ -1866,9 +1866,9 @@ func ascompatte(op Op, call *Node, isddd bool, nl *Type, lr []*Node, fp int, ini l1 := 
dumptypes(nl, "expected") l2 := dumpnodetypes(lr0, "given") if l != nil { - Yyerror("not enough arguments to %v\n\t%s\n\t%s", oconv(op, 0), l1, l2) + Yyerror("not enough arguments to %v\n\t%s\n\t%s", op, l1, l2) } else { - Yyerror("too many arguments to %v\n\t%s\n\t%s", oconv(op, 0), l1, l2) + Yyerror("too many arguments to %v\n\t%s\n\t%s", op, l1, l2) } } @@ -2142,7 +2142,7 @@ func applywritebarrier(n *Node) *Node { func convas(n *Node, init *Nodes) *Node { if n.Op != OAS { - Fatalf("convas: not OAS %v", oconv(n.Op, 0)) + Fatalf("convas: not OAS %v", n.Op) } n.Typecheck = 1 @@ -3798,7 +3798,7 @@ func usefield(n *Node) { switch n.Op { default: - Fatalf("usefield %v", oconv(n.Op, 0)) + Fatalf("usefield %v", n.Op) case ODOT, ODOTPTR: break -- cgit v1.3