aboutsummaryrefslogtreecommitdiff
path: root/src/cmd
diff options
context:
space:
mode:
authorFilippo Valsorda <filippo@golang.org>2018-10-15 17:09:34 -0400
committerFilippo Valsorda <filippo@golang.org>2018-10-15 17:09:34 -0400
commit623650b27aa42dd2ccd20fc4a79f8fe7b8559987 (patch)
treebfafa16d1bfd57fc1d9831c22e6e236be3d52281 /src/cmd
parent36c789b1fd72af5ff6e756794597a3a85e069998 (diff)
parent1961d8d72a53e780effa18bfa8dbe4e4282df0b2 (diff)
downloadgo-623650b27aa42dd2ccd20fc4a79f8fe7b8559987.tar.xz
[dev.boringcrypto] all: merge master into dev.boringcrypto
Change-Id: I218ba1b89a2df6e4335c6a5846889d9a04affe5d
Diffstat (limited to 'src/cmd')
-rw-r--r--src/cmd/asm/internal/asm/testdata/386.s2
-rw-r--r--src/cmd/asm/internal/asm/testdata/arm64.s1
-rw-r--r--src/cmd/asm/internal/asm/testdata/arm64enc.s4
-rw-r--r--src/cmd/asm/internal/asm/testdata/arm64error.s36
-rw-r--r--src/cmd/asm/internal/asm/testdata/ppc64.s2
-rw-r--r--src/cmd/compile/fmt_test.go1
-rw-r--r--src/cmd/compile/internal/amd64/ssa.go28
-rw-r--r--src/cmd/compile/internal/arm/ssa.go49
-rw-r--r--src/cmd/compile/internal/arm64/ssa.go20
-rw-r--r--src/cmd/compile/internal/gc/bexport.go1744
-rw-r--r--src/cmd/compile/internal/gc/bimport.go1323
-rw-r--r--src/cmd/compile/internal/gc/const.go15
-rw-r--r--src/cmd/compile/internal/gc/dump.go287
-rw-r--r--src/cmd/compile/internal/gc/esc.go126
-rw-r--r--src/cmd/compile/internal/gc/export.go10
-rw-r--r--src/cmd/compile/internal/gc/float_test.go14
-rw-r--r--src/cmd/compile/internal/gc/fmt.go45
-rw-r--r--src/cmd/compile/internal/gc/inl.go4
-rw-r--r--src/cmd/compile/internal/gc/main.go24
-rw-r--r--src/cmd/compile/internal/gc/mpfloat.go45
-rw-r--r--src/cmd/compile/internal/gc/mpint.go11
-rw-r--r--src/cmd/compile/internal/gc/obj.go5
-rw-r--r--src/cmd/compile/internal/gc/op_string.go4
-rw-r--r--src/cmd/compile/internal/gc/order.go12
-rw-r--r--src/cmd/compile/internal/gc/pgen.go20
-rw-r--r--src/cmd/compile/internal/gc/plive.go235
-rw-r--r--src/cmd/compile/internal/gc/sinit.go18
-rw-r--r--src/cmd/compile/internal/gc/ssa.go94
-rw-r--r--src/cmd/compile/internal/gc/subr.go33
-rw-r--r--src/cmd/compile/internal/gc/syntax.go1
-rw-r--r--src/cmd/compile/internal/gc/typecheck.go49
-rw-r--r--src/cmd/compile/internal/gc/walk.go6
-rw-r--r--src/cmd/compile/internal/ppc64/ssa.go35
-rw-r--r--src/cmd/compile/internal/ssa/gen/386.rules12
-rw-r--r--src/cmd/compile/internal/ssa/gen/AMD64.rules84
-rw-r--r--src/cmd/compile/internal/ssa/gen/AMD64Ops.go30
-rw-r--r--src/cmd/compile/internal/ssa/gen/ARM64.rules202
-rw-r--r--src/cmd/compile/internal/ssa/gen/ARM64Ops.go18
-rw-r--r--src/cmd/compile/internal/ssa/gen/PPC64.rules135
-rw-r--r--src/cmd/compile/internal/ssa/gen/PPC64Ops.go30
-rw-r--r--src/cmd/compile/internal/ssa/gen/Wasm.rules4
-rw-r--r--src/cmd/compile/internal/ssa/gen/generic.rules158
-rw-r--r--src/cmd/compile/internal/ssa/gen/genericOps.go5
-rw-r--r--src/cmd/compile/internal/ssa/gen/main.go9
-rw-r--r--src/cmd/compile/internal/ssa/html.go2
-rw-r--r--src/cmd/compile/internal/ssa/op.go12
-rw-r--r--src/cmd/compile/internal/ssa/opGen.go798
-rw-r--r--src/cmd/compile/internal/ssa/prove.go4
-rw-r--r--src/cmd/compile/internal/ssa/regalloc.go2
-rw-r--r--src/cmd/compile/internal/ssa/rewrite.go40
-rw-r--r--src/cmd/compile/internal/ssa/rewrite386.go104
-rw-r--r--src/cmd/compile/internal/ssa/rewriteAMD64.go2057
-rw-r--r--src/cmd/compile/internal/ssa/rewriteARM64.go5943
-rw-r--r--src/cmd/compile/internal/ssa/rewritePPC64.go3362
-rw-r--r--src/cmd/compile/internal/ssa/rewriteWasm.go8
-rw-r--r--src/cmd/compile/internal/ssa/rewritegeneric.go434
-rw-r--r--src/cmd/compile/internal/ssa/softfloat.go2
-rw-r--r--src/cmd/compile/internal/ssa/stmtlines_test.go3
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts20
-rw-r--r--src/cmd/compile/internal/types/type.go2
-rw-r--r--src/cmd/compile/internal/x86/ssa.go26
-rw-r--r--src/cmd/dist/build.go4
-rw-r--r--src/cmd/dist/buildruntime.go4
-rw-r--r--src/cmd/dist/main.go3
-rw-r--r--src/cmd/dist/test.go4
-rw-r--r--src/cmd/fix/main.go2
-rw-r--r--src/cmd/fix/typecheck.go4
-rw-r--r--src/cmd/go/alldocs.go8
-rw-r--r--src/cmd/go/go_test.go55
-rw-r--r--src/cmd/go/internal/clean/clean.go14
-rw-r--r--src/cmd/go/internal/envcmd/env.go2
-rw-r--r--src/cmd/go/internal/get/vcs.go2
-rw-r--r--src/cmd/go/internal/help/helpdoc.go6
-rw-r--r--src/cmd/go/internal/modconv/convert_test.go2
-rw-r--r--src/cmd/go/internal/modfetch/codehost/codehost.go2
-rw-r--r--src/cmd/go/internal/modfetch/coderepo_test.go16
-rw-r--r--src/cmd/go/internal/modfetch/fetch.go2
-rw-r--r--src/cmd/go/internal/modfetch/proxy.go2
-rw-r--r--src/cmd/go/internal/modload/build.go18
-rw-r--r--src/cmd/go/internal/modload/import.go5
-rw-r--r--src/cmd/go/internal/modload/import_test.go2
-rw-r--r--src/cmd/go/internal/modload/query.go4
-rw-r--r--src/cmd/go/internal/modload/query_test.go2
-rw-r--r--src/cmd/go/internal/search/search.go4
-rw-r--r--src/cmd/go/internal/work/build.go6
-rw-r--r--src/cmd/go/internal/work/buildid.go8
-rw-r--r--src/cmd/go/internal/work/exec.go10
-rw-r--r--src/cmd/go/internal/work/security.go4
-rw-r--r--src/cmd/go/main.go18
-rw-r--r--src/cmd/go/proxy_test.go4
-rw-r--r--src/cmd/go/script_test.go12
-rw-r--r--src/cmd/go/testdata/addmod.go2
-rw-r--r--src/cmd/go/testdata/script/help.txt5
-rw-r--r--src/cmd/go/testdata/script/list_bad_import.txt2
-rw-r--r--src/cmd/go/testdata/script/mod_clean_cache.txt23
-rw-r--r--src/cmd/go/testdata/script/mod_help.txt6
-rw-r--r--src/cmd/go/testdata/script/mod_list_bad_import.txt2
-rw-r--r--src/cmd/go/testdata/script/mod_string_alias.txt14
-rw-r--r--src/cmd/go/testdata/script/mod_test.txt7
-rw-r--r--src/cmd/go/vendor_test.go2
-rw-r--r--src/cmd/gofmt/gofmt_test.go2
-rw-r--r--src/cmd/internal/dwarf/dwarf.go13
-rw-r--r--src/cmd/internal/goobj/read.go2
-rw-r--r--src/cmd/internal/obj/arm/asm5.go2
-rw-r--r--src/cmd/internal/obj/arm64/asm7.go72
-rw-r--r--src/cmd/internal/obj/link.go7
-rw-r--r--src/cmd/internal/objabi/funcdata.go1
-rw-r--r--src/cmd/internal/objabi/funcid.go2
-rw-r--r--src/cmd/internal/objabi/head.go5
-rw-r--r--src/cmd/link/dwarf_test.go3
-rw-r--r--src/cmd/link/internal/ld/dwarf.go155
-rw-r--r--src/cmd/link/internal/ld/dwarf_test.go114
-rw-r--r--src/cmd/link/internal/ld/lib.go73
-rw-r--r--src/cmd/link/internal/ld/pcln.go97
-rw-r--r--src/cmd/link/internal/ld/util.go10
-rw-r--r--src/cmd/link/internal/objfile/objfile.go2
-rw-r--r--src/cmd/link/internal/ppc64/asm.go12
-rw-r--r--src/cmd/trace/trace.go12
-rw-r--r--src/cmd/vendor/golang.org/x/sys/windows/aliases.go13
-rw-r--r--src/cmd/vendor/golang.org/x/sys/windows/asm_windows_arm.s11
-rw-r--r--src/cmd/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go2
-rw-r--r--src/cmd/vendor/golang.org/x/sys/windows/security_windows.go4
-rw-r--r--src/cmd/vendor/golang.org/x/sys/windows/service.go18
-rw-r--r--src/cmd/vendor/golang.org/x/sys/windows/svc/mgr/config.go38
-rw-r--r--src/cmd/vendor/golang.org/x/sys/windows/svc/mgr/mgr_test.go113
-rw-r--r--src/cmd/vendor/golang.org/x/sys/windows/svc/mgr/recovery.go135
-rw-r--r--src/cmd/vendor/golang.org/x/sys/windows/svc/sys_arm.s38
-rw-r--r--src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go60
-rw-r--r--src/cmd/vendor/golang.org/x/sys/windows/syscall_windows_test.go6
-rw-r--r--src/cmd/vendor/golang.org/x/sys/windows/types_windows.go176
-rw-r--r--src/cmd/vendor/golang.org/x/sys/windows/types_windows_arm.go22
-rw-r--r--src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go2
-rw-r--r--src/cmd/vendor/vendor.json32
-rw-r--r--src/cmd/vet/all/main.go12
-rw-r--r--src/cmd/vet/all/whitelist/all.txt1
-rw-r--r--src/cmd/vet/all/whitelist/windows_386.txt1
-rw-r--r--src/cmd/vet/all/whitelist/windows_amd64.txt1
-rw-r--r--src/cmd/vet/main.go3
-rw-r--r--src/cmd/vet/shadow.go9
-rw-r--r--src/cmd/vet/testdata/shadow.go32
-rw-r--r--src/cmd/vet/types.go24
-rw-r--r--src/cmd/vet/vet_test.go2
142 files changed, 14958 insertions, 4482 deletions
diff --git a/src/cmd/asm/internal/asm/testdata/386.s b/src/cmd/asm/internal/asm/testdata/386.s
index 90a66167a1..d524a4c8c1 100644
--- a/src/cmd/asm/internal/asm/testdata/386.s
+++ b/src/cmd/asm/internal/asm/testdata/386.s
@@ -70,7 +70,7 @@ label:
// LTYPEM spec6 { outcode(int($1), &$2); }
MOVL AX, BX
MOVL $4, BX
-
+
// LTYPEI spec7 { outcode(int($1), &$2); }
IMULL AX
IMULL $4, CX
diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s
index 361b7a45c0..9e2e2b1dc5 100644
--- a/src/cmd/asm/internal/asm/testdata/arm64.s
+++ b/src/cmd/asm/internal/asm/testdata/arm64.s
@@ -654,6 +654,7 @@ again:
CALL foo(SB)
// LDP/STP
+ LDP (R0), (R0, R1) // 000440a9
LDP (R0), (R1, R2) // 010840a9
LDP 8(R0), (R1, R2) // 018840a9
LDP -8(R0), (R1, R2) // 01887fa9
diff --git a/src/cmd/asm/internal/asm/testdata/arm64enc.s b/src/cmd/asm/internal/asm/testdata/arm64enc.s
index ee4673c1ae..432ab74493 100644
--- a/src/cmd/asm/internal/asm/testdata/arm64enc.s
+++ b/src/cmd/asm/internal/asm/testdata/arm64enc.s
@@ -188,8 +188,8 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8
MOVBU (R18)(R14<<0), R23 // 577a6e38
MOVBU (R2)(R8.SXTX), R19 // 53e86838
MOVBU (R27)(R23), R14 // MOVBU (R27)(R23*1), R14 // 6e6b7738
- MOVHU.P 107(R13), R13 // adb54678
- MOVHU.W 192(R2), R2 // 420c4c78
+ MOVHU.P 107(R14), R13 // cdb54678
+ MOVHU.W 192(R3), R2 // 620c4c78
MOVHU 6844(R4), R18 // 92787579
MOVHU (R5)(R25.SXTW), R15 // afc87978
//TODO MOVBW.P 77(R18), R11 // 4bd6c438
diff --git a/src/cmd/asm/internal/asm/testdata/arm64error.s b/src/cmd/asm/internal/asm/testdata/arm64error.s
index b2ec0cc425..357db80222 100644
--- a/src/cmd/asm/internal/asm/testdata/arm64error.s
+++ b/src/cmd/asm/internal/asm/testdata/arm64error.s
@@ -8,7 +8,19 @@ TEXT errors(SB),$0
ADDSW R7->32, R14, R13 // ERROR "shift amount out of range 0 to 31"
ADD R1.UXTB<<5, R2, R3 // ERROR "shift amount out of range 0 to 4"
ADDS R1.UXTX<<7, R2, R3 // ERROR "shift amount out of range 0 to 4"
+ AND $0x22220000, R2, RSP // ERROR "illegal combination"
+ ANDS $0x22220000, R2, RSP // ERROR "illegal combination"
+ ADD R1, R2, R3, R4 // ERROR "illegal combination"
BICW R7@>33, R5, R16 // ERROR "shift amount out of range 0 to 31"
+ CINC CS, R2, R3, R4 // ERROR "illegal combination"
+ CSEL LT, R1, R2 // ERROR "illegal combination"
+ LDP.P 8(R2), (R2, R3) // ERROR "constrained unpredictable behavior"
+ LDP.W 8(R3), (R2, R3) // ERROR "constrained unpredictable behavior"
+ LDP (R1), (R2, R2) // ERROR "constrained unpredictable behavior"
+ LDP (R0), (F0, F1) // ERROR "invalid register pair"
+ LDP (R0), (R3, ZR) // ERROR "invalid register pair"
+ LDXPW (RSP), (R2, R2) // ERROR "constrained unpredictable behavior"
+ LDAXPW (R5), (R2, R2) // ERROR "constrained unpredictable behavior"
MOVD.P 300(R2), R3 // ERROR "offset out of range [-255,254]"
MOVD.P R3, 344(R2) // ERROR "offset out of range [-255,254]"
MOVD (R3)(R7.SXTX<<2), R8 // ERROR "invalid index shift amount"
@@ -16,6 +28,17 @@ TEXT errors(SB),$0
MOVWU (R5)(R4<<1), R10 // ERROR "invalid index shift amount"
MOVB (R5)(R4.SXTW<<5), R10 // ERROR "invalid index shift amount"
MOVH R5, (R6)(R2<<3) // ERROR "invalid index shift amount"
+ MADD R1, R2, R3 // ERROR "illegal combination"
+ MOVD.P R1, 8(R1) // ERROR "constrained unpredictable behavior"
+ MOVD.W 16(R2), R2 // ERROR "constrained unpredictable behavior"
+ STP (F2, F3), (R0) // ERROR "invalid register pair"
+ STP.W (R1, R2), 8(R1) // ERROR "constrained unpredictable behavior"
+ STP.P (R1, R2), 8(R2) // ERROR "constrained unpredictable behavior"
+ STLXP (R6, R11), (RSP), R6 // ERROR "constrained unpredictable behavior"
+ STXP (R6, R11), (R2), R2 // ERROR "constrained unpredictable behavior"
+ STLXR R3, (RSP), R3 // ERROR "constrained unpredictable behavior"
+ STXR R3, (R4), R4 // ERROR "constrained unpredictable behavior"
+ STLXRB R2, (R5), R5 // ERROR "constrained unpredictable behavior"
VLD1 (R8)(R13), [V2.B16] // ERROR "illegal combination"
VLD1 8(R9), [V2.B16] // ERROR "illegal combination"
VST1 [V1.B16], (R8)(R13) // ERROR "illegal combination"
@@ -83,15 +106,10 @@ TEXT errors(SB),$0
VST1.P [V1.B16], (R8)(R9<<1) // ERROR "invalid extended register"
VREV64 V1.H4, V2.H8 // ERROR "invalid arrangement"
VREV64 V1.D1, V2.D1 // ERROR "invalid arrangement"
- ADD R1, R2, R3, R4 // ERROR "illegal combination"
- MADD R1, R2, R3 // ERROR "illegal combination"
- CINC CS, R2, R3, R4 // ERROR "illegal combination"
- CSEL LT, R1, R2 // ERROR "illegal combination"
- AND $0x22220000, R2, RSP // ERROR "illegal combination"
- ANDS $0x22220000, R2, RSP // ERROR "illegal combination"
- LDP (R0), (F0, F1) // ERROR "invalid register pair"
- LDP (R0), (R3, ZR) // ERROR "invalid register pair"
- STP (F2, F3), (R0) // ERROR "invalid register pair"
FLDPD (R0), (R1, R2) // ERROR "invalid register pair"
+ FLDPD (R1), (F2, F2) // ERROR "constrained unpredictable behavior"
+ FLDPS (R2), (F3, F3) // ERROR "constrained unpredictable behavior"
FSTPD (R1, R2), (R0) // ERROR "invalid register pair"
+ FMOVS (F2), F0 // ERROR "illegal combination"
+ FMOVD F0, (F1) // ERROR "illegal combination"
RET
diff --git a/src/cmd/asm/internal/asm/testdata/ppc64.s b/src/cmd/asm/internal/asm/testdata/ppc64.s
index ba00b8f6e2..9e8929dac4 100644
--- a/src/cmd/asm/internal/asm/testdata/ppc64.s
+++ b/src/cmd/asm/internal/asm/testdata/ppc64.s
@@ -550,7 +550,7 @@ label1:
// ftsqrt BF, FRB
FTSQRT F2,$7
-// FCFID
+// FCFID
// FCFIDS
FCFID F2,F3
diff --git a/src/cmd/compile/fmt_test.go b/src/cmd/compile/fmt_test.go
index e28e428a17..65f88dfff9 100644
--- a/src/cmd/compile/fmt_test.go
+++ b/src/cmd/compile/fmt_test.go
@@ -700,6 +700,7 @@ var knownFormats = map[string]string{
"int8 %x": "",
"interface{} %#v": "",
"interface{} %T": "",
+ "interface{} %p": "",
"interface{} %q": "",
"interface{} %s": "",
"interface{} %v": "",
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index ae6141dd12..b4c4b1f4cd 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -583,7 +583,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt
- case ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst:
+ case ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst,
+ ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst,
+ ssa.OpAMD64BTSLconst, ssa.OpAMD64BTSQconst,
+ ssa.OpAMD64BTCLconst, ssa.OpAMD64BTCQconst,
+ ssa.OpAMD64BTRLconst, ssa.OpAMD64BTRQconst:
op := v.Op
if op == ssa.OpAMD64BTQconst && v.AuxInt < 32 {
// Emit 32-bit version because it's shorter
@@ -594,15 +598,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[0].Reg()
- case ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst,
- ssa.OpAMD64BTSLconst, ssa.OpAMD64BTSQconst,
- ssa.OpAMD64BTCLconst, ssa.OpAMD64BTCQconst,
- ssa.OpAMD64BTRLconst, ssa.OpAMD64BTRQconst:
- p := s.Prog(v.Op.Asm())
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = v.AuxInt
- p.To.Type = obj.TYPE_REG
- p.To.Reg = v.Args[0].Reg()
case ssa.OpAMD64CMPQload, ssa.OpAMD64CMPLload, ssa.OpAMD64CMPWload, ssa.OpAMD64CMPBload:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
@@ -700,6 +695,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
+ ssa.OpAMD64BTCQmodify, ssa.OpAMD64BTCLmodify, ssa.OpAMD64BTRQmodify, ssa.OpAMD64BTRLmodify, ssa.OpAMD64BTSQmodify, ssa.OpAMD64BTSLmodify,
ssa.OpAMD64ADDQmodify, ssa.OpAMD64SUBQmodify, ssa.OpAMD64ANDQmodify, ssa.OpAMD64ORQmodify, ssa.OpAMD64XORQmodify,
ssa.OpAMD64ADDLmodify, ssa.OpAMD64SUBLmodify, ssa.OpAMD64ANDLmodify, ssa.OpAMD64ORLmodify, ssa.OpAMD64XORLmodify:
p := s.Prog(v.Op.Asm())
@@ -764,16 +760,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
- } else {
- p := s.Prog(v.Op.Asm())
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = val
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.To, v, off)
+ break
}
+ fallthrough
case ssa.OpAMD64ANDQconstmodify, ssa.OpAMD64ANDLconstmodify, ssa.OpAMD64ORQconstmodify, ssa.OpAMD64ORLconstmodify,
- ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify:
+ ssa.OpAMD64BTCQconstmodify, ssa.OpAMD64BTCLconstmodify, ssa.OpAMD64BTSQconstmodify, ssa.OpAMD64BTSLconstmodify,
+ ssa.OpAMD64BTRQconstmodify, ssa.OpAMD64BTRLconstmodify, ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify:
sc := v.AuxValAndOff()
off := sc.Off()
val := sc.Val()
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index 98627344b8..9a8fabf622 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -7,6 +7,7 @@ package arm
import (
"fmt"
"math"
+ "math/bits"
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
@@ -119,6 +120,28 @@ func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *
return p
}
+// find a (lsb, width) pair for BFC
+// lsb must be in [0, 31], width must be in [1, 32 - lsb]
+// return (0xffffffff, 0) if v is not a binary like 0...01...10...0
+func getBFC(v uint32) (uint32, uint32) {
+ var m, l uint32
+ // BFC is not applicable with zero
+ if v == 0 {
+ return 0xffffffff, 0
+ }
+ // find the lowest set bit, for example l=2 for 0x3ffffffc
+ l = uint32(bits.TrailingZeros32(v))
+ // m-1 represents the highest set bit index, for example m=30 for 0x3ffffffc
+ m = 32 - uint32(bits.LeadingZeros32(v))
+ // check if v is a binary like 0...01...10...0
+ if (1<<m)-(1<<l) == v {
+ // it must be m > l for non-zero v
+ return l, m - l
+ }
+ // invalid
+ return 0xffffffff, 0
+}
+
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpARMMOVWreg:
@@ -267,16 +290,38 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
+ case ssa.OpARMANDconst, ssa.OpARMBICconst:
+ // try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks
+ // BFC is only available on ARMv7, and its result and source are in the same register
+ if objabi.GOARM == 7 && v.Reg() == v.Args[0].Reg() {
+ var val uint32
+ if v.Op == ssa.OpARMANDconst {
+ val = ^uint32(v.AuxInt)
+ } else { // BICconst
+ val = uint32(v.AuxInt)
+ }
+ lsb, width := getBFC(val)
+ // omit BFC for ARM's imm12
+ if 8 < width && width < 24 {
+ p := s.Prog(arm.ABFC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(width)
+ p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: int64(lsb)})
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ break
+ }
+ }
+ // fall back to ordinary form
+ fallthrough
case ssa.OpARMADDconst,
ssa.OpARMADCconst,
ssa.OpARMSUBconst,
ssa.OpARMSBCconst,
ssa.OpARMRSBconst,
ssa.OpARMRSCconst,
- ssa.OpARMANDconst,
ssa.OpARMORconst,
ssa.OpARMXORconst,
- ssa.OpARMBICconst,
ssa.OpARMSLLconst,
ssa.OpARMSRLconst,
ssa.OpARMSRAconst:
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index db7064cff0..87703dd80d 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -195,7 +195,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64FNMULS,
ssa.OpARM64FNMULD,
ssa.OpARM64FDIVS,
- ssa.OpARM64FDIVD:
+ ssa.OpARM64FDIVD,
+ ssa.OpARM64ROR,
+ ssa.OpARM64RORW:
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
@@ -253,6 +255,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
+ case ssa.OpARM64MVNshiftLL, ssa.OpARM64NEGshiftLL:
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt)
+ case ssa.OpARM64MVNshiftRL, ssa.OpARM64NEGshiftRL:
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
+ case ssa.OpARM64MVNshiftRA, ssa.OpARM64NEGshiftRA:
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
case ssa.OpARM64ADDshiftLL,
ssa.OpARM64SUBshiftLL,
ssa.OpARM64ANDshiftLL,
@@ -315,11 +323,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
- case ssa.OpARM64CMPshiftLL:
+ case ssa.OpARM64CMPshiftLL, ssa.OpARM64CMNshiftLL, ssa.OpARM64TSTshiftLL:
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LL, v.AuxInt)
- case ssa.OpARM64CMPshiftRL:
+ case ssa.OpARM64CMPshiftRL, ssa.OpARM64CMNshiftRL, ssa.OpARM64TSTshiftRL:
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt)
- case ssa.OpARM64CMPshiftRA:
+ case ssa.OpARM64CMPshiftRA, ssa.OpARM64CMNshiftRA, ssa.OpARM64TSTshiftRA:
genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt)
case ssa.OpARM64MOVDaddr:
p := s.Prog(arm64.AMOVD)
@@ -696,8 +704,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
fallthrough
case ssa.OpARM64MVN,
ssa.OpARM64NEG,
+ ssa.OpARM64FABSD,
ssa.OpARM64FMOVDfpgp,
ssa.OpARM64FMOVDgpfp,
+ ssa.OpARM64FMOVSfpgp,
+ ssa.OpARM64FMOVSgpfp,
ssa.OpARM64FNEGS,
ssa.OpARM64FNEGD,
ssa.OpARM64FSQRTD,
@@ -728,6 +739,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
ssa.OpARM64CLZW,
ssa.OpARM64FRINTAD,
ssa.OpARM64FRINTMD,
+ ssa.OpARM64FRINTND,
ssa.OpARM64FRINTPD,
ssa.OpARM64FRINTZD:
p := s.Prog(v.Op.Asm())
diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go
index d0b1804eb6..3ef1e6af4d 100644
--- a/src/cmd/compile/internal/gc/bexport.go
+++ b/src/cmd/compile/internal/gc/bexport.go
@@ -2,444 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Binary package export.
-
-/*
-1) Export data encoding principles:
-
-The export data is a serialized description of the graph of exported
-"objects": constants, types, variables, and functions. Aliases may be
-directly reexported, and unaliased types may be indirectly reexported
-(as part of the type of a directly exported object). More generally,
-objects referred to from inlined function bodies can be reexported.
-We need to know which package declares these reexported objects, and
-therefore packages are also part of the export graph.
-
-The roots of the graph are two lists of objects. The 1st list (phase 1,
-see Export) contains all objects that are exported at the package level.
-These objects are the full representation of the package's API, and they
-are the only information a platform-independent tool (e.g., go/types)
-needs to know to type-check against a package.
-
-The 2nd list of objects contains all objects referred to from exported
-inlined function bodies. These objects are needed by the compiler to
-make sense of the function bodies; the exact list contents are compiler-
-specific.
-
-Finally, the export data contains a list of representations for inlined
-function bodies. The format of this representation is compiler specific.
-
-The graph is serialized in in-order fashion, starting with the roots.
-Each object in the graph is serialized by writing its fields sequentially.
-If the field is a pointer to another object, that object is serialized in
-place, recursively. Otherwise the field is written in place. Non-pointer
-fields are all encoded as integer or string values.
-
-Some objects (packages, types) may be referred to more than once. When
-reaching an object that was not serialized before, an integer _index_
-is assigned to it, starting at 0. In this case, the encoding starts
-with an integer _tag_ < 0. The tag value indicates the kind of object
-that follows and that this is the first time that we see this object.
-If the object was already serialized, the encoding is simply the object
-index >= 0. An importer can trivially determine if an object needs to
-be read in for the first time (tag < 0) and entered into the respective
-object table, or if the object was seen already (index >= 0), in which
-case the index is used to look up the object in the respective table.
-
-Before exporting or importing, the type tables are populated with the
-predeclared types (int, string, error, unsafe.Pointer, etc.). This way
-they are automatically encoded with a known and fixed type index.
-
-2) Encoding format:
-
-The export data starts with two newline-terminated strings: a version
-string and either an empty string, or "debug", when emitting the debug
-format. These strings are followed by version-specific encoding options.
-
-(The Go1.7 version starts with a couple of bytes specifying the format.
-That format encoding is no longer used but is supported to avoid spurious
-errors when importing old installed package files.)
-
-This header is followed by the package object for the exported package,
-two lists of objects, and the list of inlined function bodies.
-
-The encoding of objects is straight-forward: Constants, variables, and
-functions start with their name, type, and possibly a value. Named types
-record their name and package so that they can be canonicalized: If the
-same type was imported before via another import, the importer must use
-the previously imported type pointer so that we have exactly one version
-(i.e., one pointer) for each named type (and read but discard the current
-type encoding). Unnamed types simply encode their respective fields.
-Aliases are encoded starting with their name followed by the qualified
-identifier denoting the original (aliased) object, which was exported
-earlier.
-
-In the encoding, some lists start with the list length. Some lists are
-terminated with an end marker (usually for lists where we may not know
-the length a priori).
-
-Integers use variable-length encoding for compact representation.
-
-Strings are canonicalized similar to objects that may occur multiple times:
-If the string was exported already, it is represented by its index only.
-Otherwise, the export data starts with the negative string length (negative,
-so we can distinguish from string index), followed by the string bytes.
-The empty string is mapped to index 0. (The initial format string is an
-exception; it is encoded as the string bytes followed by a newline).
-
-The exporter and importer are completely symmetric in implementation: For
-each encoding routine there is a matching and symmetric decoding routine.
-This symmetry makes it very easy to change or extend the format: If a new
-field needs to be encoded, a symmetric change can be made to exporter and
-importer.
-
-3) Making changes to the encoding format:
-
-Any change to the encoding format requires a respective change in the
-exporter below and a corresponding symmetric change to the importer in
-bimport.go.
-
-Furthermore, it requires a corresponding change to go/internal/gcimporter
-and golang.org/x/tools/go/gcimporter15. Changes to the latter must preserve
-compatibility with both the last release of the compiler, and with the
-corresponding compiler at tip. That change is necessarily more involved,
-as it must switch based on the version number in the export data file.
-
-It is recommended to turn on debugFormat temporarily when working on format
-changes as it will help finding encoding/decoding inconsistencies quickly.
-*/
-
package gc
import (
- "bufio"
- "bytes"
"cmd/compile/internal/types"
- "cmd/internal/src"
- "encoding/binary"
- "fmt"
- "math/big"
- "sort"
- "strings"
)
-// If debugFormat is set, each integer and string value is preceded by a marker
-// and position information in the encoding. This mechanism permits an importer
-// to recognize immediately when it is out of sync. The importer recognizes this
-// mode automatically (i.e., it can import export data produced with debugging
-// support even if debugFormat is not set at the time of import). This mode will
-// lead to massively larger export data (by a factor of 2 to 3) and should only
-// be enabled during development and debugging.
-//
-// NOTE: This flag is the first flag to enable if importing dies because of
-// (suspected) format errors, and whenever a change is made to the format.
-const debugFormat = false // default: false
-
-// Current export format version. Increase with each format change.
-// 6: package height (CL 105038)
-// 5: improved position encoding efficiency (issue 20080, CL 41619)
-// 4: type name objects support type aliases, uses aliasTag
-// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
-// 2: removed unused bool in ODCL export (compiler only)
-// 1: header format change (more regular), export package for _ struct fields
-// 0: Go1.7 encoding
-const exportVersion = 6
-
-// exportInlined enables the export of inlined function bodies and related
-// dependencies. The compiler should work w/o any loss of functionality with
-// the flag disabled, but the generated code will lose access to inlined
-// function bodies across packages, leading to performance bugs.
-// Leave for debugging.
-const exportInlined = true // default: true
-
-// trackAllTypes enables cycle tracking for all types, not just named
-// types. The existing compiler invariants assume that unnamed types
-// that are not completely set up are not used, or else there are spurious
-// errors.
-// If disabled, only named types are tracked, possibly leading to slightly
-// less efficient encoding in rare cases. It also prevents the export of
-// some corner-case type declarations (but those were not handled correctly
-// with the former textual export format either).
-// Note that when a type is only seen once, as many unnamed types are,
-// it is less efficient to track it, since we then also record an index for it.
-// See CLs 41622 and 41623 for some data and discussion.
-// TODO(gri) enable selectively and remove once issues caused by it are fixed
-const trackAllTypes = false
-
type exporter struct {
- out *bufio.Writer
-
- // object -> index maps, indexed in order of serialization
- strIndex map[string]int
- pathIndex map[string]int
- pkgIndex map[*types.Pkg]int
- typIndex map[*types.Type]int
- funcList []*Func
-
marked map[*types.Type]bool // types already seen by markType
-
- // position encoding
- posInfoFormat bool
- prevFile string
- prevLine int
-
- // debugging support
- written int // bytes written
- indent int // for p.trace
- trace bool
-}
-
-// export writes the exportlist for localpkg to out and returns the number of bytes written.
-func export(out *bufio.Writer, trace bool) int {
- p := exporter{
- out: out,
- strIndex: map[string]int{"": 0}, // empty string is mapped to 0
- pathIndex: map[string]int{"": 0}, // empty path is mapped to 0
- pkgIndex: make(map[*types.Pkg]int),
- typIndex: make(map[*types.Type]int),
- posInfoFormat: true,
- trace: trace,
- }
-
- // write version info
- // The version string must start with "version %d" where %d is the version
- // number. Additional debugging information may follow after a blank; that
- // text is ignored by the importer.
- p.rawStringln(fmt.Sprintf("version %d", exportVersion))
- var debug string
- if debugFormat {
- debug = "debug"
- }
- p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
- p.bool(trackAllTypes)
- p.bool(p.posInfoFormat)
-
- // --- generic export data ---
-
- // populate type map with predeclared "known" types
- predecl := predeclared()
- for index, typ := range predecl {
- p.typIndex[typ] = index
- }
- if len(p.typIndex) != len(predecl) {
- Fatalf("exporter: duplicate entries in type map?")
- }
-
- // write package data
- if localpkg.Path != "" {
- Fatalf("exporter: local package path not empty: %q", localpkg.Path)
- }
- p.pkg(localpkg)
- if p.trace {
- p.tracef("\n")
- }
-
- // export objects
- //
- // We've already added all exported (package-level) objects to
- // exportlist. These objects represent all information
- // required to import this package and type-check against it;
- // i.e., this is the platform-independent export data. The
- // format is generic in the sense that different compilers can
- // use the same representation.
- //
- // However, due to inlineable function and their dependencies,
- // we may need to export (or possibly reexport) additional
- // objects. We handle these objects separately. This data is
- // platform-specific as it depends on the inlining decisions
- // of the compiler and the representation of the inlined
- // function bodies.
-
- // Remember initial exportlist length.
- numglobals := len(exportlist)
-
- // Phase 0: Mark all inlineable functions that an importing
- // package could call. This is done by tracking down all
- // inlineable methods reachable from exported declarations.
- //
- // Along the way, we add to exportlist any function and
- // variable declarations needed by the inline bodies.
- if exportInlined {
- p.marked = make(map[*types.Type]bool)
- for _, n := range exportlist {
- sym := n.Sym
- p.markType(asNode(sym.Def).Type)
- }
- p.marked = nil
- }
-
- // Phase 1: Export package-level objects.
- objcount := 0
- for _, n := range exportlist[:numglobals] {
- sym := n.Sym
-
- // TODO(gri) Closures have dots in their names;
- // e.g., TestFloatZeroValue.func1 in math/big tests.
- if strings.Contains(sym.Name, ".") {
- Fatalf("exporter: unexpected symbol: %v", sym)
- }
-
- if sym.Def == nil {
- Fatalf("exporter: unknown export symbol: %v", sym)
- }
-
- // TODO(gri) Optimization: Probably worthwhile collecting
- // long runs of constants and export them "in bulk" (saving
- // tags and types, and making import faster).
-
- if p.trace {
- p.tracef("\n")
- }
- p.obj(sym)
- objcount++
- }
-
- // indicate end of list
- if p.trace {
- p.tracef("\n")
- }
- p.tag(endTag)
-
- // for self-verification only (redundant)
- p.int(objcount)
-
- // --- compiler-specific export data ---
-
- if p.trace {
- p.tracef("\n--- compiler-specific export data ---\n[ ")
- if p.indent != 0 {
- Fatalf("exporter: incorrect indentation")
- }
- }
-
- // write compiler-specific flags
- if p.trace {
- p.tracef("\n")
- }
-
- // Phase 2: Export objects added to exportlist during phase 0.
- objcount = 0
- for _, n := range exportlist[numglobals:] {
- sym := n.Sym
-
- // TODO(gri) The rest of this loop body is identical with
- // the loop body above. Leave alone for now since there
- // are different optimization opportunities, but factor
- // eventually.
-
- // TODO(gri) Closures have dots in their names;
- // e.g., TestFloatZeroValue.func1 in math/big tests.
- if strings.Contains(sym.Name, ".") {
- Fatalf("exporter: unexpected symbol: %v", sym)
- }
-
- if sym.Def == nil {
- Fatalf("exporter: unknown export symbol: %v", sym)
- }
-
- // TODO(gri) Optimization: Probably worthwhile collecting
- // long runs of constants and export them "in bulk" (saving
- // tags and types, and making import faster).
-
- if p.trace {
- p.tracef("\n")
- }
-
- if IsAlias(sym) {
- Fatalf("exporter: unexpected type alias %v in inlined function body", sym)
- }
-
- p.obj(sym)
- objcount++
- }
-
- // indicate end of list
- if p.trace {
- p.tracef("\n")
- }
- p.tag(endTag)
-
- // for self-verification only (redundant)
- p.int(objcount)
-
- // --- inlined function bodies ---
-
- if p.trace {
- p.tracef("\n--- inlined function bodies ---\n")
- if p.indent != 0 {
- Fatalf("exporter: incorrect indentation")
- }
- }
-
- // write inlineable function bodies
- // Don't use range since funcList may grow.
- objcount = 0
- for i := 0; i < len(p.funcList); i++ {
- if f := p.funcList[i]; f.ExportInline() {
- // function has inlineable body:
- // write index and body
- if p.trace {
- p.tracef("\n----\nfunc { %#v }\n", asNodes(f.Inl.Body))
- }
- p.int(i)
- p.int(int(f.Inl.Cost))
- p.stmtList(asNodes(f.Inl.Body))
- if p.trace {
- p.tracef("\n")
- }
- objcount++
- }
- }
-
- // indicate end of list
- if p.trace {
- p.tracef("\n")
- }
- p.int(-1) // invalid index terminates list
-
- // for self-verification only (redundant)
- p.int(objcount)
-
- if p.trace {
- p.tracef("\n--- end ---\n")
- }
-
- // --- end of export data ---
-
- return p.written
-}
-
-func (p *exporter) pkg(pkg *types.Pkg) {
- if pkg == nil {
- Fatalf("exporter: unexpected nil pkg")
- }
-
- // if we saw the package before, write its index (>= 0)
- if i, ok := p.pkgIndex[pkg]; ok {
- p.index('P', i)
- return
- }
-
- // otherwise, remember the package, write the package tag (< 0) and package data
- if p.trace {
- p.tracef("P%d = { ", len(p.pkgIndex))
- defer p.tracef("} ")
- }
- p.pkgIndex[pkg] = len(p.pkgIndex)
-
- p.tag(packageTag)
- p.string(pkg.Name)
- p.path(pkg.Path)
- p.int(pkg.Height)
-}
-
-func unidealType(typ *types.Type, val Val) *types.Type {
- // Untyped (ideal) constants get their own type. This decouples
- // the constant type from the encoding of the constant value.
- if typ == nil || typ.IsUntyped() {
- typ = untype(val.Ctype())
- }
- return typ
}
// markType recursively visits types reachable from t to identify
@@ -508,1287 +78,11 @@ func (p *exporter) markType(t *types.Type) {
}
}
-func (p *exporter) obj(sym *types.Sym) {
- // Exported objects may be from different packages because they
- // may be re-exported via an exported alias or as dependencies in
- // exported inlined function bodies. Thus, exported object names
- // must be fully qualified.
- //
- // (This can only happen for aliased objects or during phase 2
- // (exportInlined enabled) of object export. Unaliased Objects
- // exported in phase 1 (compiler-indendepent objects) are by
- // definition only the objects from the current package and not
- // pulled in via inlined function bodies. In that case the package
- // qualifier is not needed. Possible space optimization.)
-
- n := asNode(sym.Def)
- switch n.Op {
- case OLITERAL:
- // constant
- // TODO(gri) determine if we need the typecheck call here
- n = typecheck(n, Erv)
- if n == nil || n.Op != OLITERAL {
- Fatalf("exporter: dumpexportconst: oconst nil: %v", sym)
- }
-
- p.tag(constTag)
- p.pos(n.Pos)
- // TODO(gri) In inlined functions, constants are used directly
- // so they should never occur as re-exported objects. We may
- // not need the qualified name here. See also comment above.
- // Possible space optimization.
- p.qualifiedName(sym)
- p.typ(unidealType(n.Type, n.Val()))
- p.value(n.Val())
-
- case OTYPE:
- // named type
- t := n.Type
- if t.Etype == TFORW {
- Fatalf("exporter: export of incomplete type %v", sym)
- }
-
- if IsAlias(sym) {
- p.tag(aliasTag)
- p.pos(n.Pos)
- p.qualifiedName(sym)
- } else {
- p.tag(typeTag)
- }
- p.typ(t)
-
- case ONAME:
- // variable or function
- n = typecheck(n, Erv|Ecall)
- if n == nil || n.Type == nil {
- Fatalf("exporter: variable/function exported but not defined: %v", sym)
- }
-
- if n.Type.Etype == TFUNC && n.Class() == PFUNC {
- // function
- p.tag(funcTag)
- p.pos(n.Pos)
- p.qualifiedName(sym)
-
- sig := asNode(sym.Def).Type
-
- // Theoretically, we only need numbered
- // parameters if we're supplying an inline
- // function body. However, it's possible to
- // import a function from a package that
- // didn't supply the inline body, and then
- // another that did. In this case, we would
- // need to rename the parameters during
- // import, which is a little sketchy.
- //
- // For simplicity, just always number
- // parameters.
- p.paramList(sig.Params(), true)
- p.paramList(sig.Results(), true)
-
- p.funcList = append(p.funcList, asNode(sym.Def).Func)
- } else {
- // variable
- p.tag(varTag)
- p.pos(n.Pos)
- p.qualifiedName(sym)
- p.typ(asNode(sym.Def).Type)
- }
-
- default:
- Fatalf("exporter: unexpected export symbol: %v %v", n.Op, sym)
- }
-}
-
// deltaNewFile is a magic line delta offset indicating a new file.
// We use -64 because it is rare; see issue 20080 and CL 41619.
// -64 is the smallest int that fits in a single byte as a varint.
const deltaNewFile = -64
-func (p *exporter) pos(pos src.XPos) {
- if !p.posInfoFormat {
- return
- }
-
- file, line := fileLine(pos)
- if file == p.prevFile {
- // common case: write line delta
- // delta == deltaNewFile means different file
- // if the actual line delta is deltaNewFile,
- // follow up with a negative int to indicate that.
- // only non-negative ints can follow deltaNewFile
- // when writing a new file.
- delta := line - p.prevLine
- p.int(delta)
- if delta == deltaNewFile {
- p.int(-1) // -1 means no file change
- }
- } else {
- // different file
- p.int(deltaNewFile)
- p.int(line) // line >= 0
- p.path(file)
- p.prevFile = file
- }
- p.prevLine = line
-}
-
-func (p *exporter) path(s string) {
- if i, ok := p.pathIndex[s]; ok {
- // Note: Using p.index(i) here requires the use of p.tag(-len(c)) below
- // to get matching debug markers ('t'). But in trace mode p.tag
- // assumes that the tag argument is a valid tag that can be looked
- // up in the tagString list, rather then some arbitrary slice length.
- // Use p.int instead.
- p.int(i) // i >= 0
- return
- }
- p.pathIndex[s] = len(p.pathIndex)
- c := strings.Split(s, "/")
- p.int(-len(c)) // -len(c) < 0
- for _, x := range c {
- p.string(x)
- }
-}
-
-func fileLine(pos0 src.XPos) (file string, line int) {
- pos := Ctxt.PosTable.Pos(pos0)
- file = pos.Base().AbsFilename()
- line = int(pos.RelLine())
- return
-}
-
-func (p *exporter) typ(t *types.Type) {
- if t == nil {
- Fatalf("exporter: nil type")
- }
-
- // Possible optimization: Anonymous pointer types *T where
- // T is a named type are common. We could canonicalize all
- // such types *T to a single type PT = *T. This would lead
- // to at most one *T entry in typIndex, and all future *T's
- // would be encoded as the respective index directly. Would
- // save 1 byte (pointerTag) per *T and reduce the typIndex
- // size (at the cost of a canonicalization map). We can do
- // this later, without encoding format change.
-
- // if we saw the type before, write its index (>= 0)
- if i, ok := p.typIndex[t]; ok {
- p.index('T', i)
- return
- }
-
- // otherwise, remember the type, write the type tag (< 0) and type data
- if trackAllTypes {
- if p.trace {
- p.tracef("T%d = {>\n", len(p.typIndex))
- defer p.tracef("<\n} ")
- }
- p.typIndex[t] = len(p.typIndex)
- }
-
- // pick off named types
- if tsym := t.Sym; tsym != nil {
- if !trackAllTypes {
- // if we don't track all types, track named types now
- p.typIndex[t] = len(p.typIndex)
- }
-
- // Predeclared types should have been found in the type map.
- if t.Orig == t {
- Fatalf("exporter: predeclared type missing from type map?")
- }
-
- n := typenod(t)
- if n.Type != t {
- Fatalf("exporter: named type definition incorrectly set up")
- }
-
- p.tag(namedTag)
- p.pos(n.Pos)
- p.qualifiedName(tsym)
-
- // write underlying type
- p.typ(t.Orig)
-
- // interfaces don't have associated methods
- if t.Orig.IsInterface() {
- return
- }
-
- // sort methods for reproducible export format
- // TODO(gri) Determine if they are already sorted
- // in which case we can drop this step.
- var methods []*types.Field
- methods = append(methods, t.Methods().Slice()...)
- sort.Sort(methodbyname(methods))
- p.int(len(methods))
-
- if p.trace && len(methods) > 0 {
- p.tracef("associated methods {>")
- }
-
- for _, m := range methods {
- if p.trace {
- p.tracef("\n")
- }
- if strings.Contains(m.Sym.Name, ".") {
- Fatalf("invalid symbol name: %s (%v)", m.Sym.Name, m.Sym)
- }
-
- p.pos(m.Pos)
- p.fieldSym(m.Sym, false)
-
- sig := m.Type
- mfn := asNode(sig.FuncType().Nname)
-
- // See comment in (*exporter).obj about
- // numbered parameters.
- p.paramList(sig.Recvs(), true)
- p.paramList(sig.Params(), true)
- p.paramList(sig.Results(), true)
- p.bool(m.Nointerface()) // record go:nointerface pragma value (see also #16243)
-
- p.funcList = append(p.funcList, mfn.Func)
- }
-
- if p.trace && len(methods) > 0 {
- p.tracef("<\n} ")
- }
-
- return
- }
-
- // otherwise we have a type literal
- switch t.Etype {
- case TARRAY:
- if t.IsDDDArray() {
- Fatalf("array bounds should be known at export time: %v", t)
- }
- p.tag(arrayTag)
- p.int64(t.NumElem())
- p.typ(t.Elem())
-
- case TSLICE:
- p.tag(sliceTag)
- p.typ(t.Elem())
-
- case TDDDFIELD:
- // see p.param use of TDDDFIELD
- p.tag(dddTag)
- p.typ(t.DDDField())
-
- case TSTRUCT:
- p.tag(structTag)
- p.fieldList(t)
-
- case TPTR32, TPTR64: // could use Tptr but these are constants
- p.tag(pointerTag)
- p.typ(t.Elem())
-
- case TFUNC:
- p.tag(signatureTag)
- p.paramList(t.Params(), false)
- p.paramList(t.Results(), false)
-
- case TINTER:
- p.tag(interfaceTag)
- p.methodList(t)
-
- case TMAP:
- p.tag(mapTag)
- p.typ(t.Key())
- p.typ(t.Elem())
-
- case TCHAN:
- p.tag(chanTag)
- p.int(int(t.ChanDir()))
- p.typ(t.Elem())
-
- default:
- Fatalf("exporter: unexpected type: %v (Etype = %d)", t, t.Etype)
- }
-}
-
-func (p *exporter) qualifiedName(sym *types.Sym) {
- p.string(sym.Name)
- p.pkg(sym.Pkg)
-}
-
-func (p *exporter) fieldList(t *types.Type) {
- if p.trace && t.NumFields() > 0 {
- p.tracef("fields {>")
- defer p.tracef("<\n} ")
- }
-
- p.int(t.NumFields())
- for _, f := range t.Fields().Slice() {
- if p.trace {
- p.tracef("\n")
- }
- p.field(f)
- }
-}
-
-func (p *exporter) field(f *types.Field) {
- p.pos(f.Pos)
- p.fieldName(f)
- p.typ(f.Type)
- p.string(f.Note)
-}
-
-func (p *exporter) methodList(t *types.Type) {
- var embeddeds, methods []*types.Field
-
- for _, m := range t.Methods().Slice() {
- if m.Sym != nil {
- methods = append(methods, m)
- } else {
- embeddeds = append(embeddeds, m)
- }
- }
-
- if p.trace && len(embeddeds) > 0 {
- p.tracef("embeddeds {>")
- }
- p.int(len(embeddeds))
- for _, m := range embeddeds {
- if p.trace {
- p.tracef("\n")
- }
- p.pos(m.Pos)
- p.typ(m.Type)
- }
- if p.trace && len(embeddeds) > 0 {
- p.tracef("<\n} ")
- }
-
- if p.trace && len(methods) > 0 {
- p.tracef("methods {>")
- }
- p.int(len(methods))
- for _, m := range methods {
- if p.trace {
- p.tracef("\n")
- }
- p.method(m)
- }
- if p.trace && len(methods) > 0 {
- p.tracef("<\n} ")
- }
-}
-
-func (p *exporter) method(m *types.Field) {
- p.pos(m.Pos)
- p.methodName(m.Sym)
- p.paramList(m.Type.Params(), false)
- p.paramList(m.Type.Results(), false)
-}
-
-func (p *exporter) fieldName(t *types.Field) {
- name := t.Sym.Name
- if t.Embedded != 0 {
- // anonymous field - we distinguish between 3 cases:
- // 1) field name matches base type name and is exported
- // 2) field name matches base type name and is not exported
- // 3) field name doesn't match base type name (alias name)
- bname := basetypeName(t.Type)
- if name == bname {
- if types.IsExported(name) {
- name = "" // 1) we don't need to know the field name or package
- } else {
- name = "?" // 2) use unexported name "?" to force package export
- }
- } else {
- // 3) indicate alias and export name as is
- // (this requires an extra "@" but this is a rare case)
- p.string("@")
- }
- }
- p.string(name)
- if name != "" && !types.IsExported(name) {
- p.pkg(t.Sym.Pkg)
- }
-}
-
-// methodName is like qualifiedName but it doesn't record the package for exported names.
-func (p *exporter) methodName(sym *types.Sym) {
- p.string(sym.Name)
- if !types.IsExported(sym.Name) {
- p.pkg(sym.Pkg)
- }
-}
-
-func basetypeName(t *types.Type) string {
- s := t.Sym
- if s == nil && t.IsPtr() {
- s = t.Elem().Sym // deref
- }
- if s != nil {
- return s.Name
- }
- return "" // unnamed type
-}
-
-func (p *exporter) paramList(params *types.Type, numbered bool) {
- if !params.IsFuncArgStruct() {
- Fatalf("exporter: parameter list expected")
- }
-
- // use negative length to indicate unnamed parameters
- // (look at the first parameter only since either all
- // names are present or all are absent)
- //
- // TODO(gri) If we don't have an exported function
- // body, the parameter names are irrelevant for the
- // compiler (though they may be of use for other tools).
- // Possible space optimization.
- n := params.NumFields()
- if n > 0 && parName(params.Field(0), numbered) == "" {
- n = -n
- }
- p.int(n)
- for _, q := range params.Fields().Slice() {
- p.param(q, n, numbered)
- }
-}
-
-func (p *exporter) param(q *types.Field, n int, numbered bool) {
- t := q.Type
- if q.Isddd() {
- // create a fake type to encode ... just for the p.typ call
- t = types.NewDDDField(t.Elem())
- }
- p.typ(t)
- if n > 0 {
- name := parName(q, numbered)
- if name == "" {
- // Sometimes we see an empty name even for n > 0.
- // This appears to happen for interface methods
- // with _ (blank) parameter names. Make sure we
- // have a proper name and package so we don't crash
- // during import (see also issue #15470).
- // (parName uses "" instead of "?" as in fmt.go)
- // TODO(gri) review parameter name encoding
- name = "_"
- }
- p.string(name)
- if name != "_" {
- // Because of (re-)exported inlined functions
- // the importpkg may not be the package to which this
- // function (and thus its parameter) belongs. We need to
- // supply the parameter package here. We need the package
- // when the function is inlined so we can properly resolve
- // the name. The _ (blank) parameter cannot be accessed, so
- // we don't need to export a package.
- //
- // TODO(gri) This is compiler-specific. Try using importpkg
- // here and then update the symbols if we find an inlined
- // body only. Otherwise, the parameter name is ignored and
- // the package doesn't matter. This would remove an int
- // (likely 1 byte) for each named parameter.
- p.pkg(q.Sym.Pkg)
- }
- }
- // TODO(gri) This is compiler-specific (escape info).
- // Move into compiler-specific section eventually?
- // (Not having escape info causes tests to fail, e.g. runtime GCInfoTest)
- p.string(q.Note)
-}
-
-func parName(f *types.Field, numbered bool) string {
- s := origSym(f.Sym)
- if s == nil {
- return ""
- }
-
- if s.Name == "_" {
- return "_"
- }
-
- // print symbol with Vargen number or not as desired
- name := s.Name
- if strings.Contains(name, ".") {
- Fatalf("invalid symbol name: %s", name)
- }
-
- // Functions that can be inlined use numbered parameters so we can distinguish them
- // from other names in their context after inlining (i.e., the parameter numbering
- // is a form of parameter rewriting). See issue 4326 for an example and test case.
- if numbered {
- if n := asNode(f.Nname); !strings.Contains(name, "·") && n != nil && n.Name.Vargen > 0 {
- name = fmt.Sprintf("%s·%d", name, n.Name.Vargen) // append Vargen
- }
- } else {
- if i := strings.Index(name, "·"); i > 0 {
- name = name[:i] // cut off Vargen
- }
- }
- return name
-}
-
-func (p *exporter) value(x Val) {
- if p.trace {
- p.tracef("= ")
- }
-
- switch x := x.U.(type) {
- case bool:
- tag := falseTag
- if x {
- tag = trueTag
- }
- p.tag(tag)
-
- case *Mpint:
- if minintval[TINT64].Cmp(x) <= 0 && x.Cmp(maxintval[TINT64]) <= 0 {
- // common case: x fits into an int64 - use compact encoding
- p.tag(int64Tag)
- p.int64(x.Int64())
- return
- }
- // uncommon case: large x - use float encoding
- // (powers of 2 will be encoded efficiently with exponent)
- f := newMpflt()
- f.SetInt(x)
- p.tag(floatTag)
- p.float(f)
-
- case *Mpflt:
- p.tag(floatTag)
- p.float(x)
-
- case *Mpcplx:
- p.tag(complexTag)
- p.float(&x.Real)
- p.float(&x.Imag)
-
- case string:
- p.tag(stringTag)
- p.string(x)
-
- case *NilVal:
- // not a constant but used in exported function bodies
- p.tag(nilTag)
-
- default:
- Fatalf("exporter: unexpected value %v (%T)", x, x)
- }
-}
-
-func (p *exporter) float(x *Mpflt) {
- // extract sign (there is no -0)
- f := &x.Val
- sign := f.Sign()
- if sign == 0 {
- // x == 0
- p.int(0)
- return
- }
- // x != 0
-
- // extract exponent such that 0.5 <= m < 1.0
- var m big.Float
- exp := f.MantExp(&m)
-
- // extract mantissa as *big.Int
- // - set exponent large enough so mant satisfies mant.IsInt()
- // - get *big.Int from mant
- m.SetMantExp(&m, int(m.MinPrec()))
- mant, acc := m.Int(nil)
- if acc != big.Exact {
- Fatalf("exporter: internal error")
- }
-
- p.int(sign)
- p.int(exp)
- p.string(string(mant.Bytes()))
-}
-
-// ----------------------------------------------------------------------------
-// Inlined function bodies
-
-// Approach: More or less closely follow what fmt.go is doing for FExp mode
-// but instead of emitting the information textually, emit the node tree in
-// binary form.
-
-// TODO(gri) Improve tracing output. The current format is difficult to read.
-
-// stmtList may emit more (or fewer) than len(list) nodes.
-func (p *exporter) stmtList(list Nodes) {
- if p.trace {
- if list.Len() == 0 {
- p.tracef("{}")
- } else {
- p.tracef("{>")
- defer p.tracef("<\n}")
- }
- }
-
- for _, n := range list.Slice() {
- if p.trace {
- p.tracef("\n")
- }
- // TODO inlining produces expressions with ninits. we can't export these yet.
- // (from fmt.go:1461ff)
- p.node(n)
- }
-
- p.op(OEND)
-}
-
-func (p *exporter) node(n *Node) {
- if opprec[n.Op] < 0 {
- p.stmt(n)
- } else {
- p.expr(n)
- }
-}
-
-func (p *exporter) exprList(list Nodes) {
- if p.trace {
- if list.Len() == 0 {
- p.tracef("{}")
- } else {
- p.tracef("{>")
- defer p.tracef("<\n}")
- }
- }
-
- for _, n := range list.Slice() {
- if p.trace {
- p.tracef("\n")
- }
- p.expr(n)
- }
-
- p.op(OEND)
-}
-
-func (p *exporter) elemList(list Nodes) {
- if p.trace {
- p.tracef("[ ")
- }
- p.int(list.Len())
- if p.trace {
- if list.Len() == 0 {
- p.tracef("] {}")
- } else {
- p.tracef("] {>")
- defer p.tracef("<\n}")
- }
- }
-
- for _, n := range list.Slice() {
- if p.trace {
- p.tracef("\n")
- }
- p.fieldSym(n.Sym, false)
- p.expr(n.Left)
- }
-}
-
-func (p *exporter) expr(n *Node) {
- if p.trace {
- p.tracef("( ")
- defer p.tracef(") ")
- }
-
- // from nodefmt (fmt.go)
- //
- // nodefmt reverts nodes back to their original - we don't need to do
- // it because we are not bound to produce valid Go syntax when exporting
- //
- // if (fmtmode != FExp || n.Op != OLITERAL) && n.Orig != nil {
- // n = n.Orig
- // }
-
- // from exprfmt (fmt.go)
- for n != nil && n.Implicit() && (n.Op == OIND || n.Op == OADDR) {
- n = n.Left
- }
-
- switch op := n.Op; op {
- // expressions
- // (somewhat closely following the structure of exprfmt in fmt.go)
- case OPAREN:
- p.expr(n.Left) // unparen
-
- // case ODDDARG:
- // unimplemented - handled by default case
-
- case OLITERAL:
- if n.Val().Ctype() == CTNIL && n.Orig != nil && n.Orig != n {
- p.expr(n.Orig)
- break
- }
- p.op(OLITERAL)
- p.pos(n.Pos)
- p.typ(unidealType(n.Type, n.Val()))
- p.value(n.Val())
-
- case ONAME:
- // Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method,
- // but for export, this should be rendered as (*pkg.T).meth.
- // These nodes have the special property that they are names with a left OTYPE and a right ONAME.
- if n.isMethodExpression() {
- p.op(OXDOT)
- p.pos(n.Pos)
- p.expr(n.Left) // n.Left.Op == OTYPE
- p.fieldSym(n.Right.Sym, true)
- break
- }
-
- p.op(ONAME)
- p.pos(n.Pos)
- p.sym(n)
-
- // case OPACK, ONONAME:
- // should have been resolved by typechecking - handled by default case
-
- case OTYPE:
- p.op(OTYPE)
- p.pos(n.Pos)
- p.typ(n.Type)
-
- // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
- // should have been resolved by typechecking - handled by default case
-
- // case OCLOSURE:
- // unimplemented - handled by default case
-
- // case OCOMPLIT:
- // should have been resolved by typechecking - handled by default case
-
- case OPTRLIT:
- p.op(OPTRLIT)
- p.pos(n.Pos)
- p.expr(n.Left)
- p.bool(n.Implicit())
-
- case OSTRUCTLIT:
- p.op(OSTRUCTLIT)
- p.pos(n.Pos)
- p.typ(n.Type)
- p.elemList(n.List) // special handling of field names
-
- case OARRAYLIT, OSLICELIT, OMAPLIT:
- p.op(OCOMPLIT)
- p.pos(n.Pos)
- p.typ(n.Type)
- p.exprList(n.List)
-
- case OKEY:
- p.op(OKEY)
- p.pos(n.Pos)
- p.exprsOrNil(n.Left, n.Right)
-
- // case OSTRUCTKEY:
- // unreachable - handled in case OSTRUCTLIT by elemList
-
- // case OCALLPART:
- // unimplemented - handled by default case
-
- case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
- p.op(OXDOT)
- p.pos(n.Pos)
- p.expr(n.Left)
- p.fieldSym(n.Sym, true)
-
- case ODOTTYPE, ODOTTYPE2:
- p.op(ODOTTYPE)
- p.pos(n.Pos)
- p.expr(n.Left)
- p.typ(n.Type)
-
- case OINDEX, OINDEXMAP:
- p.op(OINDEX)
- p.pos(n.Pos)
- p.expr(n.Left)
- p.expr(n.Right)
-
- case OSLICE, OSLICESTR, OSLICEARR:
- p.op(OSLICE)
- p.pos(n.Pos)
- p.expr(n.Left)
- low, high, _ := n.SliceBounds()
- p.exprsOrNil(low, high)
-
- case OSLICE3, OSLICE3ARR:
- p.op(OSLICE3)
- p.pos(n.Pos)
- p.expr(n.Left)
- low, high, max := n.SliceBounds()
- p.exprsOrNil(low, high)
- p.expr(max)
-
- case OCOPY, OCOMPLEX:
- // treated like other builtin calls (see e.g., OREAL)
- p.op(op)
- p.pos(n.Pos)
- p.expr(n.Left)
- p.expr(n.Right)
- p.op(OEND)
-
- case OCONV, OCONVIFACE, OCONVNOP, OARRAYBYTESTR, OARRAYRUNESTR, OSTRARRAYBYTE, OSTRARRAYRUNE, ORUNESTR:
- p.op(OCONV)
- p.pos(n.Pos)
- p.expr(n.Left)
- p.typ(n.Type)
-
- case OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN:
- p.op(op)
- p.pos(n.Pos)
- if n.Left != nil {
- p.expr(n.Left)
- p.op(OEND)
- } else {
- p.exprList(n.List) // emits terminating OEND
- }
- // only append() calls may contain '...' arguments
- if op == OAPPEND {
- p.bool(n.Isddd())
- } else if n.Isddd() {
- Fatalf("exporter: unexpected '...' with %v call", op)
- }
-
- case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
- p.op(OCALL)
- p.pos(n.Pos)
- p.expr(n.Left)
- p.exprList(n.List)
- p.bool(n.Isddd())
-
- case OMAKEMAP, OMAKECHAN, OMAKESLICE:
- p.op(op) // must keep separate from OMAKE for importer
- p.pos(n.Pos)
- p.typ(n.Type)
- switch {
- default:
- // empty list
- p.op(OEND)
- case n.List.Len() != 0: // pre-typecheck
- p.exprList(n.List) // emits terminating OEND
- case n.Right != nil:
- p.expr(n.Left)
- p.expr(n.Right)
- p.op(OEND)
- case n.Left != nil && (n.Op == OMAKESLICE || !n.Left.Type.IsUntyped()):
- p.expr(n.Left)
- p.op(OEND)
- }
-
- // unary expressions
- case OPLUS, OMINUS, OADDR, OCOM, OIND, ONOT, ORECV:
- p.op(op)
- p.pos(n.Pos)
- p.expr(n.Left)
-
- // binary expressions
- case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT,
- OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR:
- p.op(op)
- p.pos(n.Pos)
- p.expr(n.Left)
- p.expr(n.Right)
-
- case OADDSTR:
- p.op(OADDSTR)
- p.pos(n.Pos)
- p.exprList(n.List)
-
- case OCMPSTR, OCMPIFACE:
- p.op(n.SubOp())
- p.pos(n.Pos)
- p.expr(n.Left)
- p.expr(n.Right)
-
- case ODCLCONST:
- // if exporting, DCLCONST should just be removed as its usage
- // has already been replaced with literals
- // TODO(gri) these should not be exported in the first place
- // TODO(gri) why is this considered an expression in fmt.go?
- p.op(ODCLCONST)
- p.pos(n.Pos)
-
- default:
- Fatalf("cannot export %v (%d) node\n"+
- "==> please file an issue and assign to gri@\n", n.Op, int(n.Op))
- }
-}
-
-// Caution: stmt will emit more than one node for statement nodes n that have a non-empty
-// n.Ninit and where n cannot have a natural init section (such as in "if", "for", etc.).
-func (p *exporter) stmt(n *Node) {
- if p.trace {
- p.tracef("( ")
- defer p.tracef(") ")
- }
-
- if n.Ninit.Len() > 0 && !stmtwithinit(n.Op) {
- if p.trace {
- p.tracef("( /* Ninits */ ")
- }
-
- // can't use stmtList here since we don't want the final OEND
- for _, n := range n.Ninit.Slice() {
- p.stmt(n)
- }
-
- if p.trace {
- p.tracef(") ")
- }
- }
-
- switch op := n.Op; op {
- case ODCL:
- p.op(ODCL)
- p.pos(n.Left.Pos) // use declared variable's pos
- p.sym(n.Left)
- p.typ(n.Left.Type)
-
- // case ODCLFIELD:
- // unimplemented - handled by default case
-
- case OAS:
- // Don't export "v = <N>" initializing statements, hope they're always
- // preceded by the DCL which will be re-parsed and typecheck to reproduce
- // the "v = <N>" again.
- if n.Right != nil {
- p.op(OAS)
- p.pos(n.Pos)
- p.expr(n.Left)
- p.expr(n.Right)
- }
-
- case OASOP:
- p.op(OASOP)
- p.pos(n.Pos)
- p.op(n.SubOp())
- p.expr(n.Left)
- if p.bool(!n.Implicit()) {
- p.expr(n.Right)
- }
-
- case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
- p.op(OAS2)
- p.pos(n.Pos)
- p.exprList(n.List)
- p.exprList(n.Rlist)
-
- case ORETURN:
- p.op(ORETURN)
- p.pos(n.Pos)
- p.exprList(n.List)
-
- // case ORETJMP:
- // unreachable - generated by compiler for trampolin routines
-
- case OPROC, ODEFER:
- p.op(op)
- p.pos(n.Pos)
- p.expr(n.Left)
-
- case OIF:
- p.op(OIF)
- p.pos(n.Pos)
- p.stmtList(n.Ninit)
- p.expr(n.Left)
- p.stmtList(n.Nbody)
- p.stmtList(n.Rlist)
-
- case OFOR:
- p.op(OFOR)
- p.pos(n.Pos)
- p.stmtList(n.Ninit)
- p.exprsOrNil(n.Left, n.Right)
- p.stmtList(n.Nbody)
-
- case ORANGE:
- p.op(ORANGE)
- p.pos(n.Pos)
- p.stmtList(n.List)
- p.expr(n.Right)
- p.stmtList(n.Nbody)
-
- case OSELECT, OSWITCH:
- p.op(op)
- p.pos(n.Pos)
- p.stmtList(n.Ninit)
- p.exprsOrNil(n.Left, nil)
- p.stmtList(n.List)
-
- case OCASE, OXCASE:
- p.op(OXCASE)
- p.pos(n.Pos)
- p.stmtList(n.List)
- p.stmtList(n.Nbody)
-
- case OFALL:
- p.op(OFALL)
- p.pos(n.Pos)
-
- case OBREAK, OCONTINUE:
- p.op(op)
- p.pos(n.Pos)
- p.exprsOrNil(n.Left, nil)
-
- case OEMPTY:
- // nothing to emit
-
- case OGOTO, OLABEL:
- p.op(op)
- p.pos(n.Pos)
- p.expr(n.Left)
-
- default:
- Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op)
- }
-}
-
-func (p *exporter) exprsOrNil(a, b *Node) {
- ab := 0
- if a != nil {
- ab |= 1
- }
- if b != nil {
- ab |= 2
- }
- p.int(ab)
- if ab&1 != 0 {
- p.expr(a)
- }
- if ab&2 != 0 {
- p.node(b)
- }
-}
-
-func (p *exporter) fieldSym(s *types.Sym, short bool) {
- name := s.Name
-
- // remove leading "type." in method names ("(T).m" -> "m")
- if short {
- if i := strings.LastIndex(name, "."); i >= 0 {
- name = name[i+1:]
- }
- }
-
- // we should never see a _ (blank) here - these are accessible ("read") fields
- // TODO(gri) can we assert this with an explicit check?
- p.string(name)
- if !types.IsExported(name) {
- p.pkg(s.Pkg)
- }
-}
-
-// sym must encode the _ (blank) identifier as a single string "_" since
-// encoding for some nodes is based on this assumption (e.g. ONAME nodes).
-func (p *exporter) sym(n *Node) {
- s := n.Sym
- if s.Pkg != nil {
- if len(s.Name) > 0 && s.Name[0] == '.' {
- Fatalf("exporter: exporting synthetic symbol %s", s.Name)
- }
- }
-
- if p.trace {
- p.tracef("{ SYM ")
- defer p.tracef("} ")
- }
-
- name := s.Name
-
- // remove leading "type." in method names ("(T).m" -> "m")
- if i := strings.LastIndex(name, "."); i >= 0 {
- name = name[i+1:]
- }
-
- if strings.Contains(name, "·") && n.Name.Vargen > 0 {
- Fatalf("exporter: unexpected · in symbol name")
- }
-
- if i := n.Name.Vargen; i > 0 {
- name = fmt.Sprintf("%s·%d", name, i)
- }
-
- p.string(name)
- if name != "_" {
- p.pkg(s.Pkg)
- }
- // Fixes issue #18167.
- p.string(s.Linkname)
-}
-
-func (p *exporter) bool(b bool) bool {
- if p.trace {
- p.tracef("[")
- defer p.tracef("= %v] ", b)
- }
-
- x := 0
- if b {
- x = 1
- }
- p.int(x)
- return b
-}
-
-func (p *exporter) op(op Op) {
- if p.trace {
- p.tracef("[")
- defer p.tracef("= %v] ", op)
- }
-
- p.int(int(op))
-}
-
-// ----------------------------------------------------------------------------
-// Low-level encoders
-
-func (p *exporter) index(marker byte, index int) {
- if index < 0 {
- Fatalf("exporter: invalid index < 0")
- }
- if debugFormat {
- p.marker('t')
- }
- if p.trace {
- p.tracef("%c%d ", marker, index)
- }
- p.rawInt64(int64(index))
-}
-
-func (p *exporter) tag(tag int) {
- if tag >= 0 {
- Fatalf("exporter: invalid tag >= 0")
- }
- if debugFormat {
- p.marker('t')
- }
- if p.trace {
- p.tracef("%s ", tagString[-tag])
- }
- p.rawInt64(int64(tag))
-}
-
-func (p *exporter) int(x int) {
- p.int64(int64(x))
-}
-
-func (p *exporter) int64(x int64) {
- if debugFormat {
- p.marker('i')
- }
- if p.trace {
- p.tracef("%d ", x)
- }
- p.rawInt64(x)
-}
-
-func (p *exporter) string(s string) {
- if debugFormat {
- p.marker('s')
- }
- if p.trace {
- p.tracef("%q ", s)
- }
- // if we saw the string before, write its index (>= 0)
- // (the empty string is mapped to 0)
- if i, ok := p.strIndex[s]; ok {
- p.rawInt64(int64(i))
- return
- }
- // otherwise, remember string and write its negative length and bytes
- p.strIndex[s] = len(p.strIndex)
- p.rawInt64(-int64(len(s)))
- for i := 0; i < len(s); i++ {
- p.rawByte(s[i])
- }
-}
-
-// marker emits a marker byte and position information which makes
-// it easy for a reader to detect if it is "out of sync". Used only
-// if debugFormat is set.
-func (p *exporter) marker(m byte) {
- p.rawByte(m)
- // Uncomment this for help tracking down the location
- // of an incorrect marker when running in debugFormat.
- // if p.trace {
- // p.tracef("#%d ", p.written)
- // }
- p.rawInt64(int64(p.written))
-}
-
-// rawInt64 should only be used by low-level encoders.
-func (p *exporter) rawInt64(x int64) {
- var tmp [binary.MaxVarintLen64]byte
- n := binary.PutVarint(tmp[:], x)
- for i := 0; i < n; i++ {
- p.rawByte(tmp[i])
- }
-}
-
-// rawStringln should only be used to emit the initial version string.
-func (p *exporter) rawStringln(s string) {
- for i := 0; i < len(s); i++ {
- p.rawByte(s[i])
- }
- p.rawByte('\n')
-}
-
-// rawByte is the bottleneck interface to write to p.out.
-// rawByte escapes b as follows (any encoding does that
-// hides '$'):
-//
-// '$' => '|' 'S'
-// '|' => '|' '|'
-//
-// Necessary so other tools can find the end of the
-// export data by searching for "$$".
-// rawByte should only be used by low-level encoders.
-func (p *exporter) rawByte(b byte) {
- switch b {
- case '$':
- // write '$' as '|' 'S'
- b = 'S'
- fallthrough
- case '|':
- // write '|' as '|' '|'
- p.out.WriteByte('|')
- p.written++
- }
- p.out.WriteByte(b)
- p.written++
-}
-
-// tracef is like fmt.Printf but it rewrites the format string
-// to take care of indentation.
-func (p *exporter) tracef(format string, args ...interface{}) {
- if strings.ContainsAny(format, "<>\n") {
- var buf bytes.Buffer
- for i := 0; i < len(format); i++ {
- // no need to deal with runes
- ch := format[i]
- switch ch {
- case '>':
- p.indent++
- continue
- case '<':
- p.indent--
- continue
- }
- buf.WriteByte(ch)
- if ch == '\n' {
- for j := p.indent; j > 0; j-- {
- buf.WriteString(". ")
- }
- }
- }
- format = buf.String()
- }
- fmt.Printf(format, args...)
-}
-
// ----------------------------------------------------------------------------
// Export format
@@ -1829,44 +123,6 @@ const (
aliasTag
)
-// Debugging support.
-// (tagString is only used when tracing is enabled)
-var tagString = [...]string{
- // Objects
- -packageTag: "package",
- -constTag: "const",
- -typeTag: "type",
- -varTag: "var",
- -funcTag: "func",
- -endTag: "end",
-
- // Types
- -namedTag: "named type",
- -arrayTag: "array",
- -sliceTag: "slice",
- -dddTag: "ddd",
- -structTag: "struct",
- -pointerTag: "pointer",
- -signatureTag: "signature",
- -interfaceTag: "interface",
- -mapTag: "map",
- -chanTag: "chan",
-
- // Values
- -falseTag: "false",
- -trueTag: "true",
- -int64Tag: "int64",
- -floatTag: "float",
- -fractionTag: "fraction",
- -complexTag: "complex",
- -stringTag: "string",
- -nilTag: "nil",
- -unknownTag: "unknown",
-
- // Type aliases
- -aliasTag: "alias",
-}
-
// untype returns the "pseudo" untyped type for a Ctype (import/export use only).
// (we can't use an pre-initialized array because we must be sure all types are
// set up)
diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go
index c19f548e18..7aabae764e 100644
--- a/src/cmd/compile/internal/gc/bimport.go
+++ b/src/cmd/compile/internal/gc/bimport.go
@@ -2,340 +2,19 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Binary package import.
-// See bexport.go for the export data format and how
-// to make a format change.
-
package gc
import (
- "bufio"
"cmd/compile/internal/types"
"cmd/internal/src"
- "encoding/binary"
- "fmt"
- "math/big"
- "strconv"
- "strings"
)
-// The overall structure of Import is symmetric to Export: For each
-// export method in bexport.go there is a matching and symmetric method
-// in bimport.go. Changing the export format requires making symmetric
-// changes to bimport.go and bexport.go.
-
-type importer struct {
- in *bufio.Reader
- imp *types.Pkg // imported package
- buf []byte // reused for reading strings
- version int // export format version
-
- // object lists, in order of deserialization
- strList []string
- pathList []string
- pkgList []*types.Pkg
- typList []*types.Type
- funcList []*Node // nil entry means already declared
- trackAllTypes bool
-
- // for delayed type verification
- cmpList []struct{ pt, t *types.Type }
-
- // position encoding
- posInfoFormat bool
- prevFile string
- prevLine int
- posBase *src.PosBase
-
- // debugging support
- debugFormat bool
- read int // bytes read
-}
-
-// Import populates imp from the serialized package data read from in.
-func Import(imp *types.Pkg, in *bufio.Reader) {
- inimport = true
- defer func() { inimport = false }()
-
- p := importer{
- in: in,
- imp: imp,
- version: -1, // unknown version
- strList: []string{""}, // empty string is mapped to 0
- pathList: []string{""}, // empty path is mapped to 0
- }
-
- // read version info
- var versionstr string
- if b := p.rawByte(); b == 'c' || b == 'd' {
- // Go1.7 encoding; first byte encodes low-level
- // encoding format (compact vs debug).
- // For backward-compatibility only (avoid problems with
- // old installed packages). Newly compiled packages use
- // the extensible format string.
- // TODO(gri) Remove this support eventually; after Go1.8.
- if b == 'd' {
- p.debugFormat = true
- }
- p.trackAllTypes = p.rawByte() == 'a'
- p.posInfoFormat = p.bool()
- versionstr = p.string()
- if versionstr == "v1" {
- p.version = 0
- }
- } else {
- // Go1.8 extensible encoding
- // read version string and extract version number (ignore anything after the version number)
- versionstr = p.rawStringln(b)
- if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
- if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
- p.version = v
- }
- }
- }
-
- // read version specific flags - extend as necessary
- switch p.version {
- // case 7:
- // ...
- // fallthrough
- case 6, 5, 4, 3, 2, 1:
- p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
- p.trackAllTypes = p.bool()
- p.posInfoFormat = p.bool()
- case 0:
- // Go1.7 encoding format - nothing to do here
- default:
- p.formatErrorf("unknown export format version %d (%q)", p.version, versionstr)
- }
-
- // --- generic export data ---
-
- // populate typList with predeclared "known" types
- p.typList = append(p.typList, predeclared()...)
-
- // read package data
- p.pkg()
-
- // defer some type-checking until all types are read in completely
- tcok := typecheckok
- typecheckok = true
- defercheckwidth()
-
- // read objects
-
- // phase 1
- objcount := 0
- for {
- tag := p.tagOrIndex()
- if tag == endTag {
- break
- }
- p.obj(tag)
- objcount++
- }
-
- // self-verification
- if count := p.int(); count != objcount {
- p.formatErrorf("got %d objects; want %d", objcount, count)
- }
-
- // --- compiler-specific export data ---
-
- // read compiler-specific flags
-
- // phase 2
- objcount = 0
- for {
- tag := p.tagOrIndex()
- if tag == endTag {
- break
- }
- p.obj(tag)
- objcount++
- }
-
- // self-verification
- if count := p.int(); count != objcount {
- p.formatErrorf("got %d objects; want %d", objcount, count)
- }
-
- // read inlineable functions bodies
- if dclcontext != PEXTERN {
- p.formatErrorf("unexpected context %d", dclcontext)
- }
-
- objcount = 0
- for i0 := -1; ; {
- i := p.int() // index of function with inlineable body
- if i < 0 {
- break
- }
-
- // don't process the same function twice
- if i <= i0 {
- p.formatErrorf("index not increasing: %d <= %d", i, i0)
- }
- i0 = i
-
- if Curfn != nil {
- p.formatErrorf("unexpected Curfn %v", Curfn)
- }
-
- // Note: In the original code, funchdr and funcbody are called for
- // all functions (that were not yet imported). Now, we are calling
- // them only for functions with inlineable bodies. funchdr does
- // parameter renaming which doesn't matter if we don't have a body.
-
- inlCost := p.int()
- if f := p.funcList[i]; f != nil && f.Func.Inl == nil {
- // function not yet imported - read body and set it
- funchdr(f)
- body := p.stmtList()
- funcbody()
- f.Func.Inl = &Inline{
- Cost: int32(inlCost),
- Body: body,
- }
- importlist = append(importlist, f)
- if Debug['E'] > 0 && Debug['m'] > 2 {
- if Debug['m'] > 3 {
- fmt.Printf("inl body for %v: %+v\n", f, asNodes(body))
- } else {
- fmt.Printf("inl body for %v: %v\n", f, asNodes(body))
- }
- }
- } else {
- // function already imported - read body but discard declarations
- dclcontext = PDISCARD // throw away any declarations
- p.stmtList()
- dclcontext = PEXTERN
- }
-
- objcount++
- }
-
- // self-verification
- if count := p.int(); count != objcount {
- p.formatErrorf("got %d functions; want %d", objcount, count)
- }
-
- if dclcontext != PEXTERN {
- p.formatErrorf("unexpected context %d", dclcontext)
- }
-
- p.verifyTypes()
-
- // --- end of export data ---
-
- typecheckok = tcok
- resumecheckwidth()
-
- if debug_dclstack != 0 {
- testdclstack()
- }
-}
-
-func (p *importer) formatErrorf(format string, args ...interface{}) {
- if debugFormat {
- Fatalf(format, args...)
- }
-
- yyerror("cannot import %q due to version skew - reinstall package (%s)",
- p.imp.Path, fmt.Sprintf(format, args...))
- errorexit()
-}
-
-func (p *importer) verifyTypes() {
- for _, pair := range p.cmpList {
- pt := pair.pt
- t := pair.t
- if !eqtype(pt.Orig, t) {
- p.formatErrorf("inconsistent definition for type %v during import\n\t%L (in %q)\n\t%L (in %q)", pt.Sym, pt, pt.Sym.Importdef.Path, t, p.imp.Path)
- }
- }
-}
-
// numImport tracks how often a package with a given name is imported.
// It is used to provide a better error message (by using the package
// path to disambiguate) if a package that appears multiple times with
// the same name appears in an error message.
var numImport = make(map[string]int)
-func (p *importer) pkg() *types.Pkg {
- // if the package was seen before, i is its index (>= 0)
- i := p.tagOrIndex()
- if i >= 0 {
- return p.pkgList[i]
- }
-
- // otherwise, i is the package tag (< 0)
- if i != packageTag {
- p.formatErrorf("expected package tag, found tag = %d", i)
- }
-
- // read package data
- name := p.string()
- var path string
- if p.version >= 5 {
- path = p.path()
- } else {
- path = p.string()
- }
- var height int
- if p.version >= 6 {
- height = p.int()
- }
-
- // we should never see an empty package name
- if name == "" {
- p.formatErrorf("empty package name for path %q", path)
- }
-
- // we should never see a bad import path
- if isbadimport(path, true) {
- p.formatErrorf("bad package path %q for package %s", path, name)
- }
-
- // an empty path denotes the package we are currently importing;
- // it must be the first package we see
- if (path == "") != (len(p.pkgList) == 0) {
- p.formatErrorf("package path %q for pkg index %d", path, len(p.pkgList))
- }
-
- if p.version >= 6 {
- if height < 0 || height >= types.MaxPkgHeight {
- p.formatErrorf("bad package height %v for package %s", height, name)
- }
-
- // reexported packages should always have a lower height than
- // the main package
- if len(p.pkgList) != 0 && height >= p.imp.Height {
- p.formatErrorf("package %q (height %d) reexports package %q (height %d)", p.imp.Path, p.imp.Height, path, height)
- }
- }
-
- // add package to pkgList
- pkg := p.imp
- if path != "" {
- pkg = types.NewPkg(path, "")
- }
- if pkg.Name == "" {
- pkg.Name = name
- numImport[name]++
- } else if pkg.Name != name {
- yyerror("conflicting package names %s and %s for path %q", pkg.Name, name, path)
- }
- if myimportpath != "" && path == myimportpath {
- yyerror("import %q: package depends on %q (import cycle)", p.imp.Path, path)
- errorexit()
- }
- pkg.Height = height
- p.pkgList = append(p.pkgList, pkg)
-
- return pkg
-}
-
func idealType(typ *types.Type) *types.Type {
switch typ {
case types.Idealint, types.Idealrune, types.Idealfloat, types.Idealcomplex:
@@ -345,1013 +24,11 @@ func idealType(typ *types.Type) *types.Type {
return typ
}
-func (p *importer) obj(tag int) {
- switch tag {
- case constTag:
- pos := p.pos()
- sym := p.qualifiedName()
- typ := p.typ()
- val := p.value(typ)
- importconst(p.imp, pos, sym, idealType(typ), val)
-
- case aliasTag:
- pos := p.pos()
- sym := p.qualifiedName()
- typ := p.typ()
- importalias(p.imp, pos, sym, typ)
-
- case typeTag:
- p.typ()
-
- case varTag:
- pos := p.pos()
- sym := p.qualifiedName()
- typ := p.typ()
- importvar(p.imp, pos, sym, typ)
-
- case funcTag:
- pos := p.pos()
- sym := p.qualifiedName()
- params := p.paramList()
- result := p.paramList()
-
- sig := functypefield(nil, params, result)
- importfunc(p.imp, pos, sym, sig)
- p.funcList = append(p.funcList, asNode(sym.Def))
-
- default:
- p.formatErrorf("unexpected object (tag = %d)", tag)
- }
-}
-
-func (p *importer) pos() src.XPos {
- if !p.posInfoFormat {
- return src.NoXPos
- }
-
- file := p.prevFile
- line := p.prevLine
- delta := p.int()
- line += delta
- if p.version >= 5 {
- if delta == deltaNewFile {
- if n := p.int(); n >= 0 {
- // file changed
- file = p.path()
- line = n
- }
- }
- } else {
- if delta == 0 {
- if n := p.int(); n >= 0 {
- // file changed
- file = p.prevFile[:n] + p.string()
- line = p.int()
- }
- }
- }
- if file != p.prevFile {
- p.prevFile = file
- p.posBase = src.NewFileBase(file, file)
- }
- p.prevLine = line
-
- pos := src.MakePos(p.posBase, uint(line), 0)
- xpos := Ctxt.PosTable.XPos(pos)
- return xpos
-}
-
-func (p *importer) path() string {
- // if the path was seen before, i is its index (>= 0)
- // (the empty string is at index 0)
- i := p.int()
- if i >= 0 {
- return p.pathList[i]
- }
- // otherwise, i is the negative path length (< 0)
- a := make([]string, -i)
- for n := range a {
- a[n] = p.string()
- }
- s := strings.Join(a, "/")
- p.pathList = append(p.pathList, s)
- return s
-}
-
-func (p *importer) newtyp(etype types.EType) *types.Type {
- t := types.New(etype)
- if p.trackAllTypes {
- p.typList = append(p.typList, t)
- }
- return t
-}
-
-// importtype declares that pt, an imported named type, has underlying type t.
-func (p *importer) importtype(pt, t *types.Type) {
- if pt.Etype == TFORW {
- copytype(typenod(pt), t)
- checkwidth(pt)
- } else {
- // pt.Orig and t must be identical.
- if p.trackAllTypes {
- // If we track all types, t may not be fully set up yet.
- // Collect the types and verify identity later.
- p.cmpList = append(p.cmpList, struct{ pt, t *types.Type }{pt, t})
- } else if !eqtype(pt.Orig, t) {
- yyerror("inconsistent definition for type %v during import\n\t%L (in %q)\n\t%L (in %q)", pt.Sym, pt, pt.Sym.Importdef.Path, t, p.imp.Path)
- }
- }
-
- if Debug['E'] != 0 {
- fmt.Printf("import type %v %L\n", pt, t)
- }
-}
-
-func (p *importer) typ() *types.Type {
- // if the type was seen before, i is its index (>= 0)
- i := p.tagOrIndex()
- if i >= 0 {
- return p.typList[i]
- }
-
- // otherwise, i is the type tag (< 0)
- var t *types.Type
- switch i {
- case namedTag:
- pos := p.pos()
- tsym := p.qualifiedName()
-
- t = importtype(p.imp, pos, tsym)
- p.typList = append(p.typList, t)
- dup := !t.IsKind(types.TFORW) // type already imported
-
- // read underlying type
- t0 := p.typ()
- p.importtype(t, t0)
-
- // interfaces don't have associated methods
- if t0.IsInterface() {
- break
- }
-
- // set correct import context (since p.typ() may be called
- // while importing the body of an inlined function)
- savedContext := dclcontext
- dclcontext = PEXTERN
-
- // read associated methods
- for i := p.int(); i > 0; i-- {
- mpos := p.pos()
- sym := p.fieldSym()
-
- // during import unexported method names should be in the type's package
- if !types.IsExported(sym.Name) && sym.Pkg != tsym.Pkg {
- Fatalf("imported method name %+v in wrong package %s\n", sym, tsym.Pkg.Name)
- }
-
- recv := p.paramList() // TODO(gri) do we need a full param list for the receiver?
- params := p.paramList()
- result := p.paramList()
- nointerface := p.bool()
-
- mt := functypefield(recv[0], params, result)
- oldm := addmethod(sym, mt, false, nointerface)
-
- if dup {
- // An earlier import already declared this type and its methods.
- // Discard the duplicate method declaration.
- n := asNode(oldm.Type.Nname())
- p.funcList = append(p.funcList, n)
- continue
- }
-
- n := newfuncnamel(mpos, methodSym(recv[0].Type, sym))
- n.Type = mt
- n.SetClass(PFUNC)
- checkwidth(n.Type)
- p.funcList = append(p.funcList, n)
-
- // (comment from parser.go)
- // inl.C's inlnode in on a dotmeth node expects to find the inlineable body as
- // (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
- // out by typecheck's lookdot as this $$.ttype. So by providing
- // this back link here we avoid special casing there.
- mt.SetNname(asTypesNode(n))
-
- if Debug['E'] > 0 {
- fmt.Printf("import [%q] meth %v \n", p.imp.Path, n)
- }
- }
-
- dclcontext = savedContext
-
- case arrayTag:
- t = p.newtyp(TARRAY)
- bound := p.int64()
- elem := p.typ()
- t.Extra = &types.Array{Elem: elem, Bound: bound}
-
- case sliceTag:
- t = p.newtyp(TSLICE)
- elem := p.typ()
- t.Extra = types.Slice{Elem: elem}
-
- case dddTag:
- t = p.newtyp(TDDDFIELD)
- t.Extra = types.DDDField{T: p.typ()}
-
- case structTag:
- t = p.newtyp(TSTRUCT)
- t.SetFields(p.fieldList())
- checkwidth(t)
-
- case pointerTag:
- t = p.newtyp(types.Tptr)
- t.Extra = types.Ptr{Elem: p.typ()}
-
- case signatureTag:
- t = p.newtyp(TFUNC)
- params := p.paramList()
- result := p.paramList()
- functypefield0(t, nil, params, result)
-
- case interfaceTag:
- if ml := p.methodList(); len(ml) == 0 {
- t = types.Types[TINTER]
- } else {
- t = p.newtyp(TINTER)
- t.SetInterface(ml)
- }
-
- case mapTag:
- t = p.newtyp(TMAP)
- mt := t.MapType()
- mt.Key = p.typ()
- mt.Elem = p.typ()
-
- case chanTag:
- t = p.newtyp(TCHAN)
- ct := t.ChanType()
- ct.Dir = types.ChanDir(p.int())
- ct.Elem = p.typ()
-
- default:
- p.formatErrorf("unexpected type (tag = %d)", i)
- }
-
- if t == nil {
- p.formatErrorf("nil type (type tag = %d)", i)
- }
-
- return t
-}
-
-func (p *importer) qualifiedName() *types.Sym {
- name := p.string()
- pkg := p.pkg()
- return pkg.Lookup(name)
-}
-
-func (p *importer) fieldList() (fields []*types.Field) {
- if n := p.int(); n > 0 {
- fields = make([]*types.Field, n)
- for i := range fields {
- fields[i] = p.field()
- }
- }
- return
-}
-
-func (p *importer) field() *types.Field {
- pos := p.pos()
- sym, alias := p.fieldName()
- typ := p.typ()
- note := p.string()
-
- f := types.NewField()
- if sym.Name == "" {
- // anonymous field: typ must be T or *T and T must be a type name
- s := typ.Sym
- if s == nil && typ.IsPtr() {
- s = typ.Elem().Sym // deref
- }
- sym = sym.Pkg.Lookup(s.Name)
- f.Embedded = 1
- } else if alias {
- // anonymous field: we have an explicit name because it's a type alias
- f.Embedded = 1
- }
-
- f.Pos = pos
- f.Sym = sym
- f.Type = typ
- f.Note = note
-
- return f
-}
-
-func (p *importer) methodList() (methods []*types.Field) {
- for n := p.int(); n > 0; n-- {
- f := types.NewField()
- f.Pos = p.pos()
- f.Type = p.typ()
- methods = append(methods, f)
- }
-
- for n := p.int(); n > 0; n-- {
- methods = append(methods, p.method())
- }
-
- return
-}
-
-func (p *importer) method() *types.Field {
- pos := p.pos()
- sym := p.methodName()
- params := p.paramList()
- result := p.paramList()
-
- f := types.NewField()
- f.Pos = pos
- f.Sym = sym
- f.Type = functypefield(fakeRecvField(), params, result)
- return f
-}
-
-func (p *importer) fieldName() (*types.Sym, bool) {
- name := p.string()
- if p.version == 0 && name == "_" {
- // version 0 didn't export a package for _ field names
- // but used the builtin package instead
- return builtinpkg.Lookup(name), false
- }
- pkg := localpkg
- alias := false
- switch name {
- case "":
- // 1) field name matches base type name and is exported: nothing to do
- case "?":
- // 2) field name matches base type name and is not exported: need package
- name = ""
- pkg = p.pkg()
- case "@":
- // 3) field name doesn't match base type name (alias name): need name and possibly package
- name = p.string()
- alias = true
- fallthrough
- default:
- if !types.IsExported(name) {
- pkg = p.pkg()
- }
- }
- return pkg.Lookup(name), alias
-}
-
-func (p *importer) methodName() *types.Sym {
- name := p.string()
- if p.version == 0 && name == "_" {
- // version 0 didn't export a package for _ method names
- // but used the builtin package instead
- return builtinpkg.Lookup(name)
- }
- pkg := localpkg
- if !types.IsExported(name) {
- pkg = p.pkg()
- }
- return pkg.Lookup(name)
-}
-
-func (p *importer) paramList() []*types.Field {
- i := p.int()
- if i == 0 {
- return nil
- }
- // negative length indicates unnamed parameters
- named := true
- if i < 0 {
- i = -i
- named = false
- }
- // i > 0
- fs := make([]*types.Field, i)
- for i := range fs {
- fs[i] = p.param(named)
- }
- return fs
-}
-
-func (p *importer) param(named bool) *types.Field {
- f := types.NewField()
- // TODO(mdempsky): Need param position.
- f.Pos = lineno
- f.Type = p.typ()
- if f.Type.Etype == TDDDFIELD {
- // TDDDFIELD indicates wrapped ... slice type
- f.Type = types.NewSlice(f.Type.DDDField())
- f.SetIsddd(true)
- }
-
- if named {
- name := p.string()
- if name == "" {
- p.formatErrorf("expected named parameter")
- }
- // TODO(gri) Supply function/method package rather than
- // encoding the package for each parameter repeatedly.
- pkg := localpkg
- if name != "_" {
- pkg = p.pkg()
- }
- f.Sym = pkg.Lookup(name)
- }
-
- // TODO(gri) This is compiler-specific (escape info).
- // Move into compiler-specific section eventually?
- f.Note = p.string()
-
- return f
-}
-
-func (p *importer) value(typ *types.Type) (x Val) {
- switch tag := p.tagOrIndex(); tag {
- case falseTag:
- x.U = false
-
- case trueTag:
- x.U = true
-
- case int64Tag:
- u := new(Mpint)
- u.SetInt64(p.int64())
- u.Rune = typ == types.Idealrune
- x.U = u
-
- case floatTag:
- f := newMpflt()
- p.float(f)
- if typ == types.Idealint || typ.IsInteger() || typ.IsPtr() || typ.IsUnsafePtr() {
- // uncommon case: large int encoded as float
- //
- // This happens for unsigned typed integers
- // and (on 64-bit platforms) pointers because
- // of values in the range [2^63, 2^64).
- u := new(Mpint)
- u.SetFloat(f)
- x.U = u
- break
- }
- x.U = f
-
- case complexTag:
- u := new(Mpcplx)
- p.float(&u.Real)
- p.float(&u.Imag)
- x.U = u
-
- case stringTag:
- x.U = p.string()
-
- case unknownTag:
- p.formatErrorf("unknown constant (importing package with errors)")
-
- case nilTag:
- x.U = new(NilVal)
-
- default:
- p.formatErrorf("unexpected value tag %d", tag)
- }
-
- // verify ideal type
- if typ.IsUntyped() && untype(x.Ctype()) != typ {
- p.formatErrorf("value %v and type %v don't match", x, typ)
- }
-
- return
-}
-
-func (p *importer) float(x *Mpflt) {
- sign := p.int()
- if sign == 0 {
- x.SetFloat64(0)
- return
- }
-
- exp := p.int()
- mant := new(big.Int).SetBytes([]byte(p.string()))
-
- m := x.Val.SetInt(mant)
- m.SetMantExp(m, exp-mant.BitLen())
- if sign < 0 {
- m.Neg(m)
- }
-}
-
-// ----------------------------------------------------------------------------
-// Inlined function bodies
-
-// Approach: Read nodes and use them to create/declare the same data structures
-// as done originally by the (hidden) parser by closely following the parser's
-// original code. In other words, "parsing" the import data (which happens to
-// be encoded in binary rather textual form) is the best way at the moment to
-// re-establish the syntax tree's invariants. At some future point we might be
-// able to avoid this round-about way and create the rewritten nodes directly,
-// possibly avoiding a lot of duplicate work (name resolution, type checking).
-//
-// Refined nodes (e.g., ODOTPTR as a refinement of OXDOT) are exported as their
-// unrefined nodes (since this is what the importer uses). The respective case
-// entries are unreachable in the importer.
-
-func (p *importer) stmtList() []*Node {
- var list []*Node
- for {
- n := p.node()
- if n == nil {
- break
- }
- // OBLOCK nodes may be created when importing ODCL nodes - unpack them
- if n.Op == OBLOCK {
- list = append(list, n.List.Slice()...)
- } else {
- list = append(list, n)
- }
- }
- return list
-}
-
-func (p *importer) exprList() []*Node {
- var list []*Node
- for {
- n := p.expr()
- if n == nil {
- break
- }
- list = append(list, n)
- }
- return list
-}
-
-func (p *importer) elemList() []*Node {
- c := p.int()
- list := make([]*Node, c)
- for i := range list {
- s := p.fieldSym()
- list[i] = nodSym(OSTRUCTKEY, p.expr(), s)
- }
- return list
-}
-
-func (p *importer) expr() *Node {
- n := p.node()
- if n != nil && n.Op == OBLOCK {
- Fatalf("unexpected block node: %v", n)
- }
- return n
-}
-
func npos(pos src.XPos, n *Node) *Node {
n.Pos = pos
return n
}
-// TODO(gri) split into expr and stmt
-func (p *importer) node() *Node {
- switch op := p.op(); op {
- // expressions
- // case OPAREN:
- // unreachable - unpacked by exporter
-
- // case ODDDARG:
- // unimplemented
-
- case OLITERAL:
- pos := p.pos()
- typ := p.typ()
- n := npos(pos, nodlit(p.value(typ)))
- n.Type = idealType(typ)
- return n
-
- case ONAME:
- return npos(p.pos(), mkname(p.sym()))
-
- // case OPACK, ONONAME:
- // unreachable - should have been resolved by typechecking
-
- case OTYPE:
- return npos(p.pos(), typenod(p.typ()))
-
- // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
- // unreachable - should have been resolved by typechecking
-
- // case OCLOSURE:
- // unimplemented
-
- case OPTRLIT:
- pos := p.pos()
- n := npos(pos, p.expr())
- if !p.bool() /* !implicit, i.e. '&' operator */ {
- if n.Op == OCOMPLIT {
- // Special case for &T{...}: turn into (*T){...}.
- n.Right = nodl(pos, OIND, n.Right, nil)
- n.Right.SetImplicit(true)
- } else {
- n = nodl(pos, OADDR, n, nil)
- }
- }
- return n
-
- case OSTRUCTLIT:
- // TODO(mdempsky): Export position information for OSTRUCTKEY nodes.
- savedlineno := lineno
- lineno = p.pos()
- n := nodl(lineno, OCOMPLIT, nil, typenod(p.typ()))
- n.List.Set(p.elemList()) // special handling of field names
- lineno = savedlineno
- return n
-
- // case OARRAYLIT, OSLICELIT, OMAPLIT:
- // unreachable - mapped to case OCOMPLIT below by exporter
-
- case OCOMPLIT:
- n := nodl(p.pos(), OCOMPLIT, nil, typenod(p.typ()))
- n.List.Set(p.exprList())
- return n
-
- case OKEY:
- pos := p.pos()
- left, right := p.exprsOrNil()
- return nodl(pos, OKEY, left, right)
-
- // case OSTRUCTKEY:
- // unreachable - handled in case OSTRUCTLIT by elemList
-
- // case OCALLPART:
- // unimplemented
-
- // case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
- // unreachable - mapped to case OXDOT below by exporter
-
- case OXDOT:
- // see parser.new_dotname
- return npos(p.pos(), nodSym(OXDOT, p.expr(), p.fieldSym()))
-
- // case ODOTTYPE, ODOTTYPE2:
- // unreachable - mapped to case ODOTTYPE below by exporter
-
- case ODOTTYPE:
- n := nodl(p.pos(), ODOTTYPE, p.expr(), nil)
- n.Type = p.typ()
- return n
-
- // case OINDEX, OINDEXMAP, OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
- // unreachable - mapped to cases below by exporter
-
- case OINDEX:
- return nodl(p.pos(), op, p.expr(), p.expr())
-
- case OSLICE, OSLICE3:
- n := nodl(p.pos(), op, p.expr(), nil)
- low, high := p.exprsOrNil()
- var max *Node
- if n.Op.IsSlice3() {
- max = p.expr()
- }
- n.SetSliceBounds(low, high, max)
- return n
-
- // case OCONV, OCONVIFACE, OCONVNOP, OARRAYBYTESTR, OARRAYRUNESTR, OSTRARRAYBYTE, OSTRARRAYRUNE, ORUNESTR:
- // unreachable - mapped to OCONV case below by exporter
-
- case OCONV:
- n := nodl(p.pos(), OCONV, p.expr(), nil)
- n.Type = p.typ()
- return n
-
- case OCOPY, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC, ORECOVER, OPRINT, OPRINTN:
- n := npos(p.pos(), builtinCall(op))
- n.List.Set(p.exprList())
- if op == OAPPEND {
- n.SetIsddd(p.bool())
- }
- return n
-
- // case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
- // unreachable - mapped to OCALL case below by exporter
-
- case OCALL:
- n := nodl(p.pos(), OCALL, p.expr(), nil)
- n.List.Set(p.exprList())
- n.SetIsddd(p.bool())
- return n
-
- case OMAKEMAP, OMAKECHAN, OMAKESLICE:
- n := npos(p.pos(), builtinCall(OMAKE))
- n.List.Append(typenod(p.typ()))
- n.List.Append(p.exprList()...)
- return n
-
- // unary expressions
- case OPLUS, OMINUS, OADDR, OCOM, OIND, ONOT, ORECV:
- return nodl(p.pos(), op, p.expr(), nil)
-
- // binary expressions
- case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT,
- OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND, OSUB, OXOR:
- return nodl(p.pos(), op, p.expr(), p.expr())
-
- case OADDSTR:
- pos := p.pos()
- list := p.exprList()
- x := npos(pos, list[0])
- for _, y := range list[1:] {
- x = nodl(pos, OADD, x, y)
- }
- return x
-
- // case OCMPSTR, OCMPIFACE:
- // unreachable - mapped to std comparison operators by exporter
-
- case ODCLCONST:
- // TODO(gri) these should not be exported in the first place
- return nodl(p.pos(), OEMPTY, nil, nil)
-
- // --------------------------------------------------------------------
- // statements
- case ODCL:
- if p.version < 2 {
- // versions 0 and 1 exported a bool here but it
- // was always false - simply ignore in this case
- p.bool()
- }
- pos := p.pos()
- lhs := npos(pos, dclname(p.sym()))
- typ := typenod(p.typ())
- return npos(pos, liststmt(variter([]*Node{lhs}, typ, nil))) // TODO(gri) avoid list creation
-
- // case ODCLFIELD:
- // unimplemented
-
- // case OAS, OASWB:
- // unreachable - mapped to OAS case below by exporter
-
- case OAS:
- return nodl(p.pos(), OAS, p.expr(), p.expr())
-
- case OASOP:
- n := nodl(p.pos(), OASOP, nil, nil)
- n.SetSubOp(p.op())
- n.Left = p.expr()
- if !p.bool() {
- n.Right = nodintconst(1)
- n.SetImplicit(true)
- } else {
- n.Right = p.expr()
- }
- return n
-
- // case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
- // unreachable - mapped to OAS2 case below by exporter
-
- case OAS2:
- n := nodl(p.pos(), OAS2, nil, nil)
- n.List.Set(p.exprList())
- n.Rlist.Set(p.exprList())
- return n
-
- case ORETURN:
- n := nodl(p.pos(), ORETURN, nil, nil)
- n.List.Set(p.exprList())
- return n
-
- // case ORETJMP:
- // unreachable - generated by compiler for trampolin routines (not exported)
-
- case OPROC, ODEFER:
- return nodl(p.pos(), op, p.expr(), nil)
-
- case OIF:
- n := nodl(p.pos(), OIF, nil, nil)
- n.Ninit.Set(p.stmtList())
- n.Left = p.expr()
- n.Nbody.Set(p.stmtList())
- n.Rlist.Set(p.stmtList())
- return n
-
- case OFOR:
- n := nodl(p.pos(), OFOR, nil, nil)
- n.Ninit.Set(p.stmtList())
- n.Left, n.Right = p.exprsOrNil()
- n.Nbody.Set(p.stmtList())
- return n
-
- case ORANGE:
- n := nodl(p.pos(), ORANGE, nil, nil)
- n.List.Set(p.stmtList())
- n.Right = p.expr()
- n.Nbody.Set(p.stmtList())
- return n
-
- case OSELECT, OSWITCH:
- n := nodl(p.pos(), op, nil, nil)
- n.Ninit.Set(p.stmtList())
- n.Left, _ = p.exprsOrNil()
- n.List.Set(p.stmtList())
- return n
-
- // case OCASE, OXCASE:
- // unreachable - mapped to OXCASE case below by exporter
-
- case OXCASE:
- n := nodl(p.pos(), OXCASE, nil, nil)
- n.List.Set(p.exprList())
- // TODO(gri) eventually we must declare variables for type switch
- // statements (type switch statements are not yet exported)
- n.Nbody.Set(p.stmtList())
- return n
-
- // case OFALL:
- // unreachable - mapped to OXFALL case below by exporter
-
- case OFALL:
- n := nodl(p.pos(), OFALL, nil, nil)
- return n
-
- case OBREAK, OCONTINUE:
- pos := p.pos()
- left, _ := p.exprsOrNil()
- if left != nil {
- left = newname(left.Sym)
- }
- return nodl(pos, op, left, nil)
-
- // case OEMPTY:
- // unreachable - not emitted by exporter
-
- case OGOTO, OLABEL:
- return nodl(p.pos(), op, newname(p.expr().Sym), nil)
-
- case OEND:
- return nil
-
- default:
- Fatalf("cannot import %v (%d) node\n"+
- "==> please file an issue and assign to gri@\n", op, int(op))
- panic("unreachable") // satisfy compiler
- }
-}
-
func builtinCall(op Op) *Node {
return nod(OCALL, mkname(builtinpkg.Lookup(goopnames[op])), nil)
}
-
-func (p *importer) exprsOrNil() (a, b *Node) {
- ab := p.int()
- if ab&1 != 0 {
- a = p.expr()
- }
- if ab&2 != 0 {
- b = p.node()
- }
- return
-}
-
-func (p *importer) fieldSym() *types.Sym {
- name := p.string()
- pkg := localpkg
- if !types.IsExported(name) {
- pkg = p.pkg()
- }
- return pkg.Lookup(name)
-}
-
-func (p *importer) sym() *types.Sym {
- name := p.string()
- pkg := localpkg
- if name != "_" {
- pkg = p.pkg()
- }
- linkname := p.string()
- sym := pkg.Lookup(name)
- sym.Linkname = linkname
- return sym
-}
-
-func (p *importer) bool() bool {
- return p.int() != 0
-}
-
-func (p *importer) op() Op {
- return Op(p.int())
-}
-
-// ----------------------------------------------------------------------------
-// Low-level decoders
-
-func (p *importer) tagOrIndex() int {
- if p.debugFormat {
- p.marker('t')
- }
-
- return int(p.rawInt64())
-}
-
-func (p *importer) int() int {
- x := p.int64()
- if int64(int(x)) != x {
- p.formatErrorf("exported integer too large")
- }
- return int(x)
-}
-
-func (p *importer) int64() int64 {
- if p.debugFormat {
- p.marker('i')
- }
-
- return p.rawInt64()
-}
-
-func (p *importer) string() string {
- if p.debugFormat {
- p.marker('s')
- }
- // if the string was seen before, i is its index (>= 0)
- // (the empty string is at index 0)
- i := p.rawInt64()
- if i >= 0 {
- return p.strList[i]
- }
- // otherwise, i is the negative string length (< 0)
- if n := int(-i); n <= cap(p.buf) {
- p.buf = p.buf[:n]
- } else {
- p.buf = make([]byte, n)
- }
- for i := range p.buf {
- p.buf[i] = p.rawByte()
- }
- s := string(p.buf)
- p.strList = append(p.strList, s)
- return s
-}
-
-func (p *importer) marker(want byte) {
- if got := p.rawByte(); got != want {
- p.formatErrorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
- }
-
- pos := p.read
- if n := int(p.rawInt64()); n != pos {
- p.formatErrorf("incorrect position: got %d; want %d", n, pos)
- }
-}
-
-// rawInt64 should only be used by low-level decoders.
-func (p *importer) rawInt64() int64 {
- i, err := binary.ReadVarint(p)
- if err != nil {
- p.formatErrorf("read error: %v", err)
- }
- return i
-}
-
-// rawStringln should only be used to read the initial version string.
-func (p *importer) rawStringln(b byte) string {
- p.buf = p.buf[:0]
- for b != '\n' {
- p.buf = append(p.buf, b)
- b = p.rawByte()
- }
- return string(p.buf)
-}
-
-// needed for binary.ReadVarint in rawInt64
-func (p *importer) ReadByte() (byte, error) {
- return p.rawByte(), nil
-}
-
-// rawByte is the bottleneck interface for reading from p.in.
-// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
-// rawByte should only be used by low-level decoders.
-func (p *importer) rawByte() byte {
- c, err := p.in.ReadByte()
- p.read++
- if err != nil {
- p.formatErrorf("read error: %v", err)
- }
- if c == '|' {
- c, err = p.in.ReadByte()
- p.read++
- if err != nil {
- p.formatErrorf("read error: %v", err)
- }
- switch c {
- case 'S':
- c = '$'
- case '|':
- // nothing to do
- default:
- p.formatErrorf("unexpected escape sequence in export data")
- }
- }
- return c
-}
diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go
index 1403a2be11..02d51678be 100644
--- a/src/cmd/compile/internal/gc/const.go
+++ b/src/cmd/compile/internal/gc/const.go
@@ -234,7 +234,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, reuse canReuseNode) *Node {
if n.Op == OLITERAL && !reuse {
// Can't always set n.Type directly on OLITERAL nodes.
// See discussion on CL 20813.
- n = n.copy()
+ n = n.rawcopy()
reuse = true
}
@@ -476,7 +476,7 @@ func toflt(v Val) Val {
f := newMpflt()
f.Set(&u.Real)
if u.Imag.CmpFloat64(0) != 0 {
- yyerror("constant %v%vi truncated to real", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp|FmtSign))
+ yyerror("constant %v truncated to real", u.GoString())
}
v.U = f
}
@@ -509,11 +509,11 @@ func toint(v Val) Val {
// value from the error message.
// (See issue #11371).
var t big.Float
- t.Parse(fconv(u, FmtSharp), 10)
+ t.Parse(u.GoString(), 10)
if t.IsInt() {
yyerror("constant truncated to integer")
} else {
- yyerror("constant %v truncated to integer", fconv(u, FmtSharp))
+ yyerror("constant %v truncated to integer", u.GoString())
}
}
}
@@ -522,7 +522,7 @@ func toint(v Val) Val {
case *Mpcplx:
i := new(Mpint)
if !i.SetFloat(&u.Real) || u.Imag.CmpFloat64(0) != 0 {
- yyerror("constant %v%vi truncated to integer", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp|FmtSign))
+ yyerror("constant %v truncated to integer", u.GoString())
}
v.U = i
@@ -1200,8 +1200,7 @@ func setconst(n *Node, v Val) {
// Ensure n.Orig still points to a semantically-equivalent
// expression after we rewrite n into a constant.
if n.Orig == n {
- n.Orig = n.copy()
- n.Orig.Orig = n.Orig
+ n.Orig = n.sepcopy()
}
*n = Node{
@@ -1331,7 +1330,7 @@ func defaultlitreuse(n *Node, t *types.Type, reuse canReuseNode) *Node {
}
if n.Op == OLITERAL && !reuse {
- n = n.copy()
+ n = n.rawcopy()
reuse = true
}
diff --git a/src/cmd/compile/internal/gc/dump.go b/src/cmd/compile/internal/gc/dump.go
new file mode 100644
index 0000000000..8de90adf05
--- /dev/null
+++ b/src/cmd/compile/internal/gc/dump.go
@@ -0,0 +1,287 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements textual dumping of arbitrary data structures
+// for debugging purposes. The code is customized for Node graphs
+// and may be used for an alternative view of the node structure.
+
+package gc
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "unicode"
+ "unicode/utf8"
+)
+
+// dump is like fdump but prints to stderr.
+func dump(root interface{}, filter string, depth int) {
+ fdump(os.Stderr, root, filter, depth)
+}
+
+// fdump prints the structure of a rooted data structure
+// to w by depth-first traversal of the data structure.
+//
+// The filter parameter is a regular expression. If it is
+// non-empty, only struct fields whose names match filter
+// are printed.
+//
+// The depth parameter controls how deep traversal recurses
+// before it returns (higher value means greater depth).
+// If an empty field filter is given, a good depth default value
+// is 4. A negative depth means no depth limit, which may be fine
+// for small data structures or if there is a non-empty filter.
+//
+// In the output, Node structs are identified by their Op name
+// rather than their type; struct fields with zero values or
+// non-matching field names are omitted, and "…" means recursion
+// depth has been reached or struct fields have been omitted.
+func fdump(w io.Writer, root interface{}, filter string, depth int) {
+ if root == nil {
+ fmt.Fprintln(w, "nil")
+ return
+ }
+
+ if filter == "" {
+ filter = ".*" // default
+ }
+
+ p := dumper{
+ output: w,
+ fieldrx: regexp.MustCompile(filter),
+ ptrmap: make(map[uintptr]int),
+ last: '\n', // force printing of line number on first line
+ }
+
+ p.dump(reflect.ValueOf(root), depth)
+ p.printf("\n")
+}
+
+type dumper struct {
+ output io.Writer
+ fieldrx *regexp.Regexp // field name filter
+ ptrmap map[uintptr]int // ptr -> dump line number
+ lastadr string // last address string printed (for shortening)
+
+ // output
+ indent int // current indentation level
+ last byte // last byte processed by Write
+ line int // current line number
+}
+
+var indentBytes = []byte(". ")
+
+func (p *dumper) Write(data []byte) (n int, err error) {
+ var m int
+ for i, b := range data {
+ // invariant: data[0:n] has been written
+ if b == '\n' {
+ m, err = p.output.Write(data[n : i+1])
+ n += m
+ if err != nil {
+ return
+ }
+ } else if p.last == '\n' {
+ p.line++
+ _, err = fmt.Fprintf(p.output, "%6d ", p.line)
+ if err != nil {
+ return
+ }
+ for j := p.indent; j > 0; j-- {
+ _, err = p.output.Write(indentBytes)
+ if err != nil {
+ return
+ }
+ }
+ }
+ p.last = b
+ }
+ if len(data) > n {
+ m, err = p.output.Write(data[n:])
+ n += m
+ }
+ return
+}
+
+// printf is a convenience wrapper.
+func (p *dumper) printf(format string, args ...interface{}) {
+ if _, err := fmt.Fprintf(p, format, args...); err != nil {
+ panic(err)
+ }
+}
+
+// addr returns the (hexadecimal) address string of the object
+// represented by x (or "?" if x is not addressable), with the
+// common prefix between this and the prior address replaced by
+// "0x…" to make it easier to visually match addresses.
+func (p *dumper) addr(x reflect.Value) string {
+ if !x.CanAddr() {
+ return "?"
+ }
+ adr := fmt.Sprintf("%p", x.Addr().Interface())
+ s := adr
+ if i := commonPrefixLen(p.lastadr, adr); i > 0 {
+ s = "0x…" + adr[i:]
+ }
+ p.lastadr = adr
+ return s
+}
+
+// dump prints the contents of x.
+func (p *dumper) dump(x reflect.Value, depth int) {
+ if depth == 0 {
+ p.printf("…")
+ return
+ }
+
+ // special cases
+ switch v := x.Interface().(type) {
+ case Nodes:
+ // unpack Nodes since reflect cannot look inside
+ // due to the unexported field in its struct
+ x = reflect.ValueOf(v.Slice())
+
+ case src.XPos:
+ p.printf("%s", linestr(v))
+ return
+
+ case *types.Node:
+ x = reflect.ValueOf(asNode(v))
+ }
+
+ switch x.Kind() {
+ case reflect.String:
+ p.printf("%q", x.Interface()) // print strings in quotes
+
+ case reflect.Interface:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+ p.dump(x.Elem(), depth-1)
+
+ case reflect.Ptr:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+
+ p.printf("*")
+ ptr := x.Pointer()
+ if line, exists := p.ptrmap[ptr]; exists {
+ p.printf("(@%d)", line)
+ return
+ }
+ p.ptrmap[ptr] = p.line
+ p.dump(x.Elem(), depth) // don't count pointer indirection towards depth
+
+ case reflect.Slice:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+ p.printf("%s (%d entries) {", x.Type(), x.Len())
+ if x.Len() > 0 {
+ p.indent++
+ p.printf("\n")
+ for i, n := 0, x.Len(); i < n; i++ {
+ p.printf("%d: ", i)
+ p.dump(x.Index(i), depth-1)
+ p.printf("\n")
+ }
+ p.indent--
+ }
+ p.printf("}")
+
+ case reflect.Struct:
+ typ := x.Type()
+
+ isNode := false
+ if n, ok := x.Interface().(Node); ok {
+ isNode = true
+ p.printf("%s %s {", n.Op.String(), p.addr(x))
+ } else {
+ p.printf("%s {", typ)
+ }
+ p.indent++
+
+ first := true
+ omitted := false
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ // Exclude non-exported fields because their
+ // values cannot be accessed via reflection.
+ if name := typ.Field(i).Name; isExported(name) {
+ if !p.fieldrx.MatchString(name) {
+ omitted = true
+ continue // field name not selected by filter
+ }
+
+ // special cases
+ if isNode && name == "Op" {
+ omitted = true
+ continue // Op field already printed for Nodes
+ }
+ x := x.Field(i)
+ if isZeroVal(x) {
+ omitted = true
+ continue // exclude zero-valued fields
+ }
+ if n, ok := x.Interface().(Nodes); ok && n.Len() == 0 {
+ omitted = true
+ continue // exclude empty Nodes slices
+ }
+
+ if first {
+ p.printf("\n")
+ first = false
+ }
+ p.printf("%s: ", name)
+ p.dump(x, depth-1)
+ p.printf("\n")
+ }
+ }
+ if omitted {
+ p.printf("…\n")
+ }
+
+ p.indent--
+ p.printf("}")
+
+ default:
+ p.printf("%v", x.Interface())
+ }
+}
+
+func isZeroVal(x reflect.Value) bool {
+ switch x.Kind() {
+ case reflect.Bool:
+ return !x.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return x.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return x.Uint() == 0
+ case reflect.String:
+ return x.String() == ""
+ case reflect.Interface, reflect.Ptr, reflect.Slice:
+ return x.IsNil()
+ }
+ return false
+}
+
+func isExported(name string) bool {
+ ch, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(ch)
+}
+
+func commonPrefixLen(a, b string) (i int) {
+ for i < len(a) && i < len(b) && a[i] == b[i] {
+ i++
+ }
+ return
+}
diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go
index 9db6c8e0b4..145007f5e1 100644
--- a/src/cmd/compile/internal/gc/esc.go
+++ b/src/cmd/compile/internal/gc/esc.go
@@ -654,9 +654,71 @@ func (e *EscState) esclist(l Nodes, parent *Node) {
}
}
+func (e *EscState) isSliceSelfAssign(dst, src *Node) bool {
+ // Detect the following special case.
+ //
+ // func (b *Buffer) Foo() {
+ // n, m := ...
+ // b.buf = b.buf[n:m]
+ // }
+ //
+ // This assignment is a no-op for escape analysis,
+ // it does not store any new pointers into b that were not already there.
+ // However, without this special case b will escape, because we assign to OIND/ODOTPTR.
+ // Here we assume that the statement will not contain calls,
+ // that is, that order will move any calls to init.
+ // Otherwise base ONAME value could change between the moments
+ // when we evaluate it for dst and for src.
+
+ // dst is ONAME dereference.
+ if dst.Op != OIND && dst.Op != ODOTPTR || dst.Left.Op != ONAME {
+ return false
+ }
+ // src is a slice operation.
+ switch src.Op {
+ case OSLICE, OSLICE3, OSLICESTR:
+ // OK.
+ case OSLICEARR, OSLICE3ARR:
+ // Since arrays are embedded into containing object,
+ // slice of non-pointer array will introduce a new pointer into b that was not already there
+ // (pointer to b itself). After such assignment, if b contents escape,
+ // b escapes as well. If we ignore such OSLICEARR, we will conclude
+ // that b does not escape when b contents do.
+ //
+ // Pointer to an array is OK since it's not stored inside b directly.
+ // For slicing an array (not pointer to array), there is an implicit OADDR.
+ // We check that to determine non-pointer array slicing.
+ if src.Left.Op == OADDR {
+ return false
+ }
+ default:
+ return false
+ }
+ // slice is applied to ONAME dereference.
+ if src.Left.Op != OIND && src.Left.Op != ODOTPTR || src.Left.Left.Op != ONAME {
+ return false
+ }
+ // dst and src reference the same base ONAME.
+ return dst.Left == src.Left.Left
+}
+
// isSelfAssign reports whether assignment from src to dst can
// be ignored by the escape analysis as it's effectively a self-assignment.
func (e *EscState) isSelfAssign(dst, src *Node) bool {
+ if e.isSliceSelfAssign(dst, src) {
+ return true
+ }
+
+ // Detect trivial assignments that assign back to the same object.
+ //
+ // It covers these cases:
+ // val.x = val.y
+ // val.x[i] = val.y[j]
+ // val.x1.x2 = val.x1.y2
+ // ... etc
+ //
+ // These assignments do not change assigned object lifetime.
+
if dst == nil || src == nil || dst.Op != src.Op {
return false
}
@@ -689,18 +751,16 @@ func (e *EscState) mayAffectMemory(n *Node) bool {
switch n.Op {
case ONAME, OCLOSUREVAR, OLITERAL:
return false
- case ODOT, ODOTPTR:
- return e.mayAffectMemory(n.Left)
- case OIND, OCONVNOP:
- return e.mayAffectMemory(n.Left)
- case OCONV:
- return e.mayAffectMemory(n.Left)
- case OINDEX:
- return e.mayAffectMemory(n.Left) || e.mayAffectMemory(n.Right)
- case OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
+
+ // Left+Right group.
+ case OINDEX, OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
return e.mayAffectMemory(n.Left) || e.mayAffectMemory(n.Right)
- case ONOT, OCOM, OPLUS, OMINUS, OALIGNOF, OOFFSETOF, OSIZEOF:
+
+ // Left group.
+ case ODOT, ODOTPTR, OIND, OCONVNOP, OCONV, OLEN, OCAP,
+ ONOT, OCOM, OPLUS, OMINUS, OALIGNOF, OOFFSETOF, OSIZEOF:
return e.mayAffectMemory(n.Left)
+
default:
return true
}
@@ -832,48 +892,8 @@ opSwitch:
}
}
- // Filter out the following special case.
- //
- // func (b *Buffer) Foo() {
- // n, m := ...
- // b.buf = b.buf[n:m]
- // }
- //
- // This assignment is a no-op for escape analysis,
- // it does not store any new pointers into b that were not already there.
- // However, without this special case b will escape, because we assign to OIND/ODOTPTR.
case OAS, OASOP:
- if (n.Left.Op == OIND || n.Left.Op == ODOTPTR) && n.Left.Left.Op == ONAME && // dst is ONAME dereference
- (n.Right.Op == OSLICE || n.Right.Op == OSLICE3 || n.Right.Op == OSLICESTR) && // src is slice operation
- (n.Right.Left.Op == OIND || n.Right.Left.Op == ODOTPTR) && n.Right.Left.Left.Op == ONAME && // slice is applied to ONAME dereference
- n.Left.Left == n.Right.Left.Left { // dst and src reference the same base ONAME
-
- // Here we also assume that the statement will not contain calls,
- // that is, that order will move any calls to init.
- // Otherwise base ONAME value could change between the moments
- // when we evaluate it for dst and for src.
- //
- // Note, this optimization does not apply to OSLICEARR,
- // because it does introduce a new pointer into b that was not already there
- // (pointer to b itself). After such assignment, if b contents escape,
- // b escapes as well. If we ignore such OSLICEARR, we will conclude
- // that b does not escape when b contents do.
- if Debug['m'] != 0 {
- Warnl(n.Pos, "%v ignoring self-assignment to %S", e.curfnSym(n), n.Left)
- }
-
- break
- }
-
- // Also skip trivial assignments that assign back to the same object.
- //
- // It covers these cases:
- // val.x = val.y
- // val.x[i] = val.y[j]
- // val.x1.x2 = val.x1.y2
- // ... etc
- //
- // These assignments do not change assigned object lifetime.
+ // Filter out some no-op assignments for escape analysis.
if e.isSelfAssign(n.Left, n.Right) {
if Debug['m'] != 0 {
Warnl(n.Pos, "%v ignoring self-assignment in %S", e.curfnSym(n), n)
@@ -1396,7 +1416,7 @@ func describeEscape(em uint16) string {
}
s += "contentToHeap"
}
- for em >>= EscReturnBits; em != 0; em = em >> bitsPerOutputInTag {
+ for em >>= EscReturnBits; em != 0; em >>= bitsPerOutputInTag {
// See encoding description above
if s != "" {
s += " "
@@ -1446,7 +1466,7 @@ func (e *EscState) escassignfromtag(note string, dsts Nodes, src, call *Node) ui
em0 := em
dstsi := 0
- for em >>= EscReturnBits; em != 0 && dstsi < dsts.Len(); em = em >> bitsPerOutputInTag {
+ for em >>= EscReturnBits; em != 0 && dstsi < dsts.Len(); em >>= bitsPerOutputInTag {
// Prefer the lowest-level path to the reference (for escape purposes).
// Two-bit encoding (for example. 1, 3, and 4 bits are other options)
// 01 = 0-level
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index 3aa7c39067..6ee660988a 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -12,8 +12,6 @@ import (
)
var (
- flagiexport bool // if set, use indexed export data format
-
Debug_export int // if set, print debugging information about export data
)
@@ -75,11 +73,7 @@ func dumpexport(bout *bio.Writer) {
// The linker also looks for the $$ marker - use char after $$ to distinguish format.
exportf(bout, "\n$$B\n") // indicate binary export format
off := bout.Offset()
- if flagiexport {
- iexport(bout.Writer)
- } else {
- export(bout.Writer, Debug_export != 0)
- }
+ iexport(bout.Writer)
size := bout.Offset() - off
exportf(bout, "\n$$\n")
@@ -95,7 +89,7 @@ func importsym(ipkg *types.Pkg, s *types.Sym, op Op) *Node {
// declaration for all imported symbols. The exception
// is declarations for Runtimepkg, which are populated
// by loadsys instead.
- if flagiexport && s.Pkg != Runtimepkg {
+ if s.Pkg != Runtimepkg {
Fatalf("missing ONONAME for %v\n", s)
}
diff --git a/src/cmd/compile/internal/gc/float_test.go b/src/cmd/compile/internal/gc/float_test.go
index c0a8cfc89e..c5c604003a 100644
--- a/src/cmd/compile/internal/gc/float_test.go
+++ b/src/cmd/compile/internal/gc/float_test.go
@@ -6,6 +6,8 @@ package gc
import (
"math"
+ "os"
+ "runtime"
"testing"
)
@@ -364,11 +366,19 @@ func TestFloatConvertFolded(t *testing.T) {
func TestFloat32StoreToLoadConstantFold(t *testing.T) {
// Test that math.Float32{,from}bits constant fold correctly.
- // In particular we need to be careful that signalling NaN (sNaN) values
+ // In particular we need to be careful that signaling NaN (sNaN) values
// are not converted to quiet NaN (qNaN) values during compilation.
// See issue #27193 for more information.
- // signalling NaNs
+ // TODO: this method for detecting 387 won't work if the compiler has been
+ // built using GOARCH=386 GO386=387 and either the target is a different
+ // architecture or the GO386=387 environment variable is not set when the
+ // test is run.
+ if runtime.GOARCH == "386" && os.Getenv("GO386") == "387" {
+ t.Skip("signaling NaNs are not propagated on 387 (issue #27516)")
+ }
+
+ // signaling NaNs
{
const nan = uint32(0x7f800001) // sNaN
if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go
index 5b7445d4db..5d2e36ee51 100644
--- a/src/cmd/compile/internal/gc/fmt.go
+++ b/src/cmd/compile/internal/gc/fmt.go
@@ -119,7 +119,7 @@ const (
// *types.Type:
// %#v Go format
// %#L type definition instead of name
-// %#S omit"func" and receiver in function signature
+// %#S omit "func" and receiver in function signature
//
// %-v type identifiers
// %-S type identifiers without "func" and arg names in type signatures (methodsym)
@@ -514,10 +514,10 @@ func (v Val) vconv(s fmt.State, flag FmtFlag) {
case *Mpint:
if !u.Rune {
if flag&FmtSharp != 0 {
- fmt.Fprint(s, bconv(u, FmtSharp))
+ fmt.Fprint(s, u.String())
return
}
- fmt.Fprint(s, bconv(u, 0))
+ fmt.Fprint(s, u.GoString())
return
}
@@ -537,29 +537,19 @@ func (v Val) vconv(s fmt.State, flag FmtFlag) {
case *Mpflt:
if flag&FmtSharp != 0 {
- fmt.Fprint(s, fconv(u, 0))
+ fmt.Fprint(s, u.String())
return
}
- fmt.Fprint(s, fconv(u, FmtSharp))
+ fmt.Fprint(s, u.GoString())
return
case *Mpcplx:
- switch {
- case flag&FmtSharp != 0:
- fmt.Fprintf(s, "(%v+%vi)", &u.Real, &u.Imag)
-
- case v.U.(*Mpcplx).Real.CmpFloat64(0) == 0:
- fmt.Fprintf(s, "%vi", fconv(&u.Imag, FmtSharp))
-
- case v.U.(*Mpcplx).Imag.CmpFloat64(0) == 0:
- fmt.Fprint(s, fconv(&u.Real, FmtSharp))
-
- case v.U.(*Mpcplx).Imag.CmpFloat64(0) < 0:
- fmt.Fprintf(s, "(%v%vi)", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp))
-
- default:
- fmt.Fprintf(s, "(%v+%vi)", fconv(&u.Real, FmtSharp), fconv(&u.Imag, FmtSharp))
+ if flag&FmtSharp != 0 {
+ fmt.Fprint(s, u.String())
+ return
}
+ fmt.Fprint(s, u.GoString())
+ return
case string:
fmt.Fprint(s, strconv.Quote(u))
@@ -671,7 +661,7 @@ func typefmt(t *types.Type, flag FmtFlag, mode fmtMode, depth int) string {
return "error"
}
- // Unless the 'l' flag was specified, if the type has a name, just print that name.
+ // Unless the 'L' flag was specified, if the type has a name, just print that name.
if flag&FmtLong == 0 && t.Sym != nil && t != types.Types[t.Etype] {
switch mode {
case FTypeId, FTypeIdName:
@@ -1314,16 +1304,14 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
mode.Fprintf(s, "%v { %v }", n.Type, n.Func.Closure.Nbody)
case OCOMPLIT:
- ptrlit := n.Right != nil && n.Right.Implicit() && n.Right.Type != nil && n.Right.Type.IsPtr()
if mode == FErr {
if n.Right != nil && n.Right.Type != nil && !n.Implicit() {
- if ptrlit {
+ if n.Right.Implicit() && n.Right.Type.IsPtr() {
mode.Fprintf(s, "&%v literal", n.Right.Type.Elem())
return
- } else {
- mode.Fprintf(s, "%v literal", n.Right.Type)
- return
}
+ mode.Fprintf(s, "%v literal", n.Right.Type)
+ return
}
fmt.Fprint(s, "composite literal")
@@ -1532,9 +1520,8 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) {
func (n *Node) nodefmt(s fmt.State, flag FmtFlag, mode fmtMode) {
t := n.Type
- // We almost always want the original, except in export mode for literals.
- // This saves the importer some work, and avoids us having to redo some
- // special casing for package unsafe.
+ // We almost always want the original.
+ // TODO(gri) Why the special case for OLITERAL?
if n.Op != OLITERAL && n.Orig != nil {
n = n.Orig
}
diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go
index fb5a413b84..f188c9a9cd 100644
--- a/src/cmd/compile/internal/gc/inl.go
+++ b/src/cmd/compile/internal/gc/inl.go
@@ -71,9 +71,7 @@ func fnpkg(fn *Node) *types.Pkg {
func typecheckinl(fn *Node) {
lno := setlineno(fn)
- if flagiexport {
- expandInline(fn)
- }
+ expandInline(fn)
// typecheckinl is only for imported functions;
// their bodies may refer to unsafe as long as the package
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 44cf75e7c9..68f6294724 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -249,7 +249,6 @@ func Main(archInit func(*Arch)) {
flag.StringVar(&blockprofile, "blockprofile", "", "write block profile to `file`")
flag.StringVar(&mutexprofile, "mutexprofile", "", "write mutex profile to `file`")
flag.StringVar(&benchfile, "bench", "", "append benchmark times to `file`")
- flag.BoolVar(&flagiexport, "iexport", true, "export indexed package data")
objabi.Flagparse(usage)
// Record flags that affect the build result. (And don't
@@ -1129,24 +1128,13 @@ func importfile(f *Val) *types.Pkg {
errorexit()
}
- // New indexed format is distinguished by an 'i' byte,
- // whereas old export format always starts with 'c', 'd', or 'v'.
- if c == 'i' {
- if !flagiexport {
- yyerror("import %s: cannot import package compiled with -iexport=true", file)
- errorexit()
- }
-
- iimport(importpkg, imp)
- } else {
- if flagiexport {
- yyerror("import %s: cannot import package compiled with -iexport=false", file)
- errorexit()
- }
-
- imp.UnreadByte()
- Import(importpkg, imp.Reader)
+ // Indexed format is distinguished by an 'i' byte,
+ // whereas previous export formats started with 'c', 'd', or 'v'.
+ if c != 'i' {
+ yyerror("import %s: unexpected package format byte: %v", file, c)
+ errorexit()
}
+ iimport(importpkg, imp)
default:
yyerror("no import in %q", path_)
diff --git a/src/cmd/compile/internal/gc/mpfloat.go b/src/cmd/compile/internal/gc/mpfloat.go
index 5977ef9748..d1f5cb1200 100644
--- a/src/cmd/compile/internal/gc/mpfloat.go
+++ b/src/cmd/compile/internal/gc/mpfloat.go
@@ -201,24 +201,16 @@ func (a *Mpflt) SetString(as string) {
}
func (f *Mpflt) String() string {
- return fconv(f, 0)
+ return f.Val.Text('b', 0)
}
-func fconv(fvp *Mpflt, flag FmtFlag) string {
- if flag&FmtSharp == 0 {
- return fvp.Val.Text('b', 0)
- }
-
- // use decimal format for error messages
-
+func (fvp *Mpflt) GoString() string {
// determine sign
+ sign := ""
f := &fvp.Val
- var sign string
if f.Sign() < 0 {
sign = "-"
f = new(big.Float).Abs(f)
- } else if flag&FmtSign != 0 {
- sign = "+"
}
// Don't try to convert infinities (will not terminate).
@@ -334,3 +326,34 @@ func (v *Mpcplx) Div(rv *Mpcplx) bool {
return true
}
+
+func (v *Mpcplx) String() string {
+ return fmt.Sprintf("(%s+%si)", v.Real.String(), v.Imag.String())
+}
+
+func (v *Mpcplx) GoString() string {
+ var re string
+ sre := v.Real.CmpFloat64(0)
+ if sre != 0 {
+ re = v.Real.GoString()
+ }
+
+ var im string
+ sim := v.Imag.CmpFloat64(0)
+ if sim != 0 {
+ im = v.Imag.GoString()
+ }
+
+ switch {
+ case sre == 0 && sim == 0:
+ return "0"
+ case sre == 0:
+ return im + "i"
+ case sim == 0:
+ return re
+ case sim < 0:
+ return fmt.Sprintf("(%s%si)", re, im)
+ default:
+ return fmt.Sprintf("(%s+%si)", re, im)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/mpint.go b/src/cmd/compile/internal/gc/mpint.go
index de47205435..e4dd22d0a0 100644
--- a/src/cmd/compile/internal/gc/mpint.go
+++ b/src/cmd/compile/internal/gc/mpint.go
@@ -299,13 +299,10 @@ func (a *Mpint) SetString(as string) {
}
}
-func (a *Mpint) String() string {
- return bconv(a, 0)
+func (a *Mpint) GoString() string {
+ return a.Val.String()
}
-func bconv(xval *Mpint, flag FmtFlag) string {
- if flag&FmtSharp != 0 {
- return fmt.Sprintf("%#x", &xval.Val)
- }
- return xval.Val.String()
+func (a *Mpint) String() string {
+ return fmt.Sprintf("%#x", &a.Val)
}
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
index fb749d171f..19862c03aa 100644
--- a/src/cmd/compile/internal/gc/obj.go
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -281,7 +281,7 @@ func dumpglobls() {
funcsyms = nil
}
-// addGCLocals adds gcargs and gclocals symbols to Ctxt.Data.
+// addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data.
// It takes care not to add any duplicates.
// Though the object file format handles duplicates efficiently,
// storing only a single copy of the data,
@@ -299,6 +299,9 @@ func addGCLocals() {
Ctxt.Data = append(Ctxt.Data, gcsym)
seen[gcsym.Name] = true
}
+ if x := s.Func.StackObjects; x != nil {
+ ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.LOCAL)
+ }
}
}
diff --git a/src/cmd/compile/internal/gc/op_string.go b/src/cmd/compile/internal/gc/op_string.go
index 857234e45d..90b95d769f 100644
--- a/src/cmd/compile/internal/gc/op_string.go
+++ b/src/cmd/compile/internal/gc/op_string.go
@@ -4,9 +4,9 @@ package gc
import "strconv"
-const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDARRAYBYTESTRARRAYBYTESTRTMPARRAYRUNESTRSTRARRAYBYTESTRARRAYBYTETMPSTRARRAYRUNEASAS2AS2FUNCAS2RECVAS2MAPRAS2DOTTYPEASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECMPIFACECMPSTRCOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTINDINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMULDIVMODLSHRSHANDANDNOTNEWNOTCOMPLUSMINUSORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASEXCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELPROCRANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDDDDARGINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARKILLVARLIVEINDREGSPRETJMPGETGEND"
+const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDARRAYBYTESTRARRAYBYTESTRTMPARRAYRUNESTRSTRARRAYBYTESTRARRAYBYTETMPSTRARRAYRUNEASAS2AS2FUNCAS2RECVAS2MAPRAS2DOTTYPEASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECMPIFACECMPSTRCOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTINDINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMULDIVMODLSHRSHANDANDNOTNEWNOTCOMPLUSMINUSORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASEXCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELPROCRANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDDDDARGINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARDEFVARKILLVARLIVEINDREGSPRETJMPGETGEND"
-var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 73, 88, 100, 112, 127, 139, 141, 144, 151, 158, 165, 175, 179, 183, 191, 199, 208, 216, 219, 224, 231, 239, 245, 252, 258, 267, 275, 283, 289, 293, 302, 309, 313, 316, 323, 331, 339, 346, 352, 355, 361, 368, 376, 380, 387, 395, 397, 399, 401, 403, 405, 407, 410, 415, 423, 426, 435, 438, 442, 450, 457, 466, 469, 472, 475, 478, 481, 484, 490, 493, 496, 499, 503, 508, 512, 517, 522, 528, 533, 537, 542, 550, 558, 564, 573, 580, 584, 591, 598, 606, 610, 614, 618, 625, 632, 640, 646, 651, 656, 660, 665, 673, 678, 683, 687, 690, 698, 702, 704, 709, 713, 718, 724, 730, 736, 742, 747, 751, 758, 764, 769, 775, 778, 784, 791, 796, 800, 805, 809, 819, 824, 832, 839, 846, 854, 860, 864, 867}
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 73, 88, 100, 112, 127, 139, 141, 144, 151, 158, 165, 175, 179, 183, 191, 199, 208, 216, 219, 224, 231, 239, 245, 252, 258, 267, 275, 283, 289, 293, 302, 309, 313, 316, 323, 331, 339, 346, 352, 355, 361, 368, 376, 380, 387, 395, 397, 399, 401, 403, 405, 407, 410, 415, 423, 426, 435, 438, 442, 450, 457, 466, 469, 472, 475, 478, 481, 484, 490, 493, 496, 499, 503, 508, 512, 517, 522, 528, 533, 537, 542, 550, 558, 564, 573, 580, 584, 591, 598, 606, 610, 614, 618, 625, 632, 640, 646, 651, 656, 660, 665, 673, 678, 683, 687, 690, 698, 702, 704, 709, 713, 718, 724, 730, 736, 742, 747, 751, 758, 764, 769, 775, 778, 784, 791, 796, 800, 805, 809, 819, 824, 832, 838, 845, 852, 860, 866, 870, 873}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {
diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go
index dce68a6c17..1e22ecfcdf 100644
--- a/src/cmd/compile/internal/gc/order.go
+++ b/src/cmd/compile/internal/gc/order.go
@@ -109,8 +109,7 @@ func (o *Order) cheapExpr(n *Node) *Node {
if l == n.Left {
return n
}
- a := n.copy()
- a.Orig = a
+ a := n.sepcopy()
a.Left = l
return typecheck(a, Erv)
}
@@ -135,8 +134,7 @@ func (o *Order) safeExpr(n *Node) *Node {
if l == n.Left {
return n
}
- a := n.copy()
- a.Orig = a
+ a := n.sepcopy()
a.Left = l
return typecheck(a, Erv)
@@ -145,8 +143,7 @@ func (o *Order) safeExpr(n *Node) *Node {
if l == n.Left {
return n
}
- a := n.copy()
- a.Orig = a
+ a := n.sepcopy()
a.Left = l
return typecheck(a, Erv)
@@ -161,8 +158,7 @@ func (o *Order) safeExpr(n *Node) *Node {
if l == n.Left && r == n.Right {
return n
}
- a := n.copy()
- a.Orig = a
+ a := n.sepcopy()
a.Left = l
a.Right = r
return typecheck(a, Erv)
diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go
index 563eb9e966..e6bbf04400 100644
--- a/src/cmd/compile/internal/gc/pgen.go
+++ b/src/cmd/compile/internal/gc/pgen.go
@@ -233,6 +233,26 @@ func compile(fn *Node) {
// Set up the function's LSym early to avoid data races with the assemblers.
fn.Func.initLSym()
+ // Make sure type syms are declared for all types that might
+ // be types of stack objects. We need to do this here
+ // because symbols must be allocated before the parallel
+ // phase of the compiler.
+ if fn.Func.lsym != nil { // not func _(){}
+ for _, n := range fn.Func.Dcl {
+ switch n.Class() {
+ case PPARAM, PPARAMOUT, PAUTO:
+ if livenessShouldTrack(n) && n.Addrtaken() {
+ dtypesym(n.Type)
+ // Also make sure we allocate a linker symbol
+ // for the stack object data, for the same reason.
+ if fn.Func.lsym.Func.StackObjects == nil {
+ fn.Func.lsym.Func.StackObjects = lookup(fmt.Sprintf("%s.stkobj", fn.funcname())).Linksym()
+ }
+ }
+ }
+ }
+ }
+
if compilenow() {
compileSSA(fn, 0)
} else {
diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go
index e070a5cd1a..563b425db7 100644
--- a/src/cmd/compile/internal/gc/plive.go
+++ b/src/cmd/compile/internal/gc/plive.go
@@ -78,6 +78,10 @@ import (
// that its argument is certainly dead, for use when the liveness analysis
// would not otherwise be able to deduce that fact.
+// TODO: get rid of OpVarKill here. It's useful for stack frame allocation
+// so the compiler can allocate two temps to the same location. Here it's now
+// useless, since the implementation of stack objects.
+
// BlockEffects summarizes the liveness effects on an SSA block.
type BlockEffects struct {
// Computed during Liveness.prologue using only the content of
@@ -85,23 +89,15 @@ type BlockEffects struct {
//
// uevar: upward exposed variables (used before set in block)
// varkill: killed variables (set in block)
- // avarinit: addrtaken variables set or used (proof of initialization)
- uevar varRegVec
- varkill varRegVec
- avarinit bvec
+ uevar varRegVec
+ varkill varRegVec
// Computed during Liveness.solve using control flow information:
//
// livein: variables live at block entry
// liveout: variables live at block exit
- // avarinitany: addrtaken variables possibly initialized at block exit
- // (initialized in block or at exit from any predecessor block)
- // avarinitall: addrtaken variables certainly initialized at block exit
- // (initialized in block or at exit from all predecessor blocks)
- livein varRegVec
- liveout varRegVec
- avarinitany bvec
- avarinitall bvec
+ livein varRegVec
+ liveout varRegVec
}
// A collection of global state used by liveness analysis.
@@ -186,10 +182,9 @@ func (idx LivenessIndex) Valid() bool {
}
type progeffectscache struct {
- textavarinit []int32
- retuevar []int32
- tailuevar []int32
- initialized bool
+ retuevar []int32
+ tailuevar []int32
+ initialized bool
}
// varRegVec contains liveness bitmaps for variables and registers.
@@ -264,24 +259,13 @@ func (lv *Liveness) initcache() {
// all the parameters for correctness, and similarly it must not
// read the out arguments - they won't be set until the new
// function runs.
-
lv.cache.tailuevar = append(lv.cache.tailuevar, int32(i))
- if node.Addrtaken() {
- lv.cache.textavarinit = append(lv.cache.textavarinit, int32(i))
- }
-
case PPARAMOUT:
- // If the result had its address taken, it is being tracked
- // by the avarinit code, which does not use uevar.
- // If we added it to uevar too, we'd not see any kill
- // and decide that the variable was live entry, which it is not.
- // So only use uevar in the non-addrtaken case.
- // The p.to.type == obj.TYPE_NONE limits the bvset to
- // non-tail-call return instructions; see note below for details.
- if !node.Addrtaken() {
- lv.cache.retuevar = append(lv.cache.retuevar, int32(i))
- }
+ // All results are live at every return point.
+ // Note that this point is after escaping return values
+ // are copied back to the stack using their PAUTOHEAP references.
+ lv.cache.retuevar = append(lv.cache.retuevar, int32(i))
}
}
}
@@ -291,21 +275,13 @@ func (lv *Liveness) initcache() {
//
// The possible flags are:
// uevar - used by the instruction
-// varkill - killed by the instruction
-// for variables without address taken, means variable was set
-// for variables with address taken, means variable was marked dead
-// avarinit - initialized or referred to by the instruction,
-// only for variables with address taken but not escaping to heap
-//
-// The avarinit output serves as a signal that the data has been
-// initialized, because any use of a variable must come after its
-// initialization.
+// varkill - killed by the instruction (set)
+// A kill happens after the use (for an instruction that updates a value, for example).
type liveEffect int
const (
uevar liveEffect = 1 << iota
varkill
- avarinit
)
// valueEffects returns the index of a variable in lv.vars and the
@@ -329,27 +305,15 @@ func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
}
var effect liveEffect
- if n.Addrtaken() {
- if v.Op != ssa.OpVarKill {
- effect |= avarinit
- }
- if v.Op == ssa.OpVarDef || v.Op == ssa.OpVarKill {
- effect |= varkill
- }
- } else {
- // Read is a read, obviously.
- // Addr by itself is also implicitly a read.
- //
- // Addr|Write means that the address is being taken
- // but only so that the instruction can write to the value.
- // It is not a read.
-
- if e&ssa.SymRead != 0 || e&(ssa.SymAddr|ssa.SymWrite) == ssa.SymAddr {
- effect |= uevar
- }
- if e&ssa.SymWrite != 0 && (!isfat(n.Type) || v.Op == ssa.OpVarDef) {
- effect |= varkill
- }
+ // Read is a read, obviously.
+ //
+ // Addr is a read also, as any subseqent holder of the pointer must be able
+ // to see all the values (including initialization) written so far.
+ if e&(ssa.SymRead|ssa.SymAddr) != 0 {
+ effect |= uevar
+ }
+ if e&ssa.SymWrite != 0 && (!isfat(n.Type) || v.Op == ssa.OpVarDef) {
+ effect |= varkill
}
if effect == 0 {
@@ -545,9 +509,6 @@ func newliveness(fn *Node, f *ssa.Func, vars []*Node, idx map[*Node]int32, stkpt
be.varkill = varRegVec{vars: bulk.next()}
be.livein = varRegVec{vars: bulk.next()}
be.liveout = varRegVec{vars: bulk.next()}
- be.avarinit = bulk.next()
- be.avarinitany = bulk.next()
- be.avarinitall = bulk.next()
}
lv.livenessMap.reset(lv.f.NumValues())
@@ -869,19 +830,6 @@ func (lv *Liveness) prologue() {
}
be.uevar.regs |= regUevar
}
-
- // Walk the block instructions forward to update avarinit bits.
- // avarinit describes the effect at the end of the block, not the beginning.
- for _, val := range b.Values {
- pos, e := lv.valueEffects(val)
- // No need for regEffects because registers never appear in avarinit.
- if e&varkill != 0 {
- be.avarinit.Unset(pos)
- }
- if e&avarinit != 0 {
- be.avarinit.Set(pos)
- }
- }
}
}
@@ -892,51 +840,10 @@ func (lv *Liveness) solve() {
nvars := int32(len(lv.vars))
newlivein := varRegVec{vars: bvalloc(nvars)}
newliveout := varRegVec{vars: bvalloc(nvars)}
- any := bvalloc(nvars)
- all := bvalloc(nvars)
- // Push avarinitall, avarinitany forward.
- // avarinitall says the addressed var is initialized along all paths reaching the block exit.
- // avarinitany says the addressed var is initialized along some path reaching the block exit.
- for _, b := range lv.f.Blocks {
- be := lv.blockEffects(b)
- if b == lv.f.Entry {
- be.avarinitall.Copy(be.avarinit)
- } else {
- be.avarinitall.Clear()
- be.avarinitall.Not()
- }
- be.avarinitany.Copy(be.avarinit)
- }
-
- // Walk blocks in the general direction of propagation (RPO
- // for avarinit{any,all}, and PO for live{in,out}). This
- // improves convergence.
+ // Walk blocks in postorder ordering. This improves convergence.
po := lv.f.Postorder()
- for change := true; change; {
- change = false
- for i := len(po) - 1; i >= 0; i-- {
- b := po[i]
- be := lv.blockEffects(b)
- lv.avarinitanyall(b, any, all)
-
- any.AndNot(any, be.varkill.vars)
- all.AndNot(all, be.varkill.vars)
- any.Or(any, be.avarinit)
- all.Or(all, be.avarinit)
- if !any.Eq(be.avarinitany) {
- change = true
- be.avarinitany.Copy(any)
- }
-
- if !all.Eq(be.avarinitall) {
- change = true
- be.avarinitall.Copy(all)
- }
- }
- }
-
// Iterate through the blocks in reverse round-robin fashion. A work
// queue might be slightly faster. As is, the number of iterations is
// so low that it hardly seems to be worth the complexity.
@@ -957,7 +864,7 @@ func (lv *Liveness) solve() {
newliveout.vars.Set(pos)
}
case ssa.BlockExit:
- // nothing to do
+ // panic exit - nothing to do
default:
// A variable is live on output from this block
// if it is live on input to some successor.
@@ -975,7 +882,7 @@ func (lv *Liveness) solve() {
}
// A variable is live on input to this block
- // if it is live on output from this block and
+ // if it is used by this block, or live on output from this block and
// not set by the code in this block.
//
// in[b] = uevar[b] \cup (out[b] \setminus varkill[b])
@@ -990,8 +897,6 @@ func (lv *Liveness) solve() {
func (lv *Liveness) epilogue() {
nvars := int32(len(lv.vars))
liveout := varRegVec{vars: bvalloc(nvars)}
- any := bvalloc(nvars)
- all := bvalloc(nvars)
livedefer := bvalloc(nvars) // always-live variables
// If there is a defer (that could recover), then all output
@@ -1017,6 +922,9 @@ func (lv *Liveness) epilogue() {
livedefer.Set(int32(i))
}
if n.IsOutputParamHeapAddr() {
+ // This variable will be overwritten early in the function
+ // prologue (from the result of a mallocgc) but we need to
+ // zero it in case that malloc causes a stack scan.
n.Name.SetNeedzero(true)
livedefer.Set(int32(i))
}
@@ -1033,9 +941,6 @@ func (lv *Liveness) epilogue() {
{
// Reserve an entry for function entry.
live := bvalloc(nvars)
- for _, pos := range lv.cache.textavarinit {
- live.Set(pos)
- }
lv.livevars = append(lv.livevars, varRegVec{vars: live})
}
@@ -1043,53 +948,14 @@ func (lv *Liveness) epilogue() {
be := lv.blockEffects(b)
firstBitmapIndex := len(lv.livevars)
- // Compute avarinitany and avarinitall for entry to block.
- // This duplicates information known during Liveness.solve
- // but avoids storing two more vectors for each block.
- lv.avarinitanyall(b, any, all)
-
// Walk forward through the basic block instructions and
// allocate liveness maps for those instructions that need them.
- // Seed the maps with information about the addrtaken variables.
for _, v := range b.Values {
- pos, e := lv.valueEffects(v)
- // No need for regEffects because registers never appear in avarinit.
- if e&varkill != 0 {
- any.Unset(pos)
- all.Unset(pos)
- }
- if e&avarinit != 0 {
- any.Set(pos)
- all.Set(pos)
- }
-
if !lv.issafepoint(v) {
continue
}
- // Annotate ambiguously live variables so that they can
- // be zeroed at function entry and at VARKILL points.
- // liveout is dead here and used as a temporary.
- liveout.vars.AndNot(any, all)
- if !liveout.vars.IsEmpty() {
- for pos := int32(0); pos < liveout.vars.n; pos++ {
- if !liveout.vars.Get(pos) {
- continue
- }
- all.Set(pos) // silence future warnings in this block
- n := lv.vars[pos]
- if !n.Name.Needzero() {
- n.Name.SetNeedzero(true)
- if debuglive >= 1 {
- Warnl(v.Pos, "%v: %L is ambiguously live", lv.fn.Func.Nname, n)
- }
- }
- }
- }
-
- // Live stuff first.
live := bvalloc(nvars)
- live.Copy(any)
lv.livevars = append(lv.livevars, varRegVec{vars: live})
}
@@ -1128,6 +994,17 @@ func (lv *Liveness) epilogue() {
Fatalf("bad index for entry point: %v", index)
}
+ // Check to make sure only input variables are live.
+ for i, n := range lv.vars {
+ if !liveout.vars.Get(int32(i)) {
+ continue
+ }
+ if n.Class() == PPARAM {
+ continue // ok
+ }
+ Fatalf("bad live variable at entry of %v: %L", lv.fn.Func.Nname, n)
+ }
+
// Record live variables.
live := &lv.livevars[index]
live.Or(*live, liveout)
@@ -1330,28 +1207,6 @@ func clobberPtr(b *ssa.Block, v *Node, offset int64) {
b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, v)
}
-func (lv *Liveness) avarinitanyall(b *ssa.Block, any, all bvec) {
- if len(b.Preds) == 0 {
- any.Clear()
- all.Clear()
- for _, pos := range lv.cache.textavarinit {
- any.Set(pos)
- all.Set(pos)
- }
- return
- }
-
- be := lv.blockEffects(b.Preds[0].Block())
- any.Copy(be.avarinitany)
- all.Copy(be.avarinitall)
-
- for _, pred := range b.Preds[1:] {
- be := lv.blockEffects(pred.Block())
- any.Or(any, be.avarinitany)
- all.And(all, be.avarinitall)
- }
-}
-
// Compact coalesces identical bitmaps from lv.livevars into the sets
// lv.stackMapSet and lv.regMaps.
//
@@ -1559,7 +1414,6 @@ func (lv *Liveness) printDebug() {
printed = false
printed = lv.printeffect(printed, "uevar", pos, effect&uevar != 0, regUevar)
printed = lv.printeffect(printed, "varkill", pos, effect&varkill != 0, regKill)
- printed = lv.printeffect(printed, "avarinit", pos, effect&avarinit != 0, 0)
if printed {
fmt.Printf("\n")
}
@@ -1596,9 +1450,6 @@ func (lv *Liveness) printDebug() {
printed = false
printed = lv.printbvec(printed, "varkill", be.varkill)
printed = lv.printbvec(printed, "liveout", be.liveout)
- printed = lv.printbvec(printed, "avarinit", varRegVec{vars: be.avarinit})
- printed = lv.printbvec(printed, "avarinitany", varRegVec{vars: be.avarinitany})
- printed = lv.printbvec(printed, "avarinitall", varRegVec{vars: be.avarinitall})
if printed {
fmt.Printf("\n")
}
diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go
index c6455c3693..9d1114fa43 100644
--- a/src/cmd/compile/internal/gc/sinit.go
+++ b/src/cmd/compile/internal/gc/sinit.go
@@ -349,15 +349,13 @@ func staticcopy(l *Node, r *Node, out *[]*Node) bool {
gdata(n, e.Expr, int(n.Type.Width))
continue
}
- ll := n.copy()
- ll.Orig = ll // completely separate copy
+ ll := n.sepcopy()
if staticassign(ll, e.Expr, out) {
continue
}
// Requires computation, but we're
// copying someone else's computation.
- rr := orig.copy()
- rr.Orig = rr // completely separate copy
+ rr := orig.sepcopy()
rr.Type = ll.Type
rr.Xoffset += e.Xoffset
setlineno(rr)
@@ -453,8 +451,7 @@ func staticassign(l *Node, r *Node, out *[]*Node) bool {
continue
}
setlineno(e.Expr)
- a := n.copy()
- a.Orig = a // completely separate copy
+ a := n.sepcopy()
if !staticassign(a, e.Expr, out) {
*out = append(*out, nod(OAS, a, e.Expr))
}
@@ -518,8 +515,7 @@ func staticassign(l *Node, r *Node, out *[]*Node) bool {
// Copy val directly into n.
n.Type = val.Type
setlineno(val)
- a := n.copy()
- a.Orig = a
+ a := n.sepcopy()
if !staticassign(a, val, out) {
*out = append(*out, nod(OAS, a, val))
}
@@ -843,6 +839,10 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
a = nod(OAS, x, nil)
a = typecheck(a, Etop)
init.Append(a) // zero new temp
+ } else {
+ // Declare that we're about to initialize all of x.
+ // (Which happens at the *vauto = vstat below.)
+ init.Append(nod(OVARDEF, x, nil))
}
a = nod(OADDR, x, nil)
@@ -853,6 +853,8 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) {
a = typecheck(a, Etop)
init.Append(a) // zero new temp
a = a.Left
+ } else {
+ init.Append(nod(OVARDEF, a, nil))
}
a = nod(OADDR, a, nil)
diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go
index 00ff7d4bd5..138ce08fec 100644
--- a/src/cmd/compile/internal/gc/ssa.go
+++ b/src/cmd/compile/internal/gc/ssa.go
@@ -16,6 +16,7 @@ import (
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
+ "cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
)
@@ -754,8 +755,8 @@ func (s *state) stmtList(l Nodes) {
// stmt converts the statement n to SSA and adds it to s.
func (s *state) stmt(n *Node) {
- if !(n.Op == OVARKILL || n.Op == OVARLIVE) {
- // OVARKILL and OVARLIVE are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
+ if !(n.Op == OVARKILL || n.Op == OVARLIVE || n.Op == OVARDEF) {
+ // OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
s.pushLine(n.Pos)
defer s.popLine()
}
@@ -1169,6 +1170,10 @@ func (s *state) stmt(n *Node) {
}
s.startBlock(bEnd)
+ case OVARDEF:
+ if !s.canSSA(n.Left) {
+ s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false)
+ }
case OVARKILL:
// Insert a varkill op to record that a variable is no longer live.
// We only care about liveness info at call sites, so putting the
@@ -3149,12 +3154,12 @@ func init() {
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0])
},
- sys.S390X)
+ sys.ARM64, sys.S390X)
addF("math", "Abs",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0])
},
- sys.PPC64)
+ sys.ARM64, sys.PPC64)
addF("math", "Copysign",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1])
@@ -3361,12 +3366,12 @@ func init() {
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft32, types.Types[TUINT32], args[0], args[1])
},
- sys.AMD64, sys.S390X)
+ sys.AMD64, sys.ARM64, sys.S390X)
addF("math/bits", "RotateLeft64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
return s.newValue2(ssa.OpRotateLeft64, types.Types[TUINT64], args[0], args[1])
},
- sys.AMD64, sys.S390X)
+ sys.AMD64, sys.ARM64, sys.S390X)
alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
@@ -3435,6 +3440,12 @@ func init() {
addF("math/bits", "OnesCount",
makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
sys.AMD64)
+ alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64)
+ addF("math/bits", "Mul64",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
+ },
+ sys.AMD64, sys.ARM64, sys.PPC64)
/******** sync/atomic ********/
@@ -4927,6 +4938,57 @@ func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
}
}
+// byXoffset implements sort.Interface for []*Node using Xoffset as the ordering.
+type byXoffset []*Node
+
+func (s byXoffset) Len() int { return len(s) }
+func (s byXoffset) Less(i, j int) bool { return s[i].Xoffset < s[j].Xoffset }
+func (s byXoffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func emitStackObjects(e *ssafn, pp *Progs) {
+ var vars []*Node
+ for _, n := range e.curfn.Func.Dcl {
+ if livenessShouldTrack(n) && n.Addrtaken() {
+ vars = append(vars, n)
+ }
+ }
+ if len(vars) == 0 {
+ return
+ }
+
+ // Sort variables from lowest to highest address.
+ sort.Sort(byXoffset(vars))
+
+ // Populate the stack object data.
+ // Format must match runtime/stack.go:stackObjectRecord.
+ x := e.curfn.Func.lsym.Func.StackObjects
+ off := 0
+ off = duintptr(x, off, uint64(len(vars)))
+ for _, v := range vars {
+ // Note: arguments and return values have non-negative Xoffset,
+ // in which case the offset is relative to argp.
+ // Locals have a negative Xoffset, in which case the offset is relative to varp.
+ off = duintptr(x, off, uint64(v.Xoffset))
+ if !typesym(v.Type).Siggen() {
+ Fatalf("stack object's type symbol not generated for type %s", v.Type)
+ }
+ off = dsymptr(x, off, dtypesym(v.Type), 0)
+ }
+
+ // Emit a funcdata pointing at the stack object data.
+ p := pp.Prog(obj.AFUNCDATA)
+ Addrconst(&p.From, objabi.FUNCDATA_StackObjects)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = x
+
+ if debuglive != 0 {
+ for _, v := range vars {
+ Warnl(v.Pos, "stack object %v %s", v, v.Type.String())
+ }
+ }
+}
+
// genssa appends entries to pp for each instruction in f.
func genssa(f *ssa.Func, pp *Progs) {
var s SSAGenState
@@ -4934,6 +4996,7 @@ func genssa(f *ssa.Func, pp *Progs) {
e := f.Frontend().(*ssafn)
s.livenessMap = liveness(e, f)
+ emitStackObjects(e, pp)
// Remember where each block starts.
s.bstart = make([]*obj.Prog, f.NumBlocks())
@@ -5003,24 +5066,8 @@ func genssa(f *ssa.Func, pp *Progs) {
case ssa.OpGetG:
// nothing to do when there's a g register,
// and checkLower complains if there's not
- case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive:
+ case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpVarKill:
// nothing to do; already used by liveness
- case ssa.OpVarKill:
- // Zero variable if it is ambiguously live.
- // After the VARKILL anything this variable references
- // might be collected. If it were to become live again later,
- // the GC will see references to already-collected objects.
- // See issue 20029.
- n := v.Aux.(*Node)
- if n.Name.Needzero() {
- if n.Class() != PAUTO {
- v.Fatalf("zero of variable which isn't PAUTO %v", n)
- }
- if n.Type.Size()%int64(Widthptr) != 0 {
- v.Fatalf("zero of variable not a multiple of ptr size %v", n)
- }
- thearch.ZeroAuto(s.pp, n)
- }
case ssa.OpPhi:
CheckLoweredPhi(v)
case ssa.OpConvert:
@@ -5048,7 +5095,6 @@ func genssa(f *ssa.Func, pp *Progs) {
}
}
}
-
// Emit control flow instructions for block
var next *ssa.Block
if i < len(f.Blocks)-1 && Debug['N'] == 0 {
diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go
index 61a3b2385d..7e450e2e66 100644
--- a/src/cmd/compile/internal/gc/subr.go
+++ b/src/cmd/compile/internal/gc/subr.go
@@ -364,9 +364,35 @@ func nodSym(op Op, left *Node, sym *types.Sym) *Node {
return n
}
+// rawcopy returns a shallow copy of n.
+// Note: copy or sepcopy (rather than rawcopy) is usually the
+// correct choice (see comment with Node.copy, below).
+func (n *Node) rawcopy() *Node {
+ copy := *n
+ return &copy
+}
+
+// sepcopy returns a separate shallow copy of n, with the copy's
+// Orig pointing to itself.
+func (n *Node) sepcopy() *Node {
+ copy := *n
+ copy.Orig = &copy
+ return &copy
+}
+
+// copy returns shallow copy of n and adjusts the copy's Orig if
+// necessary: In general, if n.Orig points to itself, the copy's
+// Orig should point to itself as well. Otherwise, if n is modified,
+// the copy's Orig node appears modified, too, and then doesn't
+// represent the original node anymore.
+// (This caused the wrong complit Op to be used when printing error
+// messages; see issues #26855, #27765).
func (n *Node) copy() *Node {
- n2 := *n
- return &n2
+ copy := *n
+ if n.Orig == n {
+ copy.Orig = &copy
+ }
+ return &copy
}
// methcmp sorts methods by symbol.
@@ -412,8 +438,7 @@ func treecopy(n *Node, pos src.XPos) *Node {
switch n.Op {
default:
- m := n.copy()
- m.Orig = m
+ m := n.sepcopy()
m.Left = treecopy(n.Left, pos)
m.Right = treecopy(n.Right, pos)
m.List.Set(listtreecopy(n.List.Slice(), pos))
diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go
index df23b83f29..eb2ab6b916 100644
--- a/src/cmd/compile/internal/gc/syntax.go
+++ b/src/cmd/compile/internal/gc/syntax.go
@@ -739,6 +739,7 @@ const (
OCLOSUREVAR // variable reference at beginning of closure function
OCFUNC // reference to c function pointer (not go func value)
OCHECKNIL // emit code to ensure pointer/interface not nil
+ OVARDEF // variable is about to be fully initialized
OVARKILL // variable is dead
OVARLIVE // variable is alive
OINDREGSP // offset plus indirect of REGSP, such as 8(SP).
diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go
index bb78d8bf73..4831ecca34 100644
--- a/src/cmd/compile/internal/gc/typecheck.go
+++ b/src/cmd/compile/internal/gc/typecheck.go
@@ -2436,7 +2436,7 @@ func isMethodApplicable(t *types.Type, m *types.Field) bool {
}
func derefall(t *types.Type) *types.Type {
- for t != nil && t.Etype == types.Tptr {
+ for t != nil && t.IsPtr() {
t = t.Elem()
}
return t
@@ -2506,20 +2506,20 @@ func lookdot(n *Node, t *types.Type, dostrcmp int) *types.Field {
dowidth(tt)
rcvr := f2.Type.Recv().Type
if !eqtype(rcvr, tt) {
- if rcvr.Etype == types.Tptr && eqtype(rcvr.Elem(), tt) {
+ if rcvr.IsPtr() && eqtype(rcvr.Elem(), tt) {
checklvalue(n.Left, "call pointer method on")
n.Left = nod(OADDR, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, Etype|Erv)
- } else if tt.Etype == types.Tptr && rcvr.Etype != types.Tptr && eqtype(tt.Elem(), rcvr) {
+ } else if tt.IsPtr() && !rcvr.IsPtr() && eqtype(tt.Elem(), rcvr) {
n.Left = nod(OIND, n.Left, nil)
n.Left.SetImplicit(true)
n.Left = typecheck(n.Left, Etype|Erv)
- } else if tt.Etype == types.Tptr && tt.Elem().Etype == types.Tptr && eqtype(derefall(tt), derefall(rcvr)) {
+ } else if tt.IsPtr() && tt.Elem().IsPtr() && eqtype(derefall(tt), derefall(rcvr)) {
yyerror("calling method %v with receiver %L requires explicit dereference", n.Sym, n.Left)
- for tt.Etype == types.Tptr {
+ for tt.IsPtr() {
// Stop one level early for method with pointer receiver.
- if rcvr.Etype == types.Tptr && tt.Elem().Etype != types.Tptr {
+ if rcvr.IsPtr() && !tt.Elem().IsPtr() {
break
}
n.Left = nod(OIND, n.Left, nil)
@@ -3298,7 +3298,8 @@ func samesafeexpr(l *Node, r *Node) bool {
case ODOT, ODOTPTR:
return l.Sym != nil && r.Sym != nil && l.Sym == r.Sym && samesafeexpr(l.Left, r.Left)
- case OIND, OCONVNOP:
+ case OIND, OCONVNOP,
+ ONOT, OCOM, OPLUS, OMINUS:
return samesafeexpr(l.Left, r.Left)
case OCONV:
@@ -3306,7 +3307,8 @@ func samesafeexpr(l *Node, r *Node) bool {
// Allow only numeric-ish types. This is a bit conservative.
return issimple[l.Type.Etype] && samesafeexpr(l.Left, r.Left)
- case OINDEX, OINDEXMAP:
+ case OINDEX, OINDEXMAP,
+ OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right)
case OLITERAL:
@@ -3339,7 +3341,7 @@ func typecheckas(n *Node) {
checkassign(n, n.Left)
if n.Right != nil && n.Right.Type != nil {
if n.Right.Type.IsFuncArgStruct() {
- yyerror("assignment mismatch: 1 variable but %d values", n.Right.Type.NumFields())
+ yyerror("assignment mismatch: 1 variable but %v returns %d values", n.Right.Left, n.Right.Type.NumFields())
// Multi-value RHS isn't actually valid for OAS; nil out
// to indicate failed typechecking.
n.Right.Type = nil
@@ -3484,7 +3486,12 @@ func typecheckas2(n *Node) {
}
mismatch:
- yyerror("assignment mismatch: %d variables but %d values", cl, cr)
+ switch r.Op {
+ default:
+ yyerror("assignment mismatch: %d variable but %d values", cl, cr)
+ case OCALLFUNC, OCALLMETH, OCALLINTER:
+ yyerror("assignment mismatch: %d variables but %v returns %d values", cl, r.Left, cr)
+ }
// second half of dance
out:
@@ -3640,25 +3647,22 @@ func typecheckdeftype(n *Node) {
}
func typecheckdef(n *Node) {
- lno := lineno
- setlineno(n)
+ lno := setlineno(n)
if n.Op == ONONAME {
if !n.Diag() {
n.SetDiag(true)
- if n.Pos.IsKnown() {
- lineno = n.Pos
- }
// Note: adderrorname looks for this string and
// adds context about the outer expression
- yyerror("undefined: %v", n.Sym)
+ yyerrorl(lineno, "undefined: %v", n.Sym)
}
-
+ lineno = lno
return
}
if n.Walkdef() == 1 {
+ lineno = lno
return
}
@@ -3701,20 +3705,19 @@ func typecheckdef(n *Node) {
e := n.Name.Defn
n.Name.Defn = nil
if e == nil {
- lineno = n.Pos
Dump("typecheckdef nil defn", n)
- yyerror("xxx")
+ yyerrorl(n.Pos, "xxx")
}
e = typecheck(e, Erv)
if Isconst(e, CTNIL) {
- yyerror("const initializer cannot be nil")
+ yyerrorl(n.Pos, "const initializer cannot be nil")
goto ret
}
if e.Type != nil && e.Op != OLITERAL || !e.isGoConst() {
if !e.Diag() {
- yyerror("const initializer %v is not a constant", e)
+ yyerrorl(n.Pos, "const initializer %v is not a constant", e)
e.SetDiag(true)
}
@@ -3724,12 +3727,12 @@ func typecheckdef(n *Node) {
t := n.Type
if t != nil {
if !okforconst[t.Etype] {
- yyerror("invalid constant type %v", t)
+ yyerrorl(n.Pos, "invalid constant type %v", t)
goto ret
}
if !e.Type.IsUntyped() && !eqtype(t, e.Type) {
- yyerror("cannot use %L as type %v in const initializer", e, t)
+ yyerrorl(n.Pos, "cannot use %L as type %v in const initializer", e, t)
goto ret
}
diff --git a/src/cmd/compile/internal/gc/walk.go b/src/cmd/compile/internal/gc/walk.go
index 2993e08fc2..1b1d36b61d 100644
--- a/src/cmd/compile/internal/gc/walk.go
+++ b/src/cmd/compile/internal/gc/walk.go
@@ -1312,7 +1312,7 @@ opswitch:
b = conv(b, convType)
b = nod(OLSH, b, nodintconst(int64(8*offset)))
ncsubstr = nod(OOR, ncsubstr, b)
- csubstr = csubstr | int64(s[i+offset])<<uint8(8*offset)
+ csubstr |= int64(s[i+offset]) << uint8(8*offset)
}
csubstrPart := nodintconst(csubstr)
// Compare "step" bytes as once
@@ -1418,7 +1418,7 @@ opswitch:
// Maximum key and value size is 128 bytes, larger objects
// are stored with an indirection. So max bucket size is 2048+eps.
if !Isconst(hint, CTINT) ||
- !(hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) > 0) {
+ hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 {
// var bv bmap
bv := temp(bmap(t))
@@ -4052,7 +4052,7 @@ func wrapCall(n *Node, init *Nodes) *Node {
// The result of substArgTypes MUST be assigned back to old, e.g.
// n.Left = substArgTypes(n.Left, t1, t2)
func substArgTypes(old *Node, types_ ...*types.Type) *Node {
- n := old.copy() // make shallow copy
+ n := old.copy()
for _, t := range types_ {
dowidth(t)
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index fd226a2e98..0a7238850c 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -153,6 +153,24 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = y
}
+ case ssa.OpPPC64LoweredMuluhilo:
+ // MULHDU Rarg1, Rarg0, Reg0
+ // MULLD Rarg1, Rarg0, Reg1
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ p := s.Prog(ppc64.AMULHDU)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(ppc64.AMULLD)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.Reg = r0
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg1()
+
case ssa.OpPPC64LoweredAtomicAnd8,
ssa.OpPPC64LoweredAtomicOr8:
// LWSYNC
@@ -717,7 +735,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// Not a go.string, generate a normal load
fallthrough
- case ssa.OpPPC64MOVWload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload:
+ case ssa.OpPPC64MOVWload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload, ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
@@ -739,10 +757,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
- case ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
+ case ssa.OpPPC64MOVDloadidx, ssa.OpPPC64MOVWloadidx, ssa.OpPPC64MOVHloadidx, ssa.OpPPC64MOVWZloadidx,
+ ssa.OpPPC64MOVBZloadidx, ssa.OpPPC64MOVHZloadidx, ssa.OpPPC64FMOVDloadidx, ssa.OpPPC64FMOVSloadidx,
+ ssa.OpPPC64MOVDBRloadidx, ssa.OpPPC64MOVWBRloadidx, ssa.OpPPC64MOVHBRloadidx:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
+ p.From.Index = v.Args[1].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -755,17 +776,21 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
- case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore:
+ case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore, ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
- case ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
+
+ case ssa.OpPPC64MOVDstoreidx, ssa.OpPPC64MOVWstoreidx, ssa.OpPPC64MOVHstoreidx, ssa.OpPPC64MOVBstoreidx,
+ ssa.OpPPC64FMOVDstoreidx, ssa.OpPPC64FMOVSstoreidx, ssa.OpPPC64MOVDBRstoreidx, ssa.OpPPC64MOVWBRstoreidx,
+ ssa.OpPPC64MOVHBRstoreidx:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
- p.From.Reg = v.Args[1].Reg()
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Index = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
index 8131f1117a..f6aa37e884 100644
--- a/src/cmd/compile/internal/ssa/gen/386.rules
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -44,8 +44,8 @@
(Xor(32|16|8) x y) -> (XORL x y)
(Neg(32|16|8) x) -> (NEGL x)
-(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
-(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
+(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <typ.Float32> [auxFrom32F(float32(math.Copysign(0, -1)))]))
+(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <typ.Float64> [auxFrom64F(math.Copysign(0, -1))]))
(Neg32F x) && config.use387 -> (FCHS x)
(Neg64F x) && config.use387 -> (FCHS x)
@@ -1116,10 +1116,10 @@
(XORL x x) -> (MOVLconst [0])
// checking AND against 0.
-(CMP(L|W|B)const (ANDL x y) [0]) -> (TEST(L|W|B) x y)
-(CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x)
-(CMPWconst (ANDLconst [c] x) [0]) -> (TESTWconst [int64(int16(c))] x)
-(CMPBconst (ANDLconst [c] x) [0]) -> (TESTBconst [int64(int8(c))] x)
+(CMP(L|W|B)const l:(ANDL x y) [0]) && l.Uses==1 -> (TEST(L|W|B) x y)
+(CMPLconst l:(ANDLconst [c] x) [0]) && l.Uses==1 -> (TESTLconst [c] x)
+(CMPWconst l:(ANDLconst [c] x) [0]) && l.Uses==1 -> (TESTWconst [int64(int16(c))] x)
+(CMPBconst l:(ANDLconst [c] x) [0]) && l.Uses==1 -> (TESTBconst [int64(int8(c))] x)
// TEST %reg,%reg is shorter than CMP
(CMP(L|W|B)const x [0]) -> (TEST(L|W|B) x x)
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 4c11f8d036..f9ac5e4dce 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -41,8 +41,8 @@
(Com(64|32|16|8) x) -> (NOT(Q|L|L|L) x)
(Neg(64|32|16|8) x) -> (NEG(Q|L|L|L) x)
-(Neg32F x) -> (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
-(Neg64F x) -> (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
+(Neg32F x) -> (PXOR x (MOVSSconst <typ.Float32> [auxFrom32F(float32(math.Copysign(0, -1)))]))
+(Neg64F x) -> (PXOR x (MOVSDconst <typ.Float64> [auxFrom64F(math.Copysign(0, -1))]))
// Lowering boolean ops
(AndB x y) -> (ANDL x y)
@@ -709,7 +709,17 @@
(ANDL x (MOVLconst [c])) -> (ANDLconst [c] x)
(AND(L|Q)const [c] (AND(L|Q)const [d] x)) -> (AND(L|Q)const [c & d] x)
+(BTR(L|Q)const [c] (AND(L|Q)const [d] x)) -> (AND(L|Q)const [d &^ (1<<uint32(c))] x)
+(AND(L|Q)const [c] (BTR(L|Q)const [d] x)) -> (AND(L|Q)const [c &^ (1<<uint32(d))] x)
+(BTR(L|Q)const [c] (BTR(L|Q)const [d] x)) -> (AND(L|Q)const [^(1<<uint32(c) | 1<<uint32(d))] x)
(XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) -> (XOR(L|Q)const [c ^ d] x)
+(BTC(L|Q)const [c] (XOR(L|Q)const [d] x)) -> (XOR(L|Q)const [d ^ 1<<uint32(c)] x)
+(XOR(L|Q)const [c] (BTC(L|Q)const [d] x)) -> (XOR(L|Q)const [c ^ 1<<uint32(d)] x)
+(BTC(L|Q)const [c] (BTC(L|Q)const [d] x)) -> (XOR(L|Q)const [1<<uint32(c) ^ 1<<uint32(d)] x)
+(OR(L|Q)const [c] (OR(L|Q)const [d] x)) -> (OR(L|Q)const [c | d] x)
+(OR(L|Q)const [c] (BTS(L|Q)const [d] x)) -> (OR(L|Q)const [c | 1<<uint32(d)] x)
+(BTS(L|Q)const [c] (OR(L|Q)const [d] x)) -> (OR(L|Q)const [d | 1<<uint32(c)] x)
+(BTS(L|Q)const [c] (BTS(L|Q)const [d] x)) -> (OR(L|Q)const [1<<uint32(d) | 1<<uint32(c)] x)
(MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x)
(MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x)
@@ -1042,18 +1052,23 @@
((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {sym} val base mem)
((ADD|SUB|AND|OR|XOR)Lload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
+(CMP(Q|L|W|B)load [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
+ (CMP(Q|L|W|B)load [off1+off2] {sym} base val mem)
+(CMP(Q|L|W|B)constload [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
+ (CMP(Q|L|W|B)constload [ValAndOff(valoff1).add(off2)] {sym} base mem)
+
((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(off1+off2) ->
((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
-((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
- ((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
-((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
- ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
-((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
- ((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {sym} base val mem)
-((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
- ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)
+((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd(off2) ->
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
+ ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {sym} base val mem)
+((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(off1+off2) ->
+ ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {sym} base val mem)
// Fold constants into stores.
(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validValAndOff(c,off) ->
@@ -1088,24 +1103,31 @@
((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+(CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ && ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) ->
+ (CMP(Q|L|W|B)constload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+
((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
-((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) ->
- ((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
-((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
&& ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2) ->
- ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
-((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- ((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
-((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
&& is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
// generating indexed loads and stores
(MOV(B|W|L|Q|SS|SD)load [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
@@ -1412,6 +1434,12 @@
(XORLconst [c] (MOVLconst [d])) -> (MOVLconst [c^d])
(NOTQ (MOVQconst [c])) -> (MOVQconst [^c])
(NOTL (MOVLconst [c])) -> (MOVLconst [^c])
+(BTSQconst [c] (MOVQconst [d])) -> (MOVQconst [d|(1<<uint32(c))])
+(BTSLconst [c] (MOVLconst [d])) -> (MOVLconst [d|(1<<uint32(c))])
+(BTRQconst [c] (MOVQconst [d])) -> (MOVQconst [d&^(1<<uint32(c))])
+(BTRLconst [c] (MOVLconst [d])) -> (MOVLconst [d&^(1<<uint32(c))])
+(BTCQconst [c] (MOVQconst [d])) -> (MOVQconst [d^(1<<uint32(c))])
+(BTCLconst [c] (MOVLconst [d])) -> (MOVLconst [d^(1<<uint32(c))])
// generic simplifications
// TODO: more of this
@@ -2292,11 +2320,11 @@
((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
-(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) ->
- ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) ->
+ ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Lmodify [off] {sym} ptr x mem)
(MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) -> ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
-(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) ->
- ((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l) ->
+ ((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off] {sym} ptr x mem)
// Merge ADDQconst and LEAQ into atomic loads.
(MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
@@ -2380,12 +2408,12 @@
(MOVWQZX (MOVBQZX x)) -> (MOVBQZX x)
(MOVBQZX (MOVBQZX x)) -> (MOVBQZX x)
-(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) ->
- ((ADD|AND|OR|XOR)Qconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
-(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) ->
- ((ADD|AND|OR|XOR)Lconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) ->
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Qconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a) ->
+ ((ADD|AND|OR|XOR|BTC|BTR|BTS)Lconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
// float <-> int register moves, with no conversion.
// These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
index 512df99694..017c07071d 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -272,14 +272,14 @@ func init() {
{name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags"}, // arg0 compare to arg1, f32
{name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags"}, // arg0 compare to arg1, f64
- {name: "BTL", argLength: 2, reg: gp2flags, asm: "BTL", typ: "Flags"}, // test whether bit arg0 % 32 in arg1 is set
- {name: "BTQ", argLength: 2, reg: gp2flags, asm: "BTQ", typ: "Flags"}, // test whether bit arg0 % 64 in arg1 is set
- {name: "BTCL", argLength: 2, reg: gp21, asm: "BTCL", resultInArg0: true, clobberFlags: true}, // complement bit arg0 % 32 in arg1
- {name: "BTCQ", argLength: 2, reg: gp21, asm: "BTCQ", resultInArg0: true, clobberFlags: true}, // complement bit arg0 % 64 in arg1
- {name: "BTRL", argLength: 2, reg: gp21, asm: "BTRL", resultInArg0: true, clobberFlags: true}, // reset bit arg0 % 32 in arg1
- {name: "BTRQ", argLength: 2, reg: gp21, asm: "BTRQ", resultInArg0: true, clobberFlags: true}, // reset bit arg0 % 64 in arg1
- {name: "BTSL", argLength: 2, reg: gp21, asm: "BTSL", resultInArg0: true, clobberFlags: true}, // set bit arg0 % 32 in arg1
- {name: "BTSQ", argLength: 2, reg: gp21, asm: "BTSQ", resultInArg0: true, clobberFlags: true}, // set bit arg0 % 64 in arg1
+ {name: "BTL", argLength: 2, reg: gp2flags, asm: "BTL", typ: "Flags"}, // test whether bit arg0%32 in arg1 is set
+ {name: "BTQ", argLength: 2, reg: gp2flags, asm: "BTQ", typ: "Flags"}, // test whether bit arg0%64 in arg1 is set
+ {name: "BTCL", argLength: 2, reg: gp21, asm: "BTCL", resultInArg0: true, clobberFlags: true}, // complement bit arg1%32 in arg0
+ {name: "BTCQ", argLength: 2, reg: gp21, asm: "BTCQ", resultInArg0: true, clobberFlags: true}, // complement bit arg1%64 in arg0
+ {name: "BTRL", argLength: 2, reg: gp21, asm: "BTRL", resultInArg0: true, clobberFlags: true}, // reset bit arg1%32 in arg0
+ {name: "BTRQ", argLength: 2, reg: gp21, asm: "BTRQ", resultInArg0: true, clobberFlags: true}, // reset bit arg1%64 in arg0
+ {name: "BTSL", argLength: 2, reg: gp21, asm: "BTSL", resultInArg0: true, clobberFlags: true}, // set bit arg1%32 in arg0
+ {name: "BTSQ", argLength: 2, reg: gp21, asm: "BTSQ", resultInArg0: true, clobberFlags: true}, // set bit arg1%64 in arg0
{name: "BTLconst", argLength: 1, reg: gp1flags, asm: "BTL", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 32
{name: "BTQconst", argLength: 1, reg: gp1flags, asm: "BTQ", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 64
{name: "BTCLconst", argLength: 1, reg: gp11, asm: "BTCL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // complement bit auxint in arg0, 0 <= auxint < 32
@@ -289,6 +289,20 @@ func init() {
{name: "BTSLconst", argLength: 1, reg: gp11, asm: "BTSL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 32
{name: "BTSQconst", argLength: 1, reg: gp11, asm: "BTSQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 64
+ // direct bit operation on memory operand
+ {name: "BTCQmodify", argLength: 3, reg: gpstore, asm: "BTCQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit arg1 in 64-bit arg0+auxint+aux, arg2=mem
+ {name: "BTCLmodify", argLength: 3, reg: gpstore, asm: "BTCL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit arg1 in 32-bit arg0+auxint+aux, arg2=mem
+ {name: "BTSQmodify", argLength: 3, reg: gpstore, asm: "BTSQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit arg1 in 64-bit arg0+auxint+aux, arg2=mem
+ {name: "BTSLmodify", argLength: 3, reg: gpstore, asm: "BTSL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit arg1 in 32-bit arg0+auxint+aux, arg2=mem
+ {name: "BTRQmodify", argLength: 3, reg: gpstore, asm: "BTRQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit arg1 in 64-bit arg0+auxint+aux, arg2=mem
+ {name: "BTRLmodify", argLength: 3, reg: gpstore, asm: "BTRL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit arg1 in 32-bit arg0+auxint+aux, arg2=mem
+ {name: "BTCQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTCQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit ValAndOff(AuxInt).Val() in 64-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "BTCLconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTCL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // complement bit ValAndOff(AuxInt).Val() in 32-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "BTSQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTSQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit ValAndOff(AuxInt).Val() in 64-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "BTSLconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTSL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // set bit ValAndOff(AuxInt).Val() in 32-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "BTRQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTRQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit ValAndOff(AuxInt).Val() in 64-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "BTRLconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTRL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // reset bit ValAndOff(AuxInt).Val() in 32-bit arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
{name: "TESTQ", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTQ", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0
{name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index ede7ed3d7a..659081ec8b 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -83,12 +83,18 @@
(Com8 x) -> (MVN x)
// math package intrinsics
+(Abs x) -> (FABSD x)
(Sqrt x) -> (FSQRTD x)
(Ceil x) -> (FRINTPD x)
(Floor x) -> (FRINTMD x)
(Round x) -> (FRINTAD x)
+(RoundToEven x) -> (FRINTND x)
(Trunc x) -> (FRINTZD x)
+// lowering rotates
+(RotateLeft32 x y) -> (RORW x (NEG <y.Type> y))
+(RotateLeft64 x y) -> (ROR x (NEG <y.Type> y))
+
(Ctz64NonZero x) -> (Ctz64 x)
(Ctz32NonZero x) -> (Ctz32 x)
@@ -101,9 +107,20 @@
// Load args directly into the register class where it will be used.
(FMOVDgpfp <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
+(FMOVDfpgp <t> (Arg [off] {sym})) -> @b.Func.Entry (Arg <t> [off] {sym})
+
// Similarly for stores, if we see a store after FPR <-> GPR move, then redirect store to use the other register set.
-(MOVDstore ptr (FMOVDfpgp val) mem) -> (FMOVDstore ptr val mem)
-(FMOVDstore ptr (FMOVDgpfp val) mem) -> (MOVDstore ptr val mem)
+(MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem) -> (FMOVDstore [off] {sym} ptr val mem)
+(FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) -> (MOVDstore [off] {sym} ptr val mem)
+(MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem) -> (FMOVSstore [off] {sym} ptr val mem)
+(FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) -> (MOVWstore [off] {sym} ptr val mem)
+
+// float <-> int register moves, with no conversion.
+// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
+(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) -> (FMOVDfpgp val)
+(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) -> (FMOVDgpfp val)
+(MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _)) -> (FMOVSfpgp val)
+(FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) -> (FMOVSgpfp val)
(BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
@@ -125,6 +142,8 @@
// shifts
// hardware instruction uses only the low 6 bits of the shift
// we compare to 64 to ensure Go semantics for large shifts
+// Rules about rotates with non-const shift are based on the following rules,
+// if the following rules change, please also modify the rules based on them.
(Lsh64x64 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh64x32 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
(Lsh64x16 <t> x y) -> (CSEL {OpARM64LessThanU} (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
@@ -1135,15 +1154,15 @@
(MULW (NEG x) y) -> (MNEGW x y)
// madd/msub
-(ADD a l:(MUL x y)) && l.Uses==1 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MADD a x y)
-(SUB a l:(MUL x y)) && l.Uses==1 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MSUB a x y)
-(ADD a l:(MNEG x y)) && l.Uses==1 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MSUB a x y)
-(SUB a l:(MNEG x y)) && l.Uses==1 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MADD a x y)
+(ADD a l:(MUL x y)) && l.Uses==1 && clobber(l) -> (MADD a x y)
+(SUB a l:(MUL x y)) && l.Uses==1 && clobber(l) -> (MSUB a x y)
+(ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) -> (MSUB a x y)
+(SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) -> (MADD a x y)
-(ADD a l:(MULW x y)) && l.Uses==1 && a.Type.Size() != 8 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MADDW a x y)
-(SUB a l:(MULW x y)) && l.Uses==1 && a.Type.Size() != 8 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MSUBW a x y)
-(ADD a l:(MNEGW x y)) && l.Uses==1 && a.Type.Size() != 8 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MSUBW a x y)
-(SUB a l:(MNEGW x y)) && l.Uses==1 && a.Type.Size() != 8 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l) -> (MADDW a x y)
+(ADD a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) -> (MADDW a x y)
+(SUB a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) -> (MSUBW a x y)
+(ADD a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) -> (MSUBW a x y)
+(SUB a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) -> (MADDW a x y)
// mul by constant
(MUL x (MOVDconst [-1])) -> (NEG x)
@@ -1191,6 +1210,94 @@
(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst <x.Type> [log2(c/7)] (SUBshiftLL <x.Type> x x [3]))
(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (NEG (SLLconst <x.Type> [log2(c/9)] (ADDshiftLL <x.Type> x x [3])))
+(MADD a x (MOVDconst [-1])) -> (SUB a x)
+(MADD a _ (MOVDconst [0])) -> a
+(MADD a x (MOVDconst [1])) -> (ADD a x)
+(MADD a x (MOVDconst [c])) && isPowerOfTwo(c) -> (ADDshiftLL a x [log2(c)])
+(MADD a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 -> (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MADD a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 -> (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+
+(MADD a (MOVDconst [-1]) x) -> (SUB a x)
+(MADD a (MOVDconst [0]) _) -> a
+(MADD a (MOVDconst [1]) x) -> (ADD a x)
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c) -> (ADDshiftLL a x [log2(c)])
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 -> (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 -> (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+
+(MADDW a x (MOVDconst [c])) && int32(c)==-1 -> (SUB a x)
+(MADDW a _ (MOVDconst [c])) && int32(c)==0 -> a
+(MADDW a x (MOVDconst [c])) && int32(c)==1 -> (ADD a x)
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c) -> (ADDshiftLL a x [log2(c)])
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 -> (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 -> (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+
+(MADDW a (MOVDconst [c]) x) && int32(c)==-1 -> (SUB a x)
+(MADDW a (MOVDconst [c]) _) && int32(c)==0 -> a
+(MADDW a (MOVDconst [c]) x) && int32(c)==1 -> (ADD a x)
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c) -> (ADDshiftLL a x [log2(c)])
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 -> (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 -> (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+
+(MSUB a x (MOVDconst [-1])) -> (ADD a x)
+(MSUB a _ (MOVDconst [0])) -> a
+(MSUB a x (MOVDconst [1])) -> (SUB a x)
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c) -> (SUBshiftLL a x [log2(c)])
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 -> (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 -> (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+
+(MSUB a (MOVDconst [-1]) x) -> (ADD a x)
+(MSUB a (MOVDconst [0]) _) -> a
+(MSUB a (MOVDconst [1]) x) -> (SUB a x)
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SUBshiftLL a x [log2(c)])
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 -> (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 -> (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+
+(MSUBW a x (MOVDconst [c])) && int32(c)==-1 -> (ADD a x)
+(MSUBW a _ (MOVDconst [c])) && int32(c)==0 -> a
+(MSUBW a x (MOVDconst [c])) && int32(c)==1 -> (SUB a x)
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c) -> (SUBshiftLL a x [log2(c)])
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 -> (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 -> (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+
+(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 -> (ADD a x)
+(MSUBW a (MOVDconst [c]) _) && int32(c)==0 -> a
+(MSUBW a (MOVDconst [c]) x) && int32(c)==1 -> (SUB a x)
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SUBshiftLL a x [log2(c)])
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 -> (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 -> (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+
// div by constant
(UDIV x (MOVDconst [1])) -> x
(UDIV x (MOVDconst [c])) && isPowerOfTwo(c) -> (SRLconst [log2(c)] x)
@@ -1242,6 +1349,14 @@
(MULW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)*int32(d))])
(MNEG (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [-c*d])
(MNEGW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [-int64(int32(c)*int32(d))])
+(MADD (MOVDconst [c]) x y) -> (ADDconst [c] (MUL <x.Type> x y))
+(MADDW (MOVDconst [c]) x y) -> (ADDconst [c] (MULW <x.Type> x y))
+(MSUB (MOVDconst [c]) x y) -> (ADDconst [c] (MNEG <x.Type> x y))
+(MSUBW (MOVDconst [c]) x y) -> (ADDconst [c] (MNEGW <x.Type> x y))
+(MADD a (MOVDconst [c]) (MOVDconst [d])) -> (ADDconst [c*d] a)
+(MADDW a (MOVDconst [c]) (MOVDconst [d])) -> (ADDconst [int64(int32(c)*int32(d))] a)
+(MSUB a (MOVDconst [c]) (MOVDconst [d])) -> (SUBconst [c*d] a)
+(MSUBW a (MOVDconst [c]) (MOVDconst [d])) -> (SUBconst [int64(int32(c)*int32(d))] a)
(DIV (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c/d])
(UDIV (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint64(c)/uint64(d))])
(DIVW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)/int32(d))])
@@ -1490,6 +1605,12 @@
(CSEL0 {arm64Negate(bool.Op)} x flagArg(bool))
// absorb shifts into ops
+(NEG x:(SLLconst [c] y)) && clobberIfDead(x) -> (NEGshiftLL [c] y)
+(NEG x:(SRLconst [c] y)) && clobberIfDead(x) -> (NEGshiftRL [c] y)
+(NEG x:(SRAconst [c] y)) && clobberIfDead(x) -> (NEGshiftRA [c] y)
+(MVN x:(SLLconst [c] y)) && clobberIfDead(x) -> (MVNshiftLL [c] y)
+(MVN x:(SRLconst [c] y)) && clobberIfDead(x) -> (MVNshiftRL [c] y)
+(MVN x:(SRAconst [c] y)) && clobberIfDead(x) -> (MVNshiftRA [c] y)
(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (ADDshiftLL x0 y [c])
(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (ADDshiftRL x0 y [c])
(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (ADDshiftRA x0 y [c])
@@ -1520,6 +1641,12 @@
(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftRL x1 y [c]))
(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (CMPshiftRA x0 y [c])
(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) -> (InvertFlags (CMPshiftRA x1 y [c]))
+(CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (CMNshiftLL x0 y [c])
+(CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (CMNshiftRL x0 y [c])
+(CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (CMNshiftRA x0 y [c])
+(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) -> (TSTshiftLL x0 y [c])
+(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) -> (TSTshiftRL x0 y [c])
+(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) -> (TSTshiftRA x0 y [c])
// prefer *const ops to *shift ops
(ADDshiftLL (MOVDconst [c]) x [d]) -> (ADDconst [c] (SLLconst <x.Type> x [d]))
@@ -1537,8 +1664,20 @@
(CMPshiftLL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
(CMPshiftRL (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
(CMPshiftRA (MOVDconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+(CMNshiftLL (MOVDconst [c]) x [d]) -> (CMNconst [c] (SLLconst <x.Type> x [d]))
+(CMNshiftRL (MOVDconst [c]) x [d]) -> (CMNconst [c] (SRLconst <x.Type> x [d]))
+(CMNshiftRA (MOVDconst [c]) x [d]) -> (CMNconst [c] (SRAconst <x.Type> x [d]))
+(TSTshiftLL (MOVDconst [c]) x [d]) -> (TSTconst [c] (SLLconst <x.Type> x [d]))
+(TSTshiftRL (MOVDconst [c]) x [d]) -> (TSTconst [c] (SRLconst <x.Type> x [d]))
+(TSTshiftRA (MOVDconst [c]) x [d]) -> (TSTconst [c] (SRAconst <x.Type> x [d]))
// constant folding in *shift ops
+(MVNshiftLL (MOVDconst [c]) [d]) -> (MOVDconst [^int64(uint64(c)<<uint64(d))])
+(MVNshiftRL (MOVDconst [c]) [d]) -> (MOVDconst [^int64(uint64(c)>>uint64(d))])
+(MVNshiftRA (MOVDconst [c]) [d]) -> (MOVDconst [^(c>>uint64(d))])
+(NEGshiftLL (MOVDconst [c]) [d]) -> (MOVDconst [-int64(uint64(c)<<uint64(d))])
+(NEGshiftRL (MOVDconst [c]) [d]) -> (MOVDconst [-int64(uint64(c)>>uint64(d))])
+(NEGshiftRA (MOVDconst [c]) [d]) -> (MOVDconst [-(c>>uint64(d))])
(ADDshiftLL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)<<uint64(d))])
(ADDshiftRL x (MOVDconst [c]) [d]) -> (ADDconst x [int64(uint64(c)>>uint64(d))])
(ADDshiftRA x (MOVDconst [c]) [d]) -> (ADDconst x [c>>uint64(d)])
@@ -1566,6 +1705,12 @@
(CMPshiftLL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)<<uint64(d))])
(CMPshiftRL x (MOVDconst [c]) [d]) -> (CMPconst x [int64(uint64(c)>>uint64(d))])
(CMPshiftRA x (MOVDconst [c]) [d]) -> (CMPconst x [c>>uint64(d)])
+(CMNshiftLL x (MOVDconst [c]) [d]) -> (CMNconst x [int64(uint64(c)<<uint64(d))])
+(CMNshiftRL x (MOVDconst [c]) [d]) -> (CMNconst x [int64(uint64(c)>>uint64(d))])
+(CMNshiftRA x (MOVDconst [c]) [d]) -> (CMNconst x [c>>uint64(d)])
+(TSTshiftLL x (MOVDconst [c]) [d]) -> (TSTconst x [int64(uint64(c)<<uint64(d))])
+(TSTshiftRL x (MOVDconst [c]) [d]) -> (TSTconst x [int64(uint64(c)>>uint64(d))])
+(TSTshiftRA x (MOVDconst [c]) [d]) -> (TSTconst x [c>>uint64(d)])
// simplification with *shift ops
(SUBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVDconst [0])
@@ -1590,7 +1735,7 @@
(ORNshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVDconst [-1])
(ORNshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVDconst [-1])
-// Generate rotates
+// Generate rotates with const shift
(ADDshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
( ORshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
(XORshiftLL [c] (SRLconst x [64-c]) x) -> (RORconst [64-c] x)
@@ -1608,6 +1753,38 @@
( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [c] x)
(XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [c] x)
+(RORconst [c] (RORconst [d] x)) -> (RORconst [(c+d)&63] x)
+(RORWconst [c] (RORWconst [d] x)) -> (RORWconst [(c+d)&31] x)
+
+// Generate rotates with non-const shift.
+// These rules match the Go source code like
+// y &= 63
+// x << y | x >> (64-y)
+// "|" can also be "^" or "+".
+// As arm64 does not have a ROL instruction, ROL(x, y) is replaced by ROR(x, -y).
+((ADD|OR|XOR) (SLL x (ANDconst <t> [63] y))
+ (CSEL0 <typ.UInt64> {cc} (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc.(Op) == OpARM64LessThanU
+ -> (ROR x (NEG <t> y))
+((ADD|OR|XOR) (SRL <typ.UInt64> x (ANDconst <t> [63] y))
+ (CSEL0 <typ.UInt64> {cc} (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc.(Op) == OpARM64LessThanU
+ -> (ROR x y)
+
+// These rules match the Go source code like
+// y &= 31
+// x << y | x >> (32-y)
+// "|" can also be "^" or "+".
+// As arm64 does not have a ROLW instruction, ROLW(x, y) is replaced by RORW(x, -y).
+((ADD|OR|XOR) (SLL x (ANDconst <t> [31] y))
+ (CSEL0 <typ.UInt32> {cc} (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc.(Op) == OpARM64LessThanU
+ -> (RORW x (NEG <t> y))
+((ADD|OR|XOR) (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y))
+ (CSEL0 <typ.UInt32> {cc} (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc.(Op) == OpARM64LessThanU
+ -> (RORW x y)
+
// Extract from reg pair
(ADDshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x)
( ORshiftLL [c] (SRLconst x [64-c]) x2) -> (EXTRconst [64-c] x2 x)
@@ -1626,6 +1803,9 @@
(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 -> (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
(SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 -> (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
+// Special case setting bit as 1. An example is math.Copysign(c,-1)
+(ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0 -> (ORconst [c1] x)
+
// bitfield ops
// sbfiz
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
index eb0ad530a1..fc0a41527b 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
@@ -212,6 +212,7 @@ func init() {
// unary ops
{name: "MVN", argLength: 1, reg: gp11, asm: "MVN"}, // ^arg0
{name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0
+ {name: "FABSD", argLength: 1, reg: fp11, asm: "FABSD"}, // abs(arg0), float64
{name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS"}, // -arg0, float32
{name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD"}, // -arg0, float64
{name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD"}, // sqrt(arg0), float64
@@ -248,6 +249,8 @@ func init() {
{name: "SRLconst", argLength: 1, reg: gp11, asm: "LSR", aux: "Int64"}, // arg0 >> auxInt, unsigned
{name: "SRA", argLength: 2, reg: gp21, asm: "ASR"}, // arg0 >> arg1, signed, shift amount is mod 64
{name: "SRAconst", argLength: 1, reg: gp11, asm: "ASR", aux: "Int64"}, // arg0 >> auxInt, signed
+ {name: "ROR", argLength: 2, reg: gp21, asm: "ROR"}, // arg0 right rotate by (arg1 mod 64) bits
+ {name: "RORW", argLength: 2, reg: gp21, asm: "RORW"}, // arg0 right rotate by (arg1 mod 32) bits
{name: "RORconst", argLength: 1, reg: gp11, asm: "ROR", aux: "Int64"}, // arg0 right rotate by auxInt bits
{name: "RORWconst", argLength: 1, reg: gp11, asm: "RORW", aux: "Int64"}, // uint32(arg0) right rotate by auxInt bits
{name: "EXTRconst", argLength: 2, reg: gp21, asm: "EXTR", aux: "Int64"}, // extract 64 bits from arg0:arg1 starting at lsb auxInt
@@ -270,6 +273,12 @@ func init() {
{name: "FCMPD", argLength: 2, reg: fp2flags, asm: "FCMPD", typ: "Flags"}, // arg0 compare to arg1, float64
// shifted ops
+ {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0<<auxInt)
+ {name: "MVNshiftRL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), unsigned shift
+ {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), signed shift
+ {name: "NEGshiftLL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0<<auxInt)
+ {name: "NEGshiftRL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), unsigned shift
+ {name: "NEGshiftRA", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), signed shift
{name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1<<auxInt
{name: "ADDshiftRL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, unsigned shift
{name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, signed shift
@@ -297,6 +306,12 @@ func init() {
{name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1<<auxInt
{name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift
{name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift
+ {name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1<<auxInt) compare to 0
+ {name: "CMNshiftRL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, unsigned shift
+ {name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, signed shift
+ {name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1<<auxInt) compare to 0
+ {name: "TSTshiftRL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, unsigned shift
+ {name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, signed shift
// bitfield ops
// for all bitfield ops lsb is auxInt>>8, width is auxInt&0xff
@@ -388,6 +403,8 @@ func init() {
{name: "FMOVDgpfp", argLength: 1, reg: gpfp, asm: "FMOVD"}, // move int64 to float64 (no conversion)
{name: "FMOVDfpgp", argLength: 1, reg: fpgp, asm: "FMOVD"}, // move float64 to int64 (no conversion)
+ {name: "FMOVSgpfp", argLength: 1, reg: gpfp, asm: "FMOVS"}, // move 32bits from int to float reg (no conversion)
+ {name: "FMOVSfpgp", argLength: 1, reg: fpgp, asm: "FMOVS"}, // move 32bits from float to int reg, zero extend (no conversion)
// conversions
{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
@@ -422,6 +439,7 @@ func init() {
// floating-point round to integral
{name: "FRINTAD", argLength: 1, reg: fp11, asm: "FRINTAD"},
{name: "FRINTMD", argLength: 1, reg: fp11, asm: "FRINTMD"},
+ {name: "FRINTND", argLength: 1, reg: fp11, asm: "FRINTND"},
{name: "FRINTPD", argLength: 1, reg: fp11, asm: "FRINTPD"},
{name: "FRINTZD", argLength: 1, reg: fp11, asm: "FRINTZD"},
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index 6ef8c7b5b9..7d79c9ad50 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -25,6 +25,7 @@
(Mul64 x y) -> (MULLD x y)
(Mul(32|16|8) x y) -> (MULLW x y)
+(Mul64uhilo x y) -> (LoweredMuluhilo x y)
(Div64 x y) -> (DIVD x y)
(Div64u x y) -> (DIVDU x y)
@@ -74,11 +75,11 @@
(ConstBool [b]) -> (MOVDconst [b])
// Constant folding
-(FABS (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Abs(i2f(x)))])
-(FSQRT (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Sqrt(i2f(x)))])
-(FFLOOR (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Floor(i2f(x)))])
-(FCEIL (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Ceil(i2f(x)))])
-(FTRUNC (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Trunc(i2f(x)))])
+(FABS (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))])
+(FSQRT (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
+(FFLOOR (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))])
+(FCEIL (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))])
+(FTRUNC (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))])
// Rotate generation with const shift
(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
@@ -168,6 +169,20 @@
(Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])
+// Lower bounded shifts first. No need to check shift value.
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLD x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRD x y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW x y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW (MOVHZreg x) y)
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW (MOVBZreg x) y)
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAD x y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW x y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW (MOVHreg x) y)
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW (MOVBreg x) y)
+
// non-constant rotates
// These are subexpressions found in statements that can become rotates
// In these cases the shift count is known to be < 64 so the more complicated expressions
@@ -660,14 +675,51 @@
(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF -> y
// small and of zero-extend -> either zero-extend or small and
- // degenerate-and
(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF -> y
+(ANDconst [0xFF] y:(MOVBreg _)) -> y
(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF -> y
-(ANDconst [c] y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF -> y
- // normal case
-(ANDconst [c] (MOVBZreg x)) -> (ANDconst [c&0xFF] x)
-(ANDconst [c] (MOVHZreg x)) -> (ANDconst [c&0xFFFF] x)
-(ANDconst [c] (MOVWZreg x)) -> (ANDconst [c&0xFFFFFFFF] x)
+(ANDconst [0xFFFF] y:(MOVHreg _)) -> y
+
+(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF -> y
+(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) -> (MOVWZreg x)
+// normal case
+(ANDconst [c] (MOV(B|BZ)reg x)) -> (ANDconst [c&0xFF] x)
+(ANDconst [c] (MOV(H|HZ)reg x)) -> (ANDconst [c&0xFFFF] x)
+(ANDconst [c] (MOV(W|WZ)reg x)) -> (ANDconst [c&0xFFFFFFFF] x)
+
+// Eliminate unnecessary sign/zero extend following right shift
+(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) -> (SRWconst [c] (MOVBZreg x))
+(MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) -> (SRWconst [c] (MOVHZreg x))
+(MOVWZreg (SRWconst [c] (MOVWZreg x))) -> (SRWconst [c] (MOVWZreg x))
+(MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) -> (SRAWconst [c] (MOVBreg x))
+(MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) -> (SRAWconst [c] (MOVHreg x))
+(MOVWreg (SRAWconst [c] (MOVWreg x))) -> (SRAWconst [c] (MOVWreg x))
+
+(MOVWZreg (SRWconst [c] x)) && sizeof(x.Type) <= 32 -> (SRWconst [c] x)
+(MOVHZreg (SRWconst [c] x)) && sizeof(x.Type) <= 16 -> (SRWconst [c] x)
+(MOVBZreg (SRWconst [c] x)) && sizeof(x.Type) == 8 -> (SRWconst [c] x)
+(MOVWreg (SRAWconst [c] x)) && sizeof(x.Type) <= 32 -> (SRAWconst [c] x)
+(MOVHreg (SRAWconst [c] x)) && sizeof(x.Type) <= 16 -> (SRAWconst [c] x)
+(MOVBreg (SRAWconst [c] x)) && sizeof(x.Type) == 8 -> (SRAWconst [c] x)
+
+// initial right shift will handle sign/zero extend
+(MOVBZreg (SRDconst [c] x)) && c>=56 -> (SRDconst [c] x)
+(MOVBreg (SRDconst [c] x)) && c>56 -> (SRDconst [c] x)
+(MOVBreg (SRDconst [c] x)) && c==56 -> (SRADconst [c] x)
+(MOVBZreg (SRWconst [c] x)) && c>=24 -> (SRWconst [c] x)
+(MOVBreg (SRWconst [c] x)) && c>24 -> (SRWconst [c] x)
+(MOVBreg (SRWconst [c] x)) && c==24 -> (SRAWconst [c] x)
+
+(MOVHZreg (SRDconst [c] x)) && c>=48 -> (SRDconst [c] x)
+(MOVHreg (SRDconst [c] x)) && c>48 -> (SRDconst [c] x)
+(MOVHreg (SRDconst [c] x)) && c==48 -> (SRADconst [c] x)
+(MOVHZreg (SRWconst [c] x)) && c>=16 -> (SRWconst [c] x)
+(MOVHreg (SRWconst [c] x)) && c>16 -> (SRWconst [c] x)
+(MOVHreg (SRWconst [c] x)) && c==16 -> (SRAWconst [c] x)
+
+(MOVWZreg (SRDconst [c] x)) && c>=32 -> (SRDconst [c] x)
+(MOVWreg (SRDconst [c] x)) && c>32 -> (SRDconst [c] x)
+(MOVWreg (SRDconst [c] x)) && c==32 -> (SRADconst [c] x)
// Various redundant zero/sign extension combinations.
(MOVBZreg y:(MOVBZreg _)) -> y // repeat
@@ -796,11 +848,19 @@
(MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} x mem)
(MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} x mem)
+// Determine load + addressing that can be done as a register indexed load
+(MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 -> (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
+
+// Determine indexed loads with constant values that can be done without index
+(MOV(D|W|WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) -> (MOV(D|W|WZ|H|HZ|BZ)load [c] ptr mem)
+(MOV(D|W|WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) -> (MOV(D|W|WZ|H|HZ|BZ)load [c] ptr mem)
+
+
// Store of zero -> storezero
-(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVDstorezero [off] {sym} ptr mem)
-(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVWstorezero [off] {sym} ptr mem)
-(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVHstorezero [off] {sym} ptr mem)
-(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && c == 0 -> (MOVBstorezero [off] {sym} ptr mem)
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
// Fold offsets for storezero
(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
@@ -812,6 +872,13 @@
(MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
(MOVBstorezero [off1+off2] {sym} x mem)
+// Stores with addressing that can be done as indexed stores
+(MOV(D|W|H|B)store [off] {sym} p:(ADD ptr idx) val mem) && off == 0 && sym == nil && p.Uses == 1 -> (MOV(D|W|H|B)storeidx ptr idx val mem)
+
+// Stores with constant index values can be done without indexed instructions
+(MOV(D|W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) -> (MOV(D|W|H|B)store [c] ptr val mem)
+(MOV(D|W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) -> (MOV(D|W|H|B)store [c] ptr val mem)
+
// Fold symbols into storezero
(MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
&& (x.Op != OpSB || p.Uses == 1) ->
@@ -851,22 +918,43 @@
(ZeroExt16to(32|64) x) -> (MOVHZreg x)
(ZeroExt32to64 x) -> (MOVWZreg x)
-(Trunc(16|32|64)to8 x) -> (MOVBreg x)
-(Trunc(32|64)to16 x) -> (MOVHreg x)
-(Trunc64to32 x) -> (MOVWreg x)
+(Trunc(16|32|64)to8 x) && isSigned(x.Type) -> (MOVBreg x)
+(Trunc(16|32|64)to8 x) -> (MOVBZreg x)
+(Trunc(32|64)to16 x) && isSigned(x.Type) -> (MOVHreg x)
+(Trunc(32|64)to16 x) -> (MOVHZreg x)
+(Trunc64to32 x) && isSigned(x.Type) -> (MOVWreg x)
+(Trunc64to32 x) -> (MOVWZreg x)
(Slicemask <t> x) -> (SRADconst (NEG <t> x) [63])
// Note that MOV??reg returns a 64-bit int, x is not necessarily that wide
// This may interact with other patterns in the future. (Compare with arm64)
-(MOVBZreg x:(MOVBZload _ _)) -> x
-(MOVHZreg x:(MOVHZload _ _)) -> x
-(MOVHreg x:(MOVHload _ _)) -> x
+(MOV(B|H|W)Zreg x:(MOVBZload _ _)) -> x
+(MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) -> x
+(MOV(H|W)Zreg x:(MOVHZload _ _)) -> x
+(MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) -> x
+(MOV(H|W)reg x:(MOVHload _ _)) -> x
+(MOV(H|W)reg x:(MOVHloadidx _ _ _)) -> x
+(MOVWZreg x:(MOVWZload _ _)) -> x
+(MOVWZreg x:(MOVWZloadidx _ _ _)) -> x
+(MOVWreg x:(MOVWload _ _)) -> x
+(MOVWreg x:(MOVWloadidx _ _ _)) -> x
+
+// don't extend if argument is already extended
+(MOVBreg x:(Arg <t>)) && is8BitInt(t) && isSigned(t) -> x
+(MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !isSigned(t) -> x
+(MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && isSigned(t) -> x
+(MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) -> x
+(MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) -> x
+(MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) -> x
(MOVBZreg (MOVDconst [c])) -> (MOVDconst [int64(uint8(c))])
(MOVBreg (MOVDconst [c])) -> (MOVDconst [int64(int8(c))])
(MOVHZreg (MOVDconst [c])) -> (MOVDconst [int64(uint16(c))])
(MOVHreg (MOVDconst [c])) -> (MOVDconst [int64(int16(c))])
+(MOVWreg (MOVDconst [c])) -> (MOVDconst [int64(int32(c))])
+(MOVWZreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))])
+
// Lose widening ops fed to stores
(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
@@ -874,6 +962,11 @@
(MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 -> (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
(MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 -> (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+(MOVBstoreidx [off] {sym} ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstoreidx [off] {sym} ptr idx x mem)
+(MOVHstoreidx [off] {sym} ptr idx (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHstoreidx [off] {sym} ptr idx x mem)
+(MOVWstoreidx [off] {sym} ptr idx (MOV(W|WZ)reg x) mem) -> (MOVWstoreidx [off] {sym} ptr idx x mem)
+(MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 -> (MOVBstoreidx [off] {sym} ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+(MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 -> (MOVBstoreidx [off] {sym} ptr idx (SRWconst <typ.UInt32> x [c]) mem)
(MOVHBRstore {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHBRstore {sym} ptr x mem)
(MOVWBRstore {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWBRstore {sym} ptr x mem)
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
index ce5d552375..c82f7312fe 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
@@ -135,11 +135,14 @@ func init() {
gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
gp11 = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
gp21 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}
+ gp22 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, gp}}
gp1cr = regInfo{inputs: []regMask{gp | sp | sb}}
gp2cr = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}}
crgp = regInfo{inputs: nil, outputs: []regMask{gp}}
gpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
+ gploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}}
gpstore = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}}
+ gpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}}
gpstorezero = regInfo{inputs: []regMask{gp | sp | sb}} // ppc64.REGZERO is reserved zero value
gpxchg = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}}
gpcas = regInfo{inputs: []regMask{gp | sp | sb, gp, gp}, outputs: []regMask{gp}}
@@ -151,7 +154,9 @@ func init() {
fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}}
fp2cr = regInfo{inputs: []regMask{fp, fp}}
fpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{fp}}
+ fploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{fp}}
fpstore = regInfo{inputs: []regMask{gp | sp | sb, fp}}
+ fpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, fp}}
callerSave = regMask(gp | fp | gr)
)
ops := []opData{
@@ -170,6 +175,7 @@ func init() {
{name: "MULHW", argLength: 2, reg: gp21, asm: "MULHW", commutative: true}, // (arg0 * arg1) >> 32, signed
{name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", commutative: true}, // (arg0 * arg1) >> 64, unsigned
{name: "MULHWU", argLength: 2, reg: gp21, asm: "MULHWU", commutative: true}, // (arg0 * arg1) >> 32, unsigned
+ {name: "LoweredMuluhilo", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, returns (hi, lo)
{name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true}, // arg0*arg1
{name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0*arg1
@@ -281,6 +287,19 @@ func init() {
{name: "MOVWBRload", argLength: 2, reg: gpload, asm: "MOVWBR", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes zero extend reverse order
{name: "MOVHBRload", argLength: 2, reg: gpload, asm: "MOVHBR", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes zero extend reverse order
+ // In these cases an index register is used in addition to a base register
+ {name: "MOVBZloadidx", argLength: 3, reg: gploadidx, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // zero extend uint8 to uint64
+ {name: "MOVHloadidx", argLength: 3, reg: gploadidx, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // sign extend int16 to int64
+ {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // zero extend uint16 to uint64
+ {name: "MOVWloadidx", argLength: 3, reg: gploadidx, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // sign extend int32 to int64
+ {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // zero extend uint32 to uint64
+ {name: "MOVDloadidx", argLength: 3, reg: gploadidx, asm: "MOVD", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVHBR", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // sign extend int16 to int64
+ {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVWBR", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // sign extend int32 to int64
+ {name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVDBR", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "FMOVDloadidx", argLength: 3, reg: fploadidx, asm: "FMOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "FMOVSloadidx", argLength: 3, reg: fploadidx, asm: "FMOVS", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"},
+
// Store bytes in the reverse endian order of the arch into arg0.
// These are indexes stores with no offset field in the instruction so the aux fields are not used.
{name: "MOVDBRstore", argLength: 3, reg: gpstore, asm: "MOVDBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes reverse order
@@ -301,6 +320,17 @@ func init() {
{name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store double float
{name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store single float
+ // Stores using index and base registers
+ {name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte
+ {name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store half word
+ {name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store word
+ {name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store double word
+ {name: "FMOVDstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store double float
+ {name: "FMOVSstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store single float
+ {name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVHBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store half word reversed byte using index reg
+ {name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVWBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store word reversed byte using index reg
+ {name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVDBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store double word reversed byte using index reg
+
// The following ops store 0 into arg0+aux+auxint arg1=mem
{name: "MOVBstorezero", argLength: 2, reg: gpstorezero, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 1 byte
{name: "MOVHstorezero", argLength: 2, reg: gpstorezero, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 2 bytes
diff --git a/src/cmd/compile/internal/ssa/gen/Wasm.rules b/src/cmd/compile/internal/ssa/gen/Wasm.rules
index dc1581362c..64198839d0 100644
--- a/src/cmd/compile/internal/ssa/gen/Wasm.rules
+++ b/src/cmd/compile/internal/ssa/gen/Wasm.rules
@@ -363,8 +363,8 @@
(I64And (I64Const [x]) (I64Const [y])) -> (I64Const [x & y])
(I64Or (I64Const [x]) (I64Const [y])) -> (I64Const [x | y])
(I64Xor (I64Const [x]) (I64Const [y])) -> (I64Const [x ^ y])
-(F64Add (F64Const [x]) (F64Const [y])) -> (F64Const [f2i(i2f(x) + i2f(y))])
-(F64Mul (F64Const [x]) (F64Const [y])) -> (F64Const [f2i(i2f(x) * i2f(y))])
+(F64Add (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) + auxTo64F(y))])
+(F64Mul (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
(I64Eq (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [1])
(I64Eq (I64Const [x]) (I64Const [y])) && x != y -> (I64Const [0])
(I64Ne (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [0])
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
index aa944b5379..d490e32f3d 100644
--- a/src/cmd/compile/internal/ssa/gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -44,16 +44,16 @@
(Trunc64to8 (Const64 [c])) -> (Const8 [int64(int8(c))])
(Trunc64to16 (Const64 [c])) -> (Const16 [int64(int16(c))])
(Trunc64to32 (Const64 [c])) -> (Const32 [int64(int32(c))])
-(Cvt64Fto32F (Const64F [c])) -> (Const32F [f2i(float64(i2f32(c)))])
+(Cvt64Fto32F (Const64F [c])) -> (Const32F [auxFrom32F(float32(auxTo64F(c)))])
(Cvt32Fto64F (Const32F [c])) -> (Const64F [c]) // c is already a 64 bit float
-(Cvt32to32F (Const32 [c])) -> (Const32F [f2i(float64(float32(int32(c))))])
-(Cvt32to64F (Const32 [c])) -> (Const64F [f2i(float64(int32(c)))])
-(Cvt64to32F (Const64 [c])) -> (Const32F [f2i(float64(float32(c)))])
-(Cvt64to64F (Const64 [c])) -> (Const64F [f2i(float64(c))])
-(Cvt32Fto32 (Const32F [c])) -> (Const32 [int64(int32(i2f(c)))])
-(Cvt32Fto64 (Const32F [c])) -> (Const64 [int64(i2f(c))])
-(Cvt64Fto32 (Const64F [c])) -> (Const32 [int64(int32(i2f(c)))])
-(Cvt64Fto64 (Const64F [c])) -> (Const64 [int64(i2f(c))])
+(Cvt32to32F (Const32 [c])) -> (Const32F [auxFrom32F(float32(int32(c)))])
+(Cvt32to64F (Const32 [c])) -> (Const64F [auxFrom64F(float64(int32(c)))])
+(Cvt64to32F (Const64 [c])) -> (Const32F [auxFrom32F(float32(c))])
+(Cvt64to64F (Const64 [c])) -> (Const64F [auxFrom64F(float64(c))])
+(Cvt32Fto32 (Const32F [c])) -> (Const32 [int64(int32(auxTo32F(c)))])
+(Cvt32Fto64 (Const32F [c])) -> (Const64 [int64(auxTo32F(c))])
+(Cvt64Fto32 (Const64F [c])) -> (Const32 [int64(int32(auxTo64F(c)))])
+(Cvt64Fto64 (Const64F [c])) -> (Const64 [int64(auxTo64F(c))])
(Round32F x:(Const32F)) -> x
(Round64F x:(Const64F)) -> x
@@ -95,16 +95,15 @@
(Neg16 (Const16 [c])) -> (Const16 [int64(-int16(c))])
(Neg32 (Const32 [c])) -> (Const32 [int64(-int32(c))])
(Neg64 (Const64 [c])) -> (Const64 [-c])
-(Neg32F (Const32F [c])) && i2f(c) != 0 -> (Const32F [f2i(-i2f(c))])
-(Neg64F (Const64F [c])) && i2f(c) != 0 -> (Const64F [f2i(-i2f(c))])
+(Neg32F (Const32F [c])) && auxTo32F(c) != 0 -> (Const32F [auxFrom32F(-auxTo32F(c))])
+(Neg64F (Const64F [c])) && auxTo64F(c) != 0 -> (Const64F [auxFrom64F(-auxTo64F(c))])
(Add8 (Const8 [c]) (Const8 [d])) -> (Const8 [int64(int8(c+d))])
(Add16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c+d))])
(Add32 (Const32 [c]) (Const32 [d])) -> (Const32 [int64(int32(c+d))])
(Add64 (Const64 [c]) (Const64 [d])) -> (Const64 [c+d])
-(Add32F (Const32F [c]) (Const32F [d])) ->
- (Const32F [f2i(float64(i2f32(c) + i2f32(d)))]) // ensure we combine the operands with 32 bit precision
-(Add64F (Const64F [c]) (Const64F [d])) -> (Const64F [f2i(i2f(c) + i2f(d))])
+(Add32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) + auxTo32F(d))])
+(Add64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) + auxTo64F(d))])
(AddPtr <t> x (Const64 [c])) -> (OffPtr <t> x [c])
(AddPtr <t> x (Const32 [c])) -> (OffPtr <t> x [c])
@@ -112,17 +111,15 @@
(Sub16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c-d))])
(Sub32 (Const32 [c]) (Const32 [d])) -> (Const32 [int64(int32(c-d))])
(Sub64 (Const64 [c]) (Const64 [d])) -> (Const64 [c-d])
-(Sub32F (Const32F [c]) (Const32F [d])) ->
- (Const32F [f2i(float64(i2f32(c) - i2f32(d)))])
-(Sub64F (Const64F [c]) (Const64F [d])) -> (Const64F [f2i(i2f(c) - i2f(d))])
+(Sub32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) - auxTo32F(d))])
+(Sub64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) - auxTo64F(d))])
(Mul8 (Const8 [c]) (Const8 [d])) -> (Const8 [int64(int8(c*d))])
(Mul16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c*d))])
(Mul32 (Const32 [c]) (Const32 [d])) -> (Const32 [int64(int32(c*d))])
(Mul64 (Const64 [c]) (Const64 [d])) -> (Const64 [c*d])
-(Mul32F (Const32F [c]) (Const32F [d])) ->
- (Const32F [f2i(float64(i2f32(c) * i2f32(d)))])
-(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [f2i(i2f(c) * i2f(d))])
+(Mul32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
+(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
(And8 (Const8 [c]) (Const8 [d])) -> (Const8 [int64(int8(c&d))])
(And16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c&d))])
@@ -147,8 +144,8 @@
(Div16u (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(uint16(c)/uint16(d)))])
(Div32u (Const32 [c]) (Const32 [d])) && d != 0 -> (Const32 [int64(int32(uint32(c)/uint32(d)))])
(Div64u (Const64 [c]) (Const64 [d])) && d != 0 -> (Const64 [int64(uint64(c)/uint64(d))])
-(Div32F (Const32F [c]) (Const32F [d])) -> (Const32F [f2i(float64(i2f32(c) / i2f32(d)))])
-(Div64F (Const64F [c]) (Const64F [d])) -> (Const64F [f2i(i2f(c) / i2f(d))])
+(Div32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
+(Div64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
(Not (ConstBool [c])) -> (ConstBool [1-c])
@@ -444,12 +441,18 @@
(Leq8U (Const8 [c]) (Const8 [d])) -> (ConstBool [b2i(uint8(c) <= uint8(d))])
// constant floating point comparisons
-(Eq(64|32)F (Const(64|32)F [c]) (Const(64|32)F [d])) -> (ConstBool [b2i(i2f(c) == i2f(d))])
-(Neq(64|32)F (Const(64|32)F [c]) (Const(64|32)F [d])) -> (ConstBool [b2i(i2f(c) != i2f(d))])
-(Greater(64|32)F (Const(64|32)F [c]) (Const(64|32)F [d])) -> (ConstBool [b2i(i2f(c) > i2f(d))])
-(Geq(64|32)F (Const(64|32)F [c]) (Const(64|32)F [d])) -> (ConstBool [b2i(i2f(c) >= i2f(d))])
-(Less(64|32)F (Const(64|32)F [c]) (Const(64|32)F [d])) -> (ConstBool [b2i(i2f(c) < i2f(d))])
-(Leq(64|32)F (Const(64|32)F [c]) (Const(64|32)F [d])) -> (ConstBool [b2i(i2f(c) <= i2f(d))])
+(Eq32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(auxTo32F(c) == auxTo32F(d))])
+(Eq64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(auxTo64F(c) == auxTo64F(d))])
+(Neq32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(auxTo32F(c) != auxTo32F(d))])
+(Neq64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(auxTo64F(c) != auxTo64F(d))])
+(Greater32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(auxTo32F(c) > auxTo32F(d))])
+(Greater64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(auxTo64F(c) > auxTo64F(d))])
+(Geq32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(auxTo32F(c) >= auxTo32F(d))])
+(Geq64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(auxTo64F(c) >= auxTo64F(d))])
+(Less32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(auxTo32F(c) < auxTo32F(d))])
+(Less64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(auxTo64F(c) < auxTo64F(d))])
+(Leq32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(auxTo32F(c) <= auxTo32F(d))])
+(Leq64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(auxTo64F(c) <= auxTo64F(d))])
// simplifications
(Or(64|32|16|8) x x) -> x
@@ -572,9 +575,9 @@
// Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) -> (Const64F [x])
-(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) -> (Const32F [f2i(extend32Fto64F(math.Float32frombits(uint32(x))))])
+(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
(Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) -> (Const64 [x])
-(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) -> (Const32 [int64(int32(math.Float32bits(truncate64Fto32F(i2f(x)))))])
+(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) -> (Const32 [int64(int32(math.Float32bits(auxTo32F(x))))])
// Float Loads up to Zeros so they can be constant folded.
(Load <t1> op:(OffPtr [o1] p1)
@@ -1326,19 +1329,16 @@
(Mul8 (Const8 <t> [c]) (Mul8 (Const8 <t> [d]) x)) -> (Mul8 (Const8 <t> [int64(int8(c*d))]) x)
// floating point optimizations
-(Add(32|64)F x (Const(32|64)F [0])) -> x
-(Sub(32|64)F x (Const(32|64)F [0])) -> x
+(Mul(32|64)F x (Const(32|64)F [auxFrom64F(1)])) -> x
+(Mul32F x (Const32F [auxFrom32F(-1)])) -> (Neg32F x)
+(Mul64F x (Const64F [auxFrom64F(-1)])) -> (Neg64F x)
+(Mul32F x (Const32F [auxFrom32F(2)])) -> (Add32F x x)
+(Mul64F x (Const64F [auxFrom64F(2)])) -> (Add64F x x)
-(Mul(32|64)F x (Const(32|64)F [f2i(1)])) -> x
-(Mul32F x (Const32F [f2i(-1)])) -> (Neg32F x)
-(Mul64F x (Const64F [f2i(-1)])) -> (Neg64F x)
-(Mul32F x (Const32F [f2i(2)])) -> (Add32F x x)
-(Mul64F x (Const64F [f2i(2)])) -> (Add64F x x)
+(Div32F x (Const32F <t> [c])) && reciprocalExact32(auxTo32F(c)) -> (Mul32F x (Const32F <t> [auxFrom32F(1/auxTo32F(c))]))
+(Div64F x (Const64F <t> [c])) && reciprocalExact64(auxTo64F(c)) -> (Mul64F x (Const64F <t> [auxFrom64F(1/auxTo64F(c))]))
-(Div32F x (Const32F <t> [c])) && reciprocalExact32(float32(i2f(c))) -> (Mul32F x (Const32F <t> [f2i(1/i2f(c))]))
-(Div64F x (Const64F <t> [c])) && reciprocalExact64(i2f(c)) -> (Mul64F x (Const64F <t> [f2i(1/i2f(c))]))
-
-(Sqrt (Const64F [c])) -> (Const64F [f2i(math.Sqrt(i2f(c)))])
+(Sqrt (Const64F [c])) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
// recognize runtime.newobject and don't Zero/Nilcheck it
(Zero (Load (OffPtr [c] (SP)) mem) mem)
@@ -1363,12 +1363,12 @@
(NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _)
&& isSameSym(sym, "runtime.newobject")
&& c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
- && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
+ && warnRule(fe.Debug_checknil(), v, "removed nil check")
-> (Invalid)
(NilCheck (OffPtr (Load (OffPtr [c] (SP)) (StaticCall {sym} _))) _)
&& isSameSym(sym, "runtime.newobject")
&& c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
- && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
+ && warnRule(fe.Debug_checknil(), v, "removed nil check")
-> (Invalid)
// Evaluate constant address comparisons.
@@ -1545,8 +1545,8 @@
// Don't Move from memory if the values are likely to already be
// in registers.
(Move {t1} [n] dst p1
- mem:(Store {t2} op2:(OffPtr [o2] p2) d1
- (Store {t3} op3:(OffPtr [0] p3) d2 _)))
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _)))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
@@ -1554,12 +1554,12 @@
&& registerizable(b, t3)
&& o2 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3)
- -> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
- (Store {t3} (OffPtr <t3.(*types.Type)> [0] dst) d2 mem))
+ -> (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
(Move {t1} [n] dst p1
- mem:(Store {t2} op2:(OffPtr [o2] p2) d1
- (Store {t3} op3:(OffPtr [o3] p3) d2
- (Store {t4} op4:(OffPtr [0] p4) d3 _))))
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
@@ -1570,14 +1570,14 @@
&& o3 == sizeof(t4)
&& o2-o3 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3) + sizeof(t4)
- -> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
- (Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2
- (Store {t4} (OffPtr <t4.(*types.Type)> [0] dst) d3 mem)))
+ -> (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
(Move {t1} [n] dst p1
- mem:(Store {t2} op2:(OffPtr [o2] p2) d1
- (Store {t3} op3:(OffPtr [o3] p3) d2
- (Store {t4} op4:(OffPtr [o4] p4) d3
- (Store {t5} op5:(OffPtr [0] p5) d4 _)))))
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3
+ (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _)))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
@@ -1591,16 +1591,16 @@
&& o3-o4 == sizeof(t4)
&& o2-o3 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3) + sizeof(t4) + sizeof(t5)
- -> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
- (Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2
- (Store {t4} (OffPtr <t4.(*types.Type)> [o4] dst) d3
- (Store {t5} (OffPtr <t5.(*types.Type)> [0] dst) d4 mem))))
+ -> (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
// Same thing but with VarDef in the middle.
(Move {t1} [n] dst p1
mem:(VarDef
- (Store {t2} op2:(OffPtr [o2] p2) d1
- (Store {t3} op3:(OffPtr [0] p3) d2 _))))
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
@@ -1608,13 +1608,13 @@
&& registerizable(b, t3)
&& o2 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3)
- -> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
- (Store {t3} (OffPtr <t3.(*types.Type)> [0] dst) d2 mem))
+ -> (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
(Move {t1} [n] dst p1
mem:(VarDef
- (Store {t2} op2:(OffPtr [o2] p2) d1
- (Store {t3} op3:(OffPtr [o3] p3) d2
- (Store {t4} op4:(OffPtr [0] p4) d3 _)))))
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _)))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
@@ -1625,15 +1625,15 @@
&& o3 == sizeof(t4)
&& o2-o3 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3) + sizeof(t4)
- -> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
- (Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2
- (Store {t4} (OffPtr <t4.(*types.Type)> [0] dst) d3 mem)))
+ -> (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
(Move {t1} [n] dst p1
mem:(VarDef
- (Store {t2} op2:(OffPtr [o2] p2) d1
- (Store {t3} op3:(OffPtr [o3] p3) d2
- (Store {t4} op4:(OffPtr [o4] p4) d3
- (Store {t5} op5:(OffPtr [0] p5) d4 _))))))
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3
+ (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _))))))
&& isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
&& alignof(t2) <= alignof(t1)
&& alignof(t3) <= alignof(t1)
@@ -1647,10 +1647,10 @@
&& o3-o4 == sizeof(t4)
&& o2-o3 == sizeof(t3)
&& n == sizeof(t2) + sizeof(t3) + sizeof(t4) + sizeof(t5)
- -> (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1
- (Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2
- (Store {t4} (OffPtr <t4.(*types.Type)> [o4] dst) d3
- (Store {t5} (OffPtr <t5.(*types.Type)> [0] dst) d4 mem))))
+ -> (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
// Prefer to Zero and Store than to Move.
(Move {t1} [n] dst p1
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
index 7292012d26..ee9c6fa0f6 100644
--- a/src/cmd/compile/internal/ssa/gen/genericOps.go
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -470,8 +470,9 @@ var genericOps = []opData{
{name: "VarDef", argLength: 1, aux: "Sym", typ: "Mem", symEffect: "None", zeroWidth: true}, // aux is a *gc.Node of a variable that is about to be initialized. arg0=mem, returns mem
{name: "VarKill", argLength: 1, aux: "Sym", symEffect: "None"}, // aux is a *gc.Node of a variable that is known to be dead. arg0=mem, returns mem
- {name: "VarLive", argLength: 1, aux: "Sym", symEffect: "Read", zeroWidth: true}, // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem
- {name: "KeepAlive", argLength: 2, typ: "Mem", zeroWidth: true}, // arg[0] is a value that must be kept alive until this mark. arg[1]=mem, returns mem
+ // TODO: what's the difference between VarLive and KeepAlive?
+ {name: "VarLive", argLength: 1, aux: "Sym", symEffect: "Read", zeroWidth: true}, // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem
+ {name: "KeepAlive", argLength: 2, typ: "Mem", zeroWidth: true}, // arg[0] is a value that must be kept alive until this mark. arg[1]=mem, returns mem
// Ops for breaking 64-bit operations on 32-bit architectures
{name: "Int64Make", argLength: 2, typ: "UInt64"}, // arg0=hi, arg1=lo
diff --git a/src/cmd/compile/internal/ssa/gen/main.go b/src/cmd/compile/internal/ssa/gen/main.go
index f35a991db2..f7195bf536 100644
--- a/src/cmd/compile/internal/ssa/gen/main.go
+++ b/src/cmd/compile/internal/ssa/gen/main.go
@@ -63,9 +63,14 @@ type blockData struct {
}
type regInfo struct {
- inputs []regMask
+ // inputs[i] encodes the set of registers allowed for the i'th input.
+ // Inputs that don't use registers (flags, memory, etc.) should be 0.
+ inputs []regMask
+ // clobbers encodes the set of registers that are overwritten by
+ // the instruction (other than the output registers).
clobbers regMask
- outputs []regMask
+ // outputs[i] encodes the set of registers allowed for the i'th output.
+ outputs []regMask
}
type regMask uint64
diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go
index c51ea02262..b7d5f912db 100644
--- a/src/cmd/compile/internal/ssa/html.go
+++ b/src/cmd/compile/internal/ssa/html.go
@@ -484,7 +484,7 @@ func (x ByTopo) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x ByTopo) Less(i, j int) bool {
a := x[i]
b := x[j]
- if a.Filename == a.Filename {
+ if a.Filename == b.Filename {
return a.StartLineno < b.StartLineno
}
return a.Filename < b.Filename
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index 610921808e..43f5c59591 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -50,9 +50,17 @@ type outputInfo struct {
}
type regInfo struct {
- inputs []inputInfo // ordered in register allocation order
+ // inputs encodes the register restrictions for an instruction's inputs.
+ // Each entry specifies an allowed register set for a particular input.
+ // They are listed in the order in which regalloc should pick a register
+ // from the register set (most constrained first).
+ // Inputs which do not need registers are not listed.
+ inputs []inputInfo
+ // clobbers encodes the set of registers that are overwritten by
+ // the instruction (other than the output registers).
clobbers regMask
- outputs []outputInfo // ordered in register allocation order
+ // outputs is the same as inputs, but for the outputs of the instruction.
+ outputs []outputInfo
}
type auxType int8
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 5bf7021432..2145c6e723 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -550,6 +550,18 @@ const (
OpAMD64BTRQconst
OpAMD64BTSLconst
OpAMD64BTSQconst
+ OpAMD64BTCQmodify
+ OpAMD64BTCLmodify
+ OpAMD64BTSQmodify
+ OpAMD64BTSLmodify
+ OpAMD64BTRQmodify
+ OpAMD64BTRLmodify
+ OpAMD64BTCQconstmodify
+ OpAMD64BTCLconstmodify
+ OpAMD64BTSQconstmodify
+ OpAMD64BTSLconstmodify
+ OpAMD64BTRQconstmodify
+ OpAMD64BTRLconstmodify
OpAMD64TESTQ
OpAMD64TESTL
OpAMD64TESTW
@@ -1107,6 +1119,7 @@ const (
OpARM64LoweredMuluhilo
OpARM64MVN
OpARM64NEG
+ OpARM64FABSD
OpARM64FNEGS
OpARM64FNEGD
OpARM64FSQRTD
@@ -1139,6 +1152,8 @@ const (
OpARM64SRLconst
OpARM64SRA
OpARM64SRAconst
+ OpARM64ROR
+ OpARM64RORW
OpARM64RORconst
OpARM64RORWconst
OpARM64EXTRconst
@@ -1157,6 +1172,12 @@ const (
OpARM64TSTWconst
OpARM64FCMPS
OpARM64FCMPD
+ OpARM64MVNshiftLL
+ OpARM64MVNshiftRL
+ OpARM64MVNshiftRA
+ OpARM64NEGshiftLL
+ OpARM64NEGshiftRL
+ OpARM64NEGshiftRA
OpARM64ADDshiftLL
OpARM64ADDshiftRL
OpARM64ADDshiftRA
@@ -1184,6 +1205,12 @@ const (
OpARM64CMPshiftLL
OpARM64CMPshiftRL
OpARM64CMPshiftRA
+ OpARM64CMNshiftLL
+ OpARM64CMNshiftRL
+ OpARM64CMNshiftRA
+ OpARM64TSTshiftLL
+ OpARM64TSTshiftRL
+ OpARM64TSTshiftRA
OpARM64BFI
OpARM64BFXIL
OpARM64SBFIZ
@@ -1247,6 +1274,8 @@ const (
OpARM64MOVDstorezeroidx8
OpARM64FMOVDgpfp
OpARM64FMOVDfpgp
+ OpARM64FMOVSgpfp
+ OpARM64FMOVSfpgp
OpARM64MOVBreg
OpARM64MOVBUreg
OpARM64MOVHreg
@@ -1275,6 +1304,7 @@ const (
OpARM64FCVTDS
OpARM64FRINTAD
OpARM64FRINTMD
+ OpARM64FRINTND
OpARM64FRINTPD
OpARM64FRINTZD
OpARM64CSEL
@@ -1551,6 +1581,7 @@ const (
OpPPC64MULHW
OpPPC64MULHDU
OpPPC64MULHWU
+ OpPPC64LoweredMuluhilo
OpPPC64FMUL
OpPPC64FMULS
OpPPC64FMADD
@@ -1630,6 +1661,17 @@ const (
OpPPC64MOVDBRload
OpPPC64MOVWBRload
OpPPC64MOVHBRload
+ OpPPC64MOVBZloadidx
+ OpPPC64MOVHloadidx
+ OpPPC64MOVHZloadidx
+ OpPPC64MOVWloadidx
+ OpPPC64MOVWZloadidx
+ OpPPC64MOVDloadidx
+ OpPPC64MOVHBRloadidx
+ OpPPC64MOVWBRloadidx
+ OpPPC64MOVDBRloadidx
+ OpPPC64FMOVDloadidx
+ OpPPC64FMOVSloadidx
OpPPC64MOVDBRstore
OpPPC64MOVWBRstore
OpPPC64MOVHBRstore
@@ -1641,6 +1683,15 @@ const (
OpPPC64MOVDstore
OpPPC64FMOVDstore
OpPPC64FMOVSstore
+ OpPPC64MOVBstoreidx
+ OpPPC64MOVHstoreidx
+ OpPPC64MOVWstoreidx
+ OpPPC64MOVDstoreidx
+ OpPPC64FMOVDstoreidx
+ OpPPC64FMOVSstoreidx
+ OpPPC64MOVHBRstoreidx
+ OpPPC64MOVWBRstoreidx
+ OpPPC64MOVDBRstoreidx
OpPPC64MOVBstorezero
OpPPC64MOVHstorezero
OpPPC64MOVWstorezero
@@ -6896,6 +6947,180 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "BTCQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTCLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTSQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTSQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTSLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTRQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTRLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTCQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTCLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTSQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTSQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTSLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTRQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
+ name: "BTRLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ },
+ },
+ {
name: "TESTQ",
argLen: 2,
commutative: true,
@@ -14657,6 +14882,19 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "FABSD",
+ argLen: 1,
+ asm: arm64.AFABSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
name: "FNEGS",
argLen: 1,
asm: arm64.AFNEGS,
@@ -15105,6 +15343,34 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "ROR",
+ argLen: 2,
+ asm: arm64.AROR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RORW",
+ argLen: 2,
+ asm: arm64.ARORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "RORconst",
auxType: auxInt64,
argLen: 1,
@@ -15321,6 +15587,90 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "MVNshiftLL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MVNshiftRL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MVNshiftRA",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGshiftLL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGshiftRL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGshiftRA",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "ADDshiftLL",
auxType: auxInt64,
argLen: 2,
@@ -15717,6 +16067,78 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "CMNshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
name: "BFI",
auxType: auxInt64,
argLen: 2,
@@ -16572,6 +16994,32 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "FMOVSgpfp",
+ argLen: 1,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVSfpgp",
+ argLen: 1,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
name: "MOVBreg",
argLen: 1,
asm: arm64.AMOVB,
@@ -16936,6 +17384,19 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "FRINTND",
+ argLen: 1,
+ asm: arm64.AFRINTND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
name: "FRINTPD",
argLen: 1,
asm: arm64.AFRINTPD,
@@ -20590,6 +21051,21 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "LoweredMuluhilo",
+ argLen: 2,
+ resultNotInArgs: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
name: "FMUL",
argLen: 2,
commutative: true,
@@ -21691,6 +22167,193 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "MOVBZloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHZloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWZloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHBRloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWBRloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDBRloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMOVSloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
name: "MOVDBRstore",
auxType: auxSymOff,
argLen: 3,
@@ -21849,6 +22512,141 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "MOVBstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVSstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHBRstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWBRstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDBRstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
name: "MOVBstorezero",
auxType: auxSymOff,
argLen: 2,
diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go
index af2b9ef0ed..6462370d5c 100644
--- a/src/cmd/compile/internal/ssa/prove.go
+++ b/src/cmd/compile/internal/ssa/prove.go
@@ -58,7 +58,7 @@ func (r relation) String() string {
}
// domain represents the domain of a variable pair in which a set
-// of relations is known. For example, relations learned for unsigned
+// of relations is known. For example, relations learned for unsigned
// pairs cannot be transferred to signed pairs because the same bit
// representation can mean something else.
type domain uint
@@ -625,7 +625,7 @@ var (
// For example:
// OpLess8: {signed, lt},
// v1 = (OpLess8 v2 v3).
- // If v1 branch is taken than we learn that the rangeMaks
+ // If v1 branch is taken then we learn that the rangeMask
// can be at most lt.
domainRelationTable = map[Op]struct {
d domain
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index 278da6fe99..8946cf6b5c 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -150,6 +150,8 @@ type register uint8
const noRegister register = 255
+// A regMask encodes a set of machine registers.
+// TODO: regMask -> regSet?
type regMask uint64
func (m regMask) String() string {
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index ca6280deb1..fd5f684eda 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -234,7 +234,6 @@ func canMergeLoad(target, load, x *Value) bool {
// memPreds contains memory states known to be predecessors of load's
// memory state. It is lazily initialized.
var memPreds map[*Value]bool
-search:
for i := 0; len(args) > 0; i++ {
const limit = 100
if i >= limit {
@@ -246,13 +245,13 @@ search:
if target.Block.ID != v.Block.ID {
// Since target and load are in the same block
// we can stop searching when we leave the block.
- continue search
+ continue
}
if v.Op == OpPhi {
// A Phi implies we have reached the top of the block.
// The memory phi, if it exists, is always
// the first logical store in the block.
- continue search
+ continue
}
if v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
// We could handle this situation however it is likely
@@ -296,14 +295,14 @@ search:
// load = read ... mem
// target = add x load
if memPreds[v] {
- continue search
+ continue
}
return false
}
if len(v.Args) > 0 && v.Args[len(v.Args)-1] == mem {
// If v takes mem as an input then we know mem
// is valid at this point.
- continue search
+ continue
}
for _, a := range v.Args {
if target.Block.ID == a.Block.ID {
@@ -450,19 +449,24 @@ func extend32Fto64F(f float32) float64 {
return math.Float64frombits(r)
}
-// i2f is used in rules for converting from an AuxInt to a float.
-func i2f(i int64) float64 {
- return math.Float64frombits(uint64(i))
+// auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
+func auxFrom64F(f float64) int64 {
+ return int64(math.Float64bits(f))
}
-// i2f32 is used in rules for converting from an AuxInt to a float32.
-func i2f32(i int64) float32 {
- return float32(math.Float64frombits(uint64(i)))
+// auxFrom32F encodes a float32 value so it can be stored in an AuxInt.
+func auxFrom32F(f float32) int64 {
+ return int64(math.Float64bits(extend32Fto64F(f)))
}
-// f2i is used in the rules for storing a float in AuxInt.
-func f2i(f float64) int64 {
- return int64(math.Float64bits(f))
+// auxTo32F decodes a float32 from the AuxInt value provided.
+func auxTo32F(i int64) float32 {
+ return truncate64Fto32F(math.Float64frombits(uint64(i)))
+}
+
+// auxTo64F decodes a float64 from the AuxInt value provided.
+func auxTo64F(i int64) float64 {
+ return math.Float64frombits(uint64(i))
}
// uaddOvf returns true if unsigned a+b would overflow.
@@ -646,11 +650,11 @@ func noteRule(s string) bool {
return true
}
-// warnRule generates a compiler debug output with string s when
-// cond is true and the rule is fired.
+// warnRule generates compiler debug output with string s when
+// v is not in autogenerated code, cond is true and the rule has fired.
func warnRule(cond bool, v *Value, s string) bool {
- if cond {
- v.Block.Func.Warnl(v.Pos, s)
+ if pos := v.Pos; pos.Line() > 1 && cond {
+ v.Block.Func.Warnl(pos, s)
}
return true
}
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index abc1d18309..5481b4e773 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -2507,38 +2507,44 @@ func rewriteValue386_Op386CMPBconst_0(v *Value) bool {
v.reset(Op386FlagLT_ULT)
return true
}
- // match: (CMPBconst (ANDL x y) [0])
- // cond:
+ // match: (CMPBconst l:(ANDL x y) [0])
+ // cond: l.Uses==1
// result: (TESTB x y)
for {
if v.AuxInt != 0 {
break
}
- v_0 := v.Args[0]
- if v_0.Op != Op386ANDL {
+ l := v.Args[0]
+ if l.Op != Op386ANDL {
+ break
+ }
+ _ = l.Args[1]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
break
}
- _ = v_0.Args[1]
- x := v_0.Args[0]
- y := v_0.Args[1]
v.reset(Op386TESTB)
v.AddArg(x)
v.AddArg(y)
return true
}
- // match: (CMPBconst (ANDLconst [c] x) [0])
- // cond:
+ // match: (CMPBconst l:(ANDLconst [c] x) [0])
+ // cond: l.Uses==1
// result: (TESTBconst [int64(int8(c))] x)
for {
if v.AuxInt != 0 {
break
}
- v_0 := v.Args[0]
- if v_0.Op != Op386ANDLconst {
+ l := v.Args[0]
+ if l.Op != Op386ANDLconst {
+ break
+ }
+ c := l.AuxInt
+ x := l.Args[0]
+ if !(l.Uses == 1) {
break
}
- c := v_0.AuxInt
- x := v_0.Args[0]
v.reset(Op386TESTBconst)
v.AuxInt = int64(int8(c))
v.AddArg(x)
@@ -2819,38 +2825,44 @@ func rewriteValue386_Op386CMPLconst_0(v *Value) bool {
v.reset(Op386FlagLT_ULT)
return true
}
- // match: (CMPLconst (ANDL x y) [0])
- // cond:
+ // match: (CMPLconst l:(ANDL x y) [0])
+ // cond: l.Uses==1
// result: (TESTL x y)
for {
if v.AuxInt != 0 {
break
}
- v_0 := v.Args[0]
- if v_0.Op != Op386ANDL {
+ l := v.Args[0]
+ if l.Op != Op386ANDL {
+ break
+ }
+ _ = l.Args[1]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
break
}
- _ = v_0.Args[1]
- x := v_0.Args[0]
- y := v_0.Args[1]
v.reset(Op386TESTL)
v.AddArg(x)
v.AddArg(y)
return true
}
- // match: (CMPLconst (ANDLconst [c] x) [0])
- // cond:
+ // match: (CMPLconst l:(ANDLconst [c] x) [0])
+ // cond: l.Uses==1
// result: (TESTLconst [c] x)
for {
if v.AuxInt != 0 {
break
}
- v_0 := v.Args[0]
- if v_0.Op != Op386ANDLconst {
+ l := v.Args[0]
+ if l.Op != Op386ANDLconst {
+ break
+ }
+ c := l.AuxInt
+ x := l.Args[0]
+ if !(l.Uses == 1) {
break
}
- c := v_0.AuxInt
- x := v_0.Args[0]
v.reset(Op386TESTLconst)
v.AuxInt = c
v.AddArg(x)
@@ -3122,38 +3134,44 @@ func rewriteValue386_Op386CMPWconst_0(v *Value) bool {
v.reset(Op386FlagLT_ULT)
return true
}
- // match: (CMPWconst (ANDL x y) [0])
- // cond:
+ // match: (CMPWconst l:(ANDL x y) [0])
+ // cond: l.Uses==1
// result: (TESTW x y)
for {
if v.AuxInt != 0 {
break
}
- v_0 := v.Args[0]
- if v_0.Op != Op386ANDL {
+ l := v.Args[0]
+ if l.Op != Op386ANDL {
+ break
+ }
+ _ = l.Args[1]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
break
}
- _ = v_0.Args[1]
- x := v_0.Args[0]
- y := v_0.Args[1]
v.reset(Op386TESTW)
v.AddArg(x)
v.AddArg(y)
return true
}
- // match: (CMPWconst (ANDLconst [c] x) [0])
- // cond:
+ // match: (CMPWconst l:(ANDLconst [c] x) [0])
+ // cond: l.Uses==1
// result: (TESTWconst [int64(int16(c))] x)
for {
if v.AuxInt != 0 {
break
}
- v_0 := v.Args[0]
- if v_0.Op != Op386ANDLconst {
+ l := v.Args[0]
+ if l.Op != Op386ANDLconst {
+ break
+ }
+ c := l.AuxInt
+ x := l.Args[0]
+ if !(l.Uses == 1) {
break
}
- c := v_0.AuxInt
- x := v_0.Args[0]
v.reset(Op386TESTWconst)
v.AuxInt = int64(int16(c))
v.AddArg(x)
@@ -19804,7 +19822,7 @@ func rewriteValue386_OpNeg32F_0(v *Value) bool {
_ = typ
// match: (Neg32F x)
// cond: !config.use387
- // result: (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
+ // result: (PXOR x (MOVSSconst <typ.Float32> [auxFrom32F(float32(math.Copysign(0, -1)))]))
for {
x := v.Args[0]
if !(!config.use387) {
@@ -19813,7 +19831,7 @@ func rewriteValue386_OpNeg32F_0(v *Value) bool {
v.reset(Op386PXOR)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386MOVSSconst, typ.Float32)
- v0.AuxInt = f2i(math.Copysign(0, -1))
+ v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1)))
v.AddArg(v0)
return true
}
@@ -19840,7 +19858,7 @@ func rewriteValue386_OpNeg64F_0(v *Value) bool {
_ = typ
// match: (Neg64F x)
// cond: !config.use387
- // result: (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
+ // result: (PXOR x (MOVSDconst <typ.Float64> [auxFrom64F(math.Copysign(0, -1))]))
for {
x := v.Args[0]
if !(!config.use387) {
@@ -19849,7 +19867,7 @@ func rewriteValue386_OpNeg64F_0(v *Value) bool {
v.reset(Op386PXOR)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386MOVSDconst, typ.Float64)
- v0.AuxInt = f2i(math.Copysign(0, -1))
+ v0.AuxInt = auxFrom64F(math.Copysign(0, -1))
v.AddArg(v0)
return true
}
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 1b531954db..89fc6780b9 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -65,18 +65,46 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpAMD64ANDQmodify_0(v)
case OpAMD64BSFQ:
return rewriteValueAMD64_OpAMD64BSFQ_0(v)
+ case OpAMD64BTCLconst:
+ return rewriteValueAMD64_OpAMD64BTCLconst_0(v)
+ case OpAMD64BTCLconstmodify:
+ return rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v)
+ case OpAMD64BTCLmodify:
+ return rewriteValueAMD64_OpAMD64BTCLmodify_0(v)
+ case OpAMD64BTCQconst:
+ return rewriteValueAMD64_OpAMD64BTCQconst_0(v)
+ case OpAMD64BTCQconstmodify:
+ return rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v)
+ case OpAMD64BTCQmodify:
+ return rewriteValueAMD64_OpAMD64BTCQmodify_0(v)
case OpAMD64BTLconst:
return rewriteValueAMD64_OpAMD64BTLconst_0(v)
case OpAMD64BTQconst:
return rewriteValueAMD64_OpAMD64BTQconst_0(v)
case OpAMD64BTRLconst:
return rewriteValueAMD64_OpAMD64BTRLconst_0(v)
+ case OpAMD64BTRLconstmodify:
+ return rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v)
+ case OpAMD64BTRLmodify:
+ return rewriteValueAMD64_OpAMD64BTRLmodify_0(v)
case OpAMD64BTRQconst:
return rewriteValueAMD64_OpAMD64BTRQconst_0(v)
+ case OpAMD64BTRQconstmodify:
+ return rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v)
+ case OpAMD64BTRQmodify:
+ return rewriteValueAMD64_OpAMD64BTRQmodify_0(v)
case OpAMD64BTSLconst:
return rewriteValueAMD64_OpAMD64BTSLconst_0(v)
+ case OpAMD64BTSLconstmodify:
+ return rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v)
+ case OpAMD64BTSLmodify:
+ return rewriteValueAMD64_OpAMD64BTSLmodify_0(v)
case OpAMD64BTSQconst:
return rewriteValueAMD64_OpAMD64BTSQconst_0(v)
+ case OpAMD64BTSQconstmodify:
+ return rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v)
+ case OpAMD64BTSQmodify:
+ return rewriteValueAMD64_OpAMD64BTSQmodify_0(v)
case OpAMD64CMOVLCC:
return rewriteValueAMD64_OpAMD64CMOVLCC_0(v)
case OpAMD64CMOVLCS:
@@ -141,24 +169,32 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpAMD64CMPB_0(v)
case OpAMD64CMPBconst:
return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
+ case OpAMD64CMPBconstload:
+ return rewriteValueAMD64_OpAMD64CMPBconstload_0(v)
case OpAMD64CMPBload:
return rewriteValueAMD64_OpAMD64CMPBload_0(v)
case OpAMD64CMPL:
return rewriteValueAMD64_OpAMD64CMPL_0(v)
case OpAMD64CMPLconst:
return rewriteValueAMD64_OpAMD64CMPLconst_0(v) || rewriteValueAMD64_OpAMD64CMPLconst_10(v)
+ case OpAMD64CMPLconstload:
+ return rewriteValueAMD64_OpAMD64CMPLconstload_0(v)
case OpAMD64CMPLload:
return rewriteValueAMD64_OpAMD64CMPLload_0(v)
case OpAMD64CMPQ:
return rewriteValueAMD64_OpAMD64CMPQ_0(v)
case OpAMD64CMPQconst:
return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
+ case OpAMD64CMPQconstload:
+ return rewriteValueAMD64_OpAMD64CMPQconstload_0(v)
case OpAMD64CMPQload:
return rewriteValueAMD64_OpAMD64CMPQload_0(v)
case OpAMD64CMPW:
return rewriteValueAMD64_OpAMD64CMPW_0(v)
case OpAMD64CMPWconst:
return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
+ case OpAMD64CMPWconstload:
+ return rewriteValueAMD64_OpAMD64CMPWconstload_0(v)
case OpAMD64CMPWload:
return rewriteValueAMD64_OpAMD64CMPWload_0(v)
case OpAMD64CMPXCHGLlock:
@@ -270,7 +306,7 @@ func rewriteValueAMD64(v *Value) bool {
case OpAMD64MOVQloadidx8:
return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
case OpAMD64MOVQstore:
- return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v) || rewriteValueAMD64_OpAMD64MOVQstore_20(v)
+ return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v) || rewriteValueAMD64_OpAMD64MOVQstore_20(v) || rewriteValueAMD64_OpAMD64MOVQstore_30(v)
case OpAMD64MOVQstoreconst:
return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
case OpAMD64MOVQstoreconstidx1:
@@ -3582,6 +3618,22 @@ func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (ANDLconst [c] (BTRLconst [d] x))
+ // cond:
+ // result: (ANDLconst [c &^ (1<<uint32(d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTRLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = c &^ (1 << uint32(d))
+ v.AddArg(x)
+ return true
+ }
// match: (ANDLconst [ 0xFF] x)
// cond:
// result: (MOVBQZX x)
@@ -4093,6 +4145,22 @@ func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (ANDQconst [c] (BTRQconst [d] x))
+ // cond:
+ // result: (ANDQconst [c &^ (1<<uint32(d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTRQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = c &^ (1 << uint32(d))
+ v.AddArg(x)
+ return true
+ }
// match: (ANDQconst [ 0xFF] x)
// cond:
// result: (MOVBQZX x)
@@ -4421,6 +4489,320 @@ func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64BTCLconst_0(v *Value) bool {
+ // match: (BTCLconst [c] (XORLconst [d] x))
+ // cond:
+ // result: (XORLconst [d ^ 1<<uint32(c)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64XORLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = d ^ 1<<uint32(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCLconst [c] (BTCLconst [d] x))
+ // cond:
+ // result: (XORLconst [1<<uint32(c) ^ 1<<uint32(d)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTCLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [d^(1<<uint32(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = d ^ (1 << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v *Value) bool {
+ // match: (BTCLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTCLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTCLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCLmodify_0(v *Value) bool {
+ // match: (BTCLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (BTCLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64BTCLmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTCLmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCQconst_0(v *Value) bool {
+ // match: (BTCQconst [c] (XORQconst [d] x))
+ // cond:
+ // result: (XORQconst [d ^ 1<<uint32(c)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64XORQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = d ^ 1<<uint32(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCQconst [c] (BTCQconst [d] x))
+ // cond:
+ // result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTCQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [d^(1<<uint32(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = d ^ (1 << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v *Value) bool {
+ // match: (BTCQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTCQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTCQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCQmodify_0(v *Value) bool {
+ // match: (BTCQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (BTCQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64BTCQmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTCQmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64BTLconst_0(v *Value) bool {
// match: (BTLconst [c] (SHRQconst [d] x))
// cond: (c+d)<64
@@ -4635,6 +5017,160 @@ func rewriteValueAMD64_OpAMD64BTRLconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (BTRLconst [c] (ANDLconst [d] x))
+ // cond:
+ // result: (ANDLconst [d &^ (1<<uint32(c))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = d &^ (1 << uint32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRLconst [c] (BTRLconst [d] x))
+ // cond:
+ // result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTRLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [d&^(1<<uint32(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = d &^ (1 << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v *Value) bool {
+ // match: (BTRLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTRLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTRLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRLmodify_0(v *Value) bool {
+ // match: (BTRLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (BTRLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64BTRLmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTRLmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64BTRQconst_0(v *Value) bool {
@@ -4674,6 +5210,160 @@ func rewriteValueAMD64_OpAMD64BTRQconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (BTRQconst [c] (ANDQconst [d] x))
+ // cond:
+ // result: (ANDQconst [d &^ (1<<uint32(c))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = d &^ (1 << uint32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (BTRQconst [d] x))
+ // cond:
+ // result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTRQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [d&^(1<<uint32(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = d &^ (1 << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v *Value) bool {
+ // match: (BTRQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTRQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTRQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRQmodify_0(v *Value) bool {
+ // match: (BTRQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (BTRQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64BTRQmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTRQmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64BTSLconst_0(v *Value) bool {
@@ -4713,6 +5403,160 @@ func rewriteValueAMD64_OpAMD64BTSLconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (BTSLconst [c] (ORLconst [d] x))
+ // cond:
+ // result: (ORLconst [d | 1<<uint32(c)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ORLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = d | 1<<uint32(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSLconst [c] (BTSLconst [d] x))
+ // cond:
+ // result: (ORLconst [1<<uint32(d) | 1<<uint32(c)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTSLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = 1<<uint32(d) | 1<<uint32(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSLconst [c] (MOVLconst [d]))
+ // cond:
+ // result: (MOVLconst [d|(1<<uint32(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = d | (1 << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v *Value) bool {
+ // match: (BTSLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTSLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTSLconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSLmodify_0(v *Value) bool {
+ // match: (BTSLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (BTSLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64BTSLmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTSLmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64BTSQconst_0(v *Value) bool {
@@ -4752,6 +5596,160 @@ func rewriteValueAMD64_OpAMD64BTSQconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (BTSQconst [c] (ORQconst [d] x))
+ // cond:
+ // result: (ORQconst [d | 1<<uint32(c)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = d | 1<<uint32(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (BTSQconst [d] x))
+ // cond:
+ // result: (ORQconst [1<<uint32(d) | 1<<uint32(c)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTSQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = 1<<uint32(d) | 1<<uint32(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (MOVQconst [d]))
+ // cond:
+ // result: (MOVQconst [d|(1<<uint32(c))])
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := v_0.AuxInt
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = d | (1 << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v *Value) bool {
+ // match: (BTSQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64BTSQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTSQconstmodify)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSQmodify_0(v *Value) bool {
+ // match: (BTSQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (BTSQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64BTSQmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64BTSQmodify)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64CMOVLCC_0(v *Value) bool {
@@ -7932,7 +8930,112 @@ func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64CMPBconstload_0(v *Value) bool {
+ // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPBconstload)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPBconstload)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64CMPBload_0(v *Value) bool {
+ // match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (CMPBload [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPBload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPBload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
// match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
// cond: validValAndOff(int64(int8(c)),off)
// result: (CMPBconstload {sym} [makeValAndOff(int64(int8(c)),off)] ptr mem)
@@ -8249,7 +9352,112 @@ func rewriteValueAMD64_OpAMD64CMPLconst_10(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64CMPLconstload_0(v *Value) bool {
+ // match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPLconstload)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPLconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPLconstload)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64CMPLload_0(v *Value) bool {
+ // match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (CMPLload [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPLload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPLload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
// match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
// cond: validValAndOff(c,off)
// result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
@@ -8689,7 +9897,112 @@ func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64CMPQconstload_0(v *Value) bool {
+ // match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (CMPQconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPQconstload)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPQconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPQconstload)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64CMPQload_0(v *Value) bool {
+ // match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (CMPQload [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPQload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPQload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
// match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
// cond: validValAndOff(c,off)
// result: (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
@@ -8987,7 +10300,112 @@ func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64CMPWconstload_0(v *Value) bool {
+ // match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2)
+ // result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPWconstload)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPWconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ mem := v.Args[1]
+ if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPWconstload)
+ v.AuxInt = ValAndOff(valoff1).add(off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64CMPWload_0(v *Value) bool {
+ // match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(off1+off2)
+ // result: (CMPWload [off1+off2] {sym} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPWload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ base := v_0.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPWload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
// cond: validValAndOff(int64(int16(c)),off)
// result: (CMPWconstload {sym} [makeValAndOff(int64(int16(c)),off)] ptr mem)
@@ -15448,8 +16866,140 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (BTCLmodify [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64BTCL {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ if ptr != l.Args[0] {
+ break
+ }
+ mem := l.Args[1]
+ x := y.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64BTCLmodify)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (BTRLmodify [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64BTRL {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ if ptr != l.Args[0] {
+ break
+ }
+ mem := l.Args[1]
+ x := y.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64BTRLmodify)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (BTSLmodify [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64BTSL {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ if ptr != l.Args[0] {
+ break
+ }
+ mem := l.Args[1]
+ x := y.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64BTSLmodify)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool {
// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
// result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
@@ -15477,7 +17027,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
v.reset(OpAMD64ADDLconstmodify)
@@ -15488,7 +17038,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
return true
}
// match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
// result: (ANDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
@@ -15516,7 +17066,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
v.reset(OpAMD64ANDLconstmodify)
@@ -15527,7 +17077,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
return true
}
// match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
// result: (ORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
@@ -15555,7 +17105,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
v.reset(OpAMD64ORLconstmodify)
@@ -15565,11 +17115,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool {
// match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
// result: (XORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
@@ -15597,7 +17144,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool {
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
v.reset(OpAMD64XORLconstmodify)
@@ -15607,6 +17154,123 @@ func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (BTCLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64BTCLconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
+ break
+ }
+ v.reset(OpAMD64BTCLconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (BTRLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64BTRLconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
+ break
+ }
+ v.reset(OpAMD64BTRLconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (BTSLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64BTSLconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
+ break
+ }
+ v.reset(OpAMD64BTSLconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
// cond:
// result: (MOVSSstore [off] {sym} ptr val mem)
@@ -18099,8 +19763,137 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (BTCQmodify [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64BTCQ {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ if ptr != l.Args[0] {
+ break
+ }
+ mem := l.Args[1]
+ x := y.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64BTCQmodify)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (BTRQmodify [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64BTRQ {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ if ptr != l.Args[0] {
+ break
+ }
+ mem := l.Args[1]
+ x := y.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64BTRQmodify)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
+ // result: (BTSQmodify [off] {sym} ptr x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ y := v.Args[1]
+ if y.Op != OpAMD64BTSQ {
+ break
+ }
+ _ = y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ if ptr != l.Args[0] {
+ break
+ }
+ mem := l.Args[1]
+ x := y.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64BTSQmodify)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
// result: (ADDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
@@ -18128,7 +19921,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool {
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
v.reset(OpAMD64ADDQconstmodify)
@@ -18139,7 +19932,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool {
return true
}
// match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
// result: (ANDQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
@@ -18167,7 +19960,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool {
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
v.reset(OpAMD64ANDQconstmodify)
@@ -18178,7 +19971,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool {
return true
}
// match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
// result: (ORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
@@ -18206,7 +19999,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool {
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
v.reset(OpAMD64ORQconstmodify)
@@ -18217,7 +20010,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool {
return true
}
// match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
- // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
// result: (XORQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
for {
off := v.AuxInt
@@ -18245,7 +20038,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool {
if mem != v.Args[2] {
break
}
- if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) {
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
break
}
v.reset(OpAMD64XORQconstmodify)
@@ -18255,6 +20048,126 @@ func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (BTCQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64BTCQconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
+ break
+ }
+ v.reset(OpAMD64BTCQconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (BTRQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64BTRQconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
+ break
+ }
+ v.reset(OpAMD64BTRQconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQstore_30(v *Value) bool {
+ // match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
+ // result: (BTSQconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ a := v.Args[1]
+ if a.Op != OpAMD64BTSQconst {
+ break
+ }
+ c := a.AuxInt
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ if l.AuxInt != off {
+ break
+ }
+ if l.Aux != sym {
+ break
+ }
+ _ = l.Args[1]
+ ptr2 := l.Args[0]
+ mem := l.Args[1]
+ if mem != v.Args[2] {
+ break
+ }
+ if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
+ break
+ }
+ v.reset(OpAMD64BTSQconstmodify)
+ v.AuxInt = makeValAndOff(c, off)
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
// cond:
// result: (MOVSDstore [off] {sym} ptr val mem)
@@ -32713,6 +34626,38 @@ func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (ORLconst [c] (ORLconst [d] x))
+ // cond:
+ // result: (ORLconst [c | d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ORLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = c | d
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORLconst [c] (BTSLconst [d] x))
+ // cond:
+ // result: (ORLconst [c | 1<<uint32(d)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTSLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = c | 1<<uint32(d)
+ v.AddArg(x)
+ return true
+ }
// match: (ORLconst [c] x)
// cond: int32(c)==0
// result: x
@@ -43732,6 +45677,38 @@ func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (ORQconst [c] (ORQconst [d] x))
+ // cond:
+ // result: (ORQconst [c | d] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = c | d
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORQconst [c] (BTSQconst [d] x))
+ // cond:
+ // result: (ORQconst [c | 1<<uint32(d)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTSQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = c | 1<<uint32(d)
+ v.AddArg(x)
+ return true
+ }
// match: (ORQconst [0] x)
// cond:
// result: x
@@ -54458,6 +56435,22 @@ func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (XORLconst [c] (BTCLconst [d] x))
+ // cond:
+ // result: (XORLconst [c ^ 1<<uint32(d)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTCLconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = c ^ 1<<uint32(d)
+ v.AddArg(x)
+ return true
+ }
// match: (XORLconst [c] x)
// cond: int32(c)==0
// result: x
@@ -54981,6 +56974,22 @@ func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (XORQconst [c] (BTCQconst [d] x))
+ // cond:
+ // result: (XORQconst [c ^ 1<<uint32(d)] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64BTCQconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = c ^ 1<<uint32(d)
+ v.AddArg(x)
+ return true
+ }
// match: (XORQconst [0] x)
// cond:
// result: x
@@ -60410,13 +62419,13 @@ func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
_ = typ
// match: (Neg32F x)
// cond:
- // result: (PXOR x (MOVSSconst <typ.Float32> [f2i(math.Copysign(0, -1))]))
+ // result: (PXOR x (MOVSSconst <typ.Float32> [auxFrom32F(float32(math.Copysign(0, -1)))]))
for {
x := v.Args[0]
v.reset(OpAMD64PXOR)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
- v0.AuxInt = f2i(math.Copysign(0, -1))
+ v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1)))
v.AddArg(v0)
return true
}
@@ -60439,13 +62448,13 @@ func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
_ = typ
// match: (Neg64F x)
// cond:
- // result: (PXOR x (MOVSDconst <typ.Float64> [f2i(math.Copysign(0, -1))]))
+ // result: (PXOR x (MOVSDconst <typ.Float64> [auxFrom64F(math.Copysign(0, -1))]))
for {
x := v.Args[0]
v.reset(OpAMD64PXOR)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
- v0.AuxInt = f2i(math.Copysign(0, -1))
+ v0.AuxInt = auxFrom64F(math.Copysign(0, -1))
v.AddArg(v0)
return true
}
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index fbdf352998..ba38ae0505 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -16,7 +16,7 @@ var _ = types.TypeMem // in case not otherwise used
func rewriteValueARM64(v *Value) bool {
switch v.Op {
case OpARM64ADD:
- return rewriteValueARM64_OpARM64ADD_0(v) || rewriteValueARM64_OpARM64ADD_10(v)
+ return rewriteValueARM64_OpARM64ADD_0(v) || rewriteValueARM64_OpARM64ADD_10(v) || rewriteValueARM64_OpARM64ADD_20(v)
case OpARM64ADDconst:
return rewriteValueARM64_OpARM64ADDconst_0(v)
case OpARM64ADDshiftLL:
@@ -51,6 +51,12 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64CMNWconst_0(v)
case OpARM64CMNconst:
return rewriteValueARM64_OpARM64CMNconst_0(v)
+ case OpARM64CMNshiftLL:
+ return rewriteValueARM64_OpARM64CMNshiftLL_0(v)
+ case OpARM64CMNshiftRA:
+ return rewriteValueARM64_OpARM64CMNshiftRA_0(v)
+ case OpARM64CMNshiftRL:
+ return rewriteValueARM64_OpARM64CMNshiftRL_0(v)
case OpARM64CMP:
return rewriteValueARM64_OpARM64CMP_0(v)
case OpARM64CMPW:
@@ -87,6 +93,8 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64FADDD_0(v)
case OpARM64FADDS:
return rewriteValueARM64_OpARM64FADDS_0(v)
+ case OpARM64FMOVDfpgp:
+ return rewriteValueARM64_OpARM64FMOVDfpgp_0(v)
case OpARM64FMOVDgpfp:
return rewriteValueARM64_OpARM64FMOVDgpfp_0(v)
case OpARM64FMOVDload:
@@ -137,6 +145,10 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64LessThan_0(v)
case OpARM64LessThanU:
return rewriteValueARM64_OpARM64LessThanU_0(v)
+ case OpARM64MADD:
+ return rewriteValueARM64_OpARM64MADD_0(v) || rewriteValueARM64_OpARM64MADD_10(v) || rewriteValueARM64_OpARM64MADD_20(v)
+ case OpARM64MADDW:
+ return rewriteValueARM64_OpARM64MADDW_0(v) || rewriteValueARM64_OpARM64MADDW_10(v) || rewriteValueARM64_OpARM64MADDW_20(v)
case OpARM64MNEG:
return rewriteValueARM64_OpARM64MNEG_0(v) || rewriteValueARM64_OpARM64MNEG_10(v) || rewriteValueARM64_OpARM64MNEG_20(v)
case OpARM64MNEGW:
@@ -243,18 +255,34 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64MOVWstorezeroidx_0(v)
case OpARM64MOVWstorezeroidx4:
return rewriteValueARM64_OpARM64MOVWstorezeroidx4_0(v)
+ case OpARM64MSUB:
+ return rewriteValueARM64_OpARM64MSUB_0(v) || rewriteValueARM64_OpARM64MSUB_10(v) || rewriteValueARM64_OpARM64MSUB_20(v)
+ case OpARM64MSUBW:
+ return rewriteValueARM64_OpARM64MSUBW_0(v) || rewriteValueARM64_OpARM64MSUBW_10(v) || rewriteValueARM64_OpARM64MSUBW_20(v)
case OpARM64MUL:
return rewriteValueARM64_OpARM64MUL_0(v) || rewriteValueARM64_OpARM64MUL_10(v) || rewriteValueARM64_OpARM64MUL_20(v)
case OpARM64MULW:
return rewriteValueARM64_OpARM64MULW_0(v) || rewriteValueARM64_OpARM64MULW_10(v) || rewriteValueARM64_OpARM64MULW_20(v)
case OpARM64MVN:
return rewriteValueARM64_OpARM64MVN_0(v)
+ case OpARM64MVNshiftLL:
+ return rewriteValueARM64_OpARM64MVNshiftLL_0(v)
+ case OpARM64MVNshiftRA:
+ return rewriteValueARM64_OpARM64MVNshiftRA_0(v)
+ case OpARM64MVNshiftRL:
+ return rewriteValueARM64_OpARM64MVNshiftRL_0(v)
case OpARM64NEG:
return rewriteValueARM64_OpARM64NEG_0(v)
+ case OpARM64NEGshiftLL:
+ return rewriteValueARM64_OpARM64NEGshiftLL_0(v)
+ case OpARM64NEGshiftRA:
+ return rewriteValueARM64_OpARM64NEGshiftRA_0(v)
+ case OpARM64NEGshiftRL:
+ return rewriteValueARM64_OpARM64NEGshiftRL_0(v)
case OpARM64NotEqual:
return rewriteValueARM64_OpARM64NotEqual_0(v)
case OpARM64OR:
- return rewriteValueARM64_OpARM64OR_0(v) || rewriteValueARM64_OpARM64OR_10(v) || rewriteValueARM64_OpARM64OR_20(v) || rewriteValueARM64_OpARM64OR_30(v)
+ return rewriteValueARM64_OpARM64OR_0(v) || rewriteValueARM64_OpARM64OR_10(v) || rewriteValueARM64_OpARM64OR_20(v) || rewriteValueARM64_OpARM64OR_30(v) || rewriteValueARM64_OpARM64OR_40(v)
case OpARM64ORN:
return rewriteValueARM64_OpARM64ORN_0(v)
case OpARM64ORNshiftLL:
@@ -271,6 +299,10 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64ORshiftRA_0(v)
case OpARM64ORshiftRL:
return rewriteValueARM64_OpARM64ORshiftRL_0(v)
+ case OpARM64RORWconst:
+ return rewriteValueARM64_OpARM64RORWconst_0(v)
+ case OpARM64RORconst:
+ return rewriteValueARM64_OpARM64RORconst_0(v)
case OpARM64SLL:
return rewriteValueARM64_OpARM64SLL_0(v)
case OpARM64SLLconst:
@@ -303,6 +335,12 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64TSTWconst_0(v)
case OpARM64TSTconst:
return rewriteValueARM64_OpARM64TSTconst_0(v)
+ case OpARM64TSTshiftLL:
+ return rewriteValueARM64_OpARM64TSTshiftLL_0(v)
+ case OpARM64TSTshiftRA:
+ return rewriteValueARM64_OpARM64TSTshiftRA_0(v)
+ case OpARM64TSTshiftRL:
+ return rewriteValueARM64_OpARM64TSTshiftRL_0(v)
case OpARM64UBFIZ:
return rewriteValueARM64_OpARM64UBFIZ_0(v)
case OpARM64UBFX:
@@ -325,6 +363,8 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64XORshiftRA_0(v)
case OpARM64XORshiftRL:
return rewriteValueARM64_OpARM64XORshiftRL_0(v)
+ case OpAbs:
+ return rewriteValueARM64_OpAbs_0(v)
case OpAdd16:
return rewriteValueARM64_OpAdd16_0(v)
case OpAdd32:
@@ -731,12 +771,18 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpPopCount32_0(v)
case OpPopCount64:
return rewriteValueARM64_OpPopCount64_0(v)
+ case OpRotateLeft32:
+ return rewriteValueARM64_OpRotateLeft32_0(v)
+ case OpRotateLeft64:
+ return rewriteValueARM64_OpRotateLeft64_0(v)
case OpRound:
return rewriteValueARM64_OpRound_0(v)
case OpRound32F:
return rewriteValueARM64_OpRound32F_0(v)
case OpRound64F:
return rewriteValueARM64_OpRound64F_0(v)
+ case OpRoundToEven:
+ return rewriteValueARM64_OpRoundToEven_0(v)
case OpRsh16Ux16:
return rewriteValueARM64_OpRsh16Ux16_0(v)
case OpRsh16Ux32:
@@ -910,7 +956,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
return true
}
// match: (ADD a l:(MUL x y))
- // cond: l.Uses==1 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l)
+ // cond: l.Uses==1 && clobber(l)
// result: (MADD a x y)
for {
_ = v.Args[1]
@@ -922,7 +968,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
_ = l.Args[1]
x := l.Args[0]
y := l.Args[1]
- if !(l.Uses == 1 && x.Op != OpARM64MOVDconst && y.Op != OpARM64MOVDconst && a.Op != OpARM64MOVDconst && clobber(l)) {
+ if !(l.Uses == 1 && clobber(l)) {
break
}
v.reset(OpARM64MADD)
@@ -932,7 +978,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
return true
}
// match: (ADD l:(MUL x y) a)
- // cond: l.Uses==1 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l)
+ // cond: l.Uses==1 && clobber(l)
// result: (MADD a x y)
for {
_ = v.Args[1]
@@ -944,7 +990,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
x := l.Args[0]
y := l.Args[1]
a := v.Args[1]
- if !(l.Uses == 1 && x.Op != OpARM64MOVDconst && y.Op != OpARM64MOVDconst && a.Op != OpARM64MOVDconst && clobber(l)) {
+ if !(l.Uses == 1 && clobber(l)) {
break
}
v.reset(OpARM64MADD)
@@ -954,7 +1000,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
return true
}
// match: (ADD a l:(MNEG x y))
- // cond: l.Uses==1 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l)
+ // cond: l.Uses==1 && clobber(l)
// result: (MSUB a x y)
for {
_ = v.Args[1]
@@ -966,7 +1012,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
_ = l.Args[1]
x := l.Args[0]
y := l.Args[1]
- if !(l.Uses == 1 && x.Op != OpARM64MOVDconst && y.Op != OpARM64MOVDconst && a.Op != OpARM64MOVDconst && clobber(l)) {
+ if !(l.Uses == 1 && clobber(l)) {
break
}
v.reset(OpARM64MSUB)
@@ -976,7 +1022,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
return true
}
// match: (ADD l:(MNEG x y) a)
- // cond: l.Uses==1 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l)
+ // cond: l.Uses==1 && clobber(l)
// result: (MSUB a x y)
for {
_ = v.Args[1]
@@ -988,7 +1034,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
x := l.Args[0]
y := l.Args[1]
a := v.Args[1]
- if !(l.Uses == 1 && x.Op != OpARM64MOVDconst && y.Op != OpARM64MOVDconst && a.Op != OpARM64MOVDconst && clobber(l)) {
+ if !(l.Uses == 1 && clobber(l)) {
break
}
v.reset(OpARM64MSUB)
@@ -998,7 +1044,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
return true
}
// match: (ADD a l:(MULW x y))
- // cond: l.Uses==1 && a.Type.Size() != 8 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l)
+ // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
// result: (MADDW a x y)
for {
_ = v.Args[1]
@@ -1010,7 +1056,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
_ = l.Args[1]
x := l.Args[0]
y := l.Args[1]
- if !(l.Uses == 1 && a.Type.Size() != 8 && x.Op != OpARM64MOVDconst && y.Op != OpARM64MOVDconst && a.Op != OpARM64MOVDconst && clobber(l)) {
+ if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
break
}
v.reset(OpARM64MADDW)
@@ -1020,7 +1066,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
return true
}
// match: (ADD l:(MULW x y) a)
- // cond: l.Uses==1 && a.Type.Size() != 8 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l)
+ // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
// result: (MADDW a x y)
for {
_ = v.Args[1]
@@ -1032,7 +1078,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
x := l.Args[0]
y := l.Args[1]
a := v.Args[1]
- if !(l.Uses == 1 && a.Type.Size() != 8 && x.Op != OpARM64MOVDconst && y.Op != OpARM64MOVDconst && a.Op != OpARM64MOVDconst && clobber(l)) {
+ if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
break
}
v.reset(OpARM64MADDW)
@@ -1042,7 +1088,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
return true
}
// match: (ADD a l:(MNEGW x y))
- // cond: l.Uses==1 && a.Type.Size() != 8 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l)
+ // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
// result: (MSUBW a x y)
for {
_ = v.Args[1]
@@ -1054,7 +1100,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
_ = l.Args[1]
x := l.Args[0]
y := l.Args[1]
- if !(l.Uses == 1 && a.Type.Size() != 8 && x.Op != OpARM64MOVDconst && y.Op != OpARM64MOVDconst && a.Op != OpARM64MOVDconst && clobber(l)) {
+ if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
break
}
v.reset(OpARM64MSUBW)
@@ -1064,7 +1110,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
return true
}
// match: (ADD l:(MNEGW x y) a)
- // cond: l.Uses==1 && a.Type.Size() != 8 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l)
+ // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
// result: (MSUBW a x y)
for {
_ = v.Args[1]
@@ -1076,7 +1122,7 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
x := l.Args[0]
y := l.Args[1]
a := v.Args[1]
- if !(l.Uses == 1 && a.Type.Size() != 8 && x.Op != OpARM64MOVDconst && y.Op != OpARM64MOVDconst && a.Op != OpARM64MOVDconst && clobber(l)) {
+ if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
break
}
v.reset(OpARM64MSUBW)
@@ -1088,6 +1134,10 @@ func rewriteValueARM64_OpARM64ADD_0(v *Value) bool {
return false
}
func rewriteValueARM64_OpARM64ADD_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (ADD x (NEG y))
// cond:
// result: (SUB x y)
@@ -1246,6 +1296,925 @@ func rewriteValueARM64_OpARM64ADD_10(v *Value) bool {
v.AddArg(y)
return true
}
+ // match: (ADD (SLL x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> {cc} (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (ROR x (NEG <t> y))
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ t := v_0_1.Type
+ if v_0_1.AuxInt != 63 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_1.Type != typ.UInt64 {
+ break
+ }
+ cc := v_1.Aux
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL {
+ break
+ }
+ if v_1_0.Type != typ.UInt64 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ break
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB {
+ break
+ }
+ if v_1_0_1.Type != t {
+ break
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_0_1_0.AuxInt != 64 {
+ break
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_0_1_1.Type != t {
+ break
+ }
+ if v_1_0_1_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_0_1_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_1_1.AuxInt != 64 {
+ break
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_1_1_0.Type != t {
+ break
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_1_0_0.AuxInt != 64 {
+ break
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1_0_1.Type != t {
+ break
+ }
+ if v_1_1_0_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_1_0_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADD (CSEL0 <typ.UInt64> {cc} (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))) (SLL x (ANDconst <t> [63] y)))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (ROR x (NEG <t> y))
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_0.Type != typ.UInt64 {
+ break
+ }
+ cc := v_0.Aux
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64SRL {
+ break
+ }
+ if v_0_0.Type != typ.UInt64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpARM64SUB {
+ break
+ }
+ t := v_0_0_1.Type
+ _ = v_0_0_1.Args[1]
+ v_0_0_1_0 := v_0_0_1.Args[0]
+ if v_0_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_0_1_0.AuxInt != 64 {
+ break
+ }
+ v_0_0_1_1 := v_0_0_1.Args[1]
+ if v_0_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_0_1_1.Type != t {
+ break
+ }
+ if v_0_0_1_1.AuxInt != 63 {
+ break
+ }
+ y := v_0_0_1_1.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_0_1.AuxInt != 64 {
+ break
+ }
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_0_1_0.Type != t {
+ break
+ }
+ _ = v_0_1_0.Args[1]
+ v_0_1_0_0 := v_0_1_0.Args[0]
+ if v_0_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_1_0_0.AuxInt != 64 {
+ break
+ }
+ v_0_1_0_1 := v_0_1_0.Args[1]
+ if v_0_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_1_0_1.Type != t {
+ break
+ }
+ if v_0_1_0_1.AuxInt != 63 {
+ break
+ }
+ if y != v_0_1_0_1.Args[0] {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SLL {
+ break
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1.Type != t {
+ break
+ }
+ if v_1_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADD_20(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (ADD (SRL <typ.UInt64> x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> {cc} (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (ROR x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRL {
+ break
+ }
+ if v_0.Type != typ.UInt64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ t := v_0_1.Type
+ if v_0_1.AuxInt != 63 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_1.Type != typ.UInt64 {
+ break
+ }
+ cc := v_1.Aux
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ break
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB {
+ break
+ }
+ if v_1_0_1.Type != t {
+ break
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_0_1_0.AuxInt != 64 {
+ break
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_0_1_1.Type != t {
+ break
+ }
+ if v_1_0_1_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_0_1_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_1_1.AuxInt != 64 {
+ break
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_1_1_0.Type != t {
+ break
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_1_0_0.AuxInt != 64 {
+ break
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1_0_1.Type != t {
+ break
+ }
+ if v_1_1_0_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_1_0_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD (CSEL0 <typ.UInt64> {cc} (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))) (SRL <typ.UInt64> x (ANDconst <t> [63] y)))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (ROR x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_0.Type != typ.UInt64 {
+ break
+ }
+ cc := v_0.Aux
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpARM64SUB {
+ break
+ }
+ t := v_0_0_1.Type
+ _ = v_0_0_1.Args[1]
+ v_0_0_1_0 := v_0_0_1.Args[0]
+ if v_0_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_0_1_0.AuxInt != 64 {
+ break
+ }
+ v_0_0_1_1 := v_0_0_1.Args[1]
+ if v_0_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_0_1_1.Type != t {
+ break
+ }
+ if v_0_0_1_1.AuxInt != 63 {
+ break
+ }
+ y := v_0_0_1_1.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_0_1.AuxInt != 64 {
+ break
+ }
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_0_1_0.Type != t {
+ break
+ }
+ _ = v_0_1_0.Args[1]
+ v_0_1_0_0 := v_0_1_0.Args[0]
+ if v_0_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_1_0_0.AuxInt != 64 {
+ break
+ }
+ v_0_1_0_1 := v_0_1_0.Args[1]
+ if v_0_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_1_0_1.Type != t {
+ break
+ }
+ if v_0_1_0_1.AuxInt != 63 {
+ break
+ }
+ if y != v_0_1_0_1.Args[0] {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRL {
+ break
+ }
+ if v_1.Type != typ.UInt64 {
+ break
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1.Type != t {
+ break
+ }
+ if v_1_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD (SLL x (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> {cc} (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (RORW x (NEG <t> y))
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ t := v_0_1.Type
+ if v_0_1.AuxInt != 31 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_1.Type != typ.UInt32 {
+ break
+ }
+ cc := v_1.Aux
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL {
+ break
+ }
+ if v_1_0.Type != typ.UInt32 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ if x != v_1_0_0.Args[0] {
+ break
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB {
+ break
+ }
+ if v_1_0_1.Type != t {
+ break
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_0_1_0.AuxInt != 32 {
+ break
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_0_1_1.Type != t {
+ break
+ }
+ if v_1_0_1_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_0_1_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_1_1.AuxInt != 64 {
+ break
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_1_1_0.Type != t {
+ break
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_1_0_0.AuxInt != 32 {
+ break
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1_0_1.Type != t {
+ break
+ }
+ if v_1_1_0_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_1_0_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADD (CSEL0 <typ.UInt32> {cc} (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))) (SLL x (ANDconst <t> [31] y)))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (RORW x (NEG <t> y))
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_0.Type != typ.UInt32 {
+ break
+ }
+ cc := v_0.Aux
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64SRL {
+ break
+ }
+ if v_0_0.Type != typ.UInt32 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpARM64SUB {
+ break
+ }
+ t := v_0_0_1.Type
+ _ = v_0_0_1.Args[1]
+ v_0_0_1_0 := v_0_0_1.Args[0]
+ if v_0_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_0_1_0.AuxInt != 32 {
+ break
+ }
+ v_0_0_1_1 := v_0_0_1.Args[1]
+ if v_0_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_0_1_1.Type != t {
+ break
+ }
+ if v_0_0_1_1.AuxInt != 31 {
+ break
+ }
+ y := v_0_0_1_1.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_0_1.AuxInt != 64 {
+ break
+ }
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_0_1_0.Type != t {
+ break
+ }
+ _ = v_0_1_0.Args[1]
+ v_0_1_0_0 := v_0_1_0.Args[0]
+ if v_0_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_1_0_0.AuxInt != 32 {
+ break
+ }
+ v_0_1_0_1 := v_0_1_0.Args[1]
+ if v_0_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_1_0_1.Type != t {
+ break
+ }
+ if v_0_1_0_1.AuxInt != 31 {
+ break
+ }
+ if y != v_0_1_0_1.Args[0] {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SLL {
+ break
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1.Type != t {
+ break
+ }
+ if v_1_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADD (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> {cc} (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (RORW x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRL {
+ break
+ }
+ if v_0.Type != typ.UInt32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ t := v_0_1.Type
+ if v_0_1.AuxInt != 31 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_1.Type != typ.UInt32 {
+ break
+ }
+ cc := v_1.Aux
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ break
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB {
+ break
+ }
+ if v_1_0_1.Type != t {
+ break
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_0_1_0.AuxInt != 32 {
+ break
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_0_1_1.Type != t {
+ break
+ }
+ if v_1_0_1_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_0_1_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_1_1.AuxInt != 64 {
+ break
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_1_1_0.Type != t {
+ break
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_1_0_0.AuxInt != 32 {
+ break
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1_0_1.Type != t {
+ break
+ }
+ if v_1_1_0_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_1_0_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADD (CSEL0 <typ.UInt32> {cc} (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))) (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (RORW x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_0.Type != typ.UInt32 {
+ break
+ }
+ cc := v_0.Aux
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpARM64SUB {
+ break
+ }
+ t := v_0_0_1.Type
+ _ = v_0_0_1.Args[1]
+ v_0_0_1_0 := v_0_0_1.Args[0]
+ if v_0_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_0_1_0.AuxInt != 32 {
+ break
+ }
+ v_0_0_1_1 := v_0_0_1.Args[1]
+ if v_0_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_0_1_1.Type != t {
+ break
+ }
+ if v_0_0_1_1.AuxInt != 31 {
+ break
+ }
+ y := v_0_0_1_1.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_0_1.AuxInt != 64 {
+ break
+ }
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_0_1_0.Type != t {
+ break
+ }
+ _ = v_0_1_0.Args[1]
+ v_0_1_0_0 := v_0_1_0.Args[0]
+ if v_0_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_1_0_0.AuxInt != 32 {
+ break
+ }
+ v_0_1_0_1 := v_0_1_0.Args[1]
+ if v_0_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_1_0_1.Type != t {
+ break
+ }
+ if v_0_1_0_1.AuxInt != 31 {
+ break
+ }
+ if y != v_0_1_0_1.Args[0] {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRL {
+ break
+ }
+ if v_1.Type != typ.UInt32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ if x != v_1_0.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1.Type != t {
+ break
+ }
+ if v_1_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64ADDconst_0(v *Value) bool {
@@ -2395,6 +3364,132 @@ func rewriteValueARM64_OpARM64CMN_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (CMN x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftLL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x0 := v.Args[0]
+ x1 := v.Args[1]
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMNshiftLL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMN x1:(SLLconst [c] y) x0)
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftLL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x1 := v.Args[0]
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ x0 := v.Args[1]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMNshiftLL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMN x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftRL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x0 := v.Args[0]
+ x1 := v.Args[1]
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMNshiftRL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMN x1:(SRLconst [c] y) x0)
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftRL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x1 := v.Args[0]
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ x0 := v.Args[1]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMNshiftRL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMN x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftRA x0 y [c])
+ for {
+ _ = v.Args[1]
+ x0 := v.Args[0]
+ x1 := v.Args[1]
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMNshiftRA)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMN x1:(SRAconst [c] y) x0)
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftRA x0 y [c])
+ for {
+ _ = v.Args[1]
+ x1 := v.Args[0]
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ x0 := v.Args[1]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMNshiftRA)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64CMNW_0(v *Value) bool {
@@ -2598,6 +3693,132 @@ func rewriteValueARM64_OpARM64CMNconst_0(v *Value) bool {
}
return false
}
+func rewriteValueARM64_OpARM64CMNshiftLL_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (CMNshiftLL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (CMNconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftLL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (CMNconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNshiftRA_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (CMNshiftRA (MOVDconst [c]) x [d])
+ // cond:
+ // result: (CMNconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRA x (MOVDconst [c]) [d])
+ // cond:
+ // result: (CMNconst x [c>>uint64(d)])
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = c >> uint64(d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNshiftRL_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (CMNshiftRL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (CMNconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (CMNconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpARM64CMP_0(v *Value) bool {
b := v.Block
_ = b
@@ -3960,6 +5181,30 @@ func rewriteValueARM64_OpARM64FADDS_0(v *Value) bool {
}
return false
}
+func rewriteValueARM64_OpARM64FMOVDfpgp_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (FMOVDfpgp <t> (Arg [off] {sym}))
+ // cond:
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ v_0 := v.Args[0]
+ if v_0.Op != OpArg {
+ break
+ }
+ off := v_0.AuxInt
+ sym := v_0.Aux
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.reset(OpCopy)
+ v.AddArg(v0)
+ v0.AuxInt = off
+ v0.Aux = sym
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpARM64FMOVDgpfp_0(v *Value) bool {
b := v.Block
_ = b
@@ -3989,6 +5234,33 @@ func rewriteValueARM64_OpARM64FMOVDload_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
+ // match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _))
+ // cond:
+ // result: (FMOVDgpfp val)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDstore {
+ break
+ }
+ if v_1.AuxInt != off {
+ break
+ }
+ if v_1.Aux != sym {
+ break
+ }
+ _ = v_1.Args[2]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ val := v_1.Args[1]
+ v.reset(OpARM64FMOVDgpfp)
+ v.AddArg(val)
+ return true
+ }
// match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (FMOVDload [off1+off2] {sym} ptr mem)
@@ -4108,10 +5380,12 @@ func rewriteValueARM64_OpARM64FMOVDstore_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- // match: (FMOVDstore ptr (FMOVDgpfp val) mem)
+ // match: (FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem)
// cond:
- // result: (MOVDstore ptr val mem)
+ // result: (MOVDstore [off] {sym} ptr val mem)
for {
+ off := v.AuxInt
+ sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
v_1 := v.Args[1]
@@ -4121,6 +5395,8 @@ func rewriteValueARM64_OpARM64FMOVDstore_0(v *Value) bool {
val := v_1.Args[0]
mem := v.Args[2]
v.reset(OpARM64MOVDstore)
+ v.AuxInt = off
+ v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
@@ -4255,6 +5531,33 @@ func rewriteValueARM64_OpARM64FMOVSload_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
+ // match: (FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _))
+ // cond:
+ // result: (FMOVSgpfp val)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVWstore {
+ break
+ }
+ if v_1.AuxInt != off {
+ break
+ }
+ if v_1.Aux != sym {
+ break
+ }
+ _ = v_1.Args[2]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ val := v_1.Args[1]
+ v.reset(OpARM64FMOVSgpfp)
+ v.AddArg(val)
+ return true
+ }
// match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (FMOVSload [off1+off2] {sym} ptr mem)
@@ -4374,6 +5677,28 @@ func rewriteValueARM64_OpARM64FMOVSstore_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
+ // match: (FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem)
+ // cond:
+ // result: (MOVWstore [off] {sym} ptr val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64FMOVSgpfp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
// match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (FMOVSstore [off1+off2] {sym} ptr val mem)
@@ -5474,6 +6799,1024 @@ func rewriteValueARM64_OpARM64LessThanU_0(v *Value) bool {
}
return false
}
+func rewriteValueARM64_OpARM64MADD_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (MADD a x (MOVDconst [-1]))
+ // cond:
+ // result: (SUB a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_2.AuxInt != -1 {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MADD a _ (MOVDconst [0]))
+ // cond:
+ // result: a
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_2.AuxInt != 0 {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = a.Type
+ v.AddArg(a)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [1]))
+ // cond:
+ // result: (ADD a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_2.AuxInt != 1 {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c)
+ // result: (ADDshiftLL a x [log2(c)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c-1) && c>=3
+ // result: (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(isPowerOfTwo(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c+1) && c>=7
+ // result: (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(isPowerOfTwo(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo(c/3)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 3)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo(c/5)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 5)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo(c/7)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%7 == 0 && isPowerOfTwo(c/7)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 7)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo(c/9)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 9)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MADD_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (MADD a (MOVDconst [-1]) x)
+ // cond:
+ // result: (SUB a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != -1 {
+ break
+ }
+ x := v.Args[2]
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MADD a (MOVDconst [0]) _)
+ // cond:
+ // result: a
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = a.Type
+ v.AddArg(a)
+ return true
+ }
+ // match: (MADD a (MOVDconst [1]) x)
+ // cond:
+ // result: (ADD a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ x := v.Args[2]
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c)
+ // result: (ADDshiftLL a x [log2(c)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c-1) && c>=3
+ // result: (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(isPowerOfTwo(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c+1) && c>=7
+ // result: (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(isPowerOfTwo(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo(c/3)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 3)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo(c/5)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 5)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo(c/7)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%7 == 0 && isPowerOfTwo(c/7)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 7)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo(c/9)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 9)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MADD_20(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (MADD (MOVDconst [c]) x y)
+ // cond:
+ // result: (ADDconst [c] (MUL <x.Type> x y))
+ for {
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64MUL, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) (MOVDconst [d]))
+ // cond:
+ // result: (ADDconst [c*d] a)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_2.AuxInt
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = c * d
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MADDW_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: (SUB a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MADDW a _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: a
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = a.Type
+ v.AddArg(a)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: (ADD a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c)
+ // result: (ADDshiftLL a x [log2(c)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c-1) && int32(c)>=3
+ // result: (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c+1) && int32(c)>=7
+ // result: (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 3)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 5)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 7)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 9)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MADDW_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: int32(c)==-1
+ // result: (SUB a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) _)
+ // cond: int32(c)==0
+ // result: a
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = a.Type
+ v.AddArg(a)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: int32(c)==1
+ // result: (ADD a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c)
+ // result: (ADDshiftLL a x [log2(c)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c-1) && int32(c)>=3
+ // result: (ADD a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c+1) && int32(c)>=7
+ // result: (SUB a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 3)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 5)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 7)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 9)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MADDW_20(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (MADDW (MOVDconst [c]) x y)
+ // cond:
+ // result: (ADDconst [c] (MULW <x.Type> x y))
+ for {
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) (MOVDconst [d]))
+ // cond:
+ // result: (ADDconst [int64(int32(c)*int32(d))] a)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_2.AuxInt
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64(int32(c) * int32(d))
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpARM64MNEG_0(v *Value) bool {
b := v.Block
_ = b
@@ -10145,6 +12488,33 @@ func rewriteValueARM64_OpARM64MOVDload_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
+ // match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _))
+ // cond:
+ // result: (FMOVDfpgp val)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64FMOVDstore {
+ break
+ }
+ if v_1.AuxInt != off {
+ break
+ }
+ if v_1.Aux != sym {
+ break
+ }
+ _ = v_1.Args[2]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ val := v_1.Args[1]
+ v.reset(OpARM64FMOVDfpgp)
+ v.AddArg(val)
+ return true
+ }
// match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVDload [off1+off2] {sym} ptr mem)
@@ -10447,10 +12817,12 @@ func rewriteValueARM64_OpARM64MOVDstore_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
- // match: (MOVDstore ptr (FMOVDfpgp val) mem)
+ // match: (MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem)
// cond:
- // result: (FMOVDstore ptr val mem)
+ // result: (FMOVDstore [off] {sym} ptr val mem)
for {
+ off := v.AuxInt
+ sym := v.Aux
_ = v.Args[2]
ptr := v.Args[0]
v_1 := v.Args[1]
@@ -10460,6 +12832,8 @@ func rewriteValueARM64_OpARM64MOVDstore_0(v *Value) bool {
val := v_1.Args[0]
mem := v.Args[2]
v.reset(OpARM64FMOVDstore)
+ v.AuxInt = off
+ v.Aux = sym
v.AddArg(ptr)
v.AddArg(val)
v.AddArg(mem)
@@ -13659,6 +16033,33 @@ func rewriteValueARM64_OpARM64MOVWUload_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
+ // match: (MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _))
+ // cond:
+ // result: (FMOVSfpgp val)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[1]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64FMOVSstore {
+ break
+ }
+ if v_1.AuxInt != off {
+ break
+ }
+ if v_1.Aux != sym {
+ break
+ }
+ _ = v_1.Args[2]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ val := v_1.Args[1]
+ v.reset(OpARM64FMOVSfpgp)
+ v.AddArg(val)
+ return true
+ }
// match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
// cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVWUload [off1+off2] {sym} ptr mem)
@@ -14683,6 +17084,28 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
+ // match: (MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem)
+ // cond:
+ // result: (FMOVSstore [off] {sym} ptr val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64FMOVSfpgp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v.Args[2]
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
// cond: is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
// result: (MOVWstore [off1+off2] {sym} ptr val mem)
@@ -14946,6 +17369,11 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstore_10(v *Value) bool {
+ b := v.Block
+ _ = b
// match: (MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx4 ptr1 idx1 w mem))
// cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
// result: (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w mem)
@@ -14997,11 +17425,6 @@ func rewriteValueARM64_OpARM64MOVWstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueARM64_OpARM64MOVWstore_10(v *Value) bool {
- b := v.Block
- _ = b
// match: (MOVWstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVWstore [i-4] {s} ptr1 w0:(SRLconst [j-32] w) mem))
// cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
// result: (MOVDstore [i-4] {s} ptr0 w0 mem)
@@ -15784,6 +18207,1024 @@ func rewriteValueARM64_OpARM64MOVWstorezeroidx4_0(v *Value) bool {
}
return false
}
+func rewriteValueARM64_OpARM64MSUB_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (MSUB a x (MOVDconst [-1]))
+ // cond:
+ // result: (ADD a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_2.AuxInt != -1 {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MSUB a _ (MOVDconst [0]))
+ // cond:
+ // result: a
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_2.AuxInt != 0 {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = a.Type
+ v.AddArg(a)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [1]))
+ // cond:
+ // result: (SUB a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_2.AuxInt != 1 {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c)
+ // result: (SUBshiftLL a x [log2(c)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c-1) && c>=3
+ // result: (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(isPowerOfTwo(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c+1) && c>=7
+ // result: (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(isPowerOfTwo(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo(c/3)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 3)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo(c/5)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 5)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo(c/7)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%7 == 0 && isPowerOfTwo(c/7)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 7)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo(c/9)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 9)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MSUB_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (MSUB a (MOVDconst [-1]) x)
+ // cond:
+ // result: (ADD a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != -1 {
+ break
+ }
+ x := v.Args[2]
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [0]) _)
+ // cond:
+ // result: a
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != 0 {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = a.Type
+ v.AddArg(a)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [1]) x)
+ // cond:
+ // result: (SUB a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != 1 {
+ break
+ }
+ x := v.Args[2]
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c)
+ // result: (SUBshiftLL a x [log2(c)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c-1) && c>=3
+ // result: (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(isPowerOfTwo(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c+1) && c>=7
+ // result: (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(isPowerOfTwo(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo(c/3)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%3 == 0 && isPowerOfTwo(c/3)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 3)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo(c/5)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%5 == 0 && isPowerOfTwo(c/5)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 5)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo(c/7)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%7 == 0 && isPowerOfTwo(c/7)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 7)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo(c/9)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%9 == 0 && isPowerOfTwo(c/9)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 9)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MSUB_20(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (MSUB (MOVDconst [c]) x y)
+ // cond:
+ // result: (ADDconst [c] (MNEG <x.Type> x y))
+ for {
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64MNEG, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) (MOVDconst [d]))
+ // cond:
+ // result: (SUBconst [c*d] a)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_2.AuxInt
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = c * d
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MSUBW_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: (ADD a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MSUBW a _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: a
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = a.Type
+ v.AddArg(a)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: (SUB a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c)
+ // result: (SUBshiftLL a x [log2(c)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c-1) && int32(c)>=3
+ // result: (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo(c+1) && int32(c)>=7
+ // result: (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 3)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 5)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 7)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ x := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_2.AuxInt
+ if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 9)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MSUBW_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: int32(c)==-1
+ // result: (ADD a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) _)
+ // cond: int32(c)==0
+ // result: a
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = a.Type
+ v.AddArg(a)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: int32(c)==1
+ // result: (SUB a x)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c)
+ // result: (SUBshiftLL a x [log2(c)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c)
+ v.AddArg(a)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c-1) && int32(c)>=3
+ // result: (SUB a (ADDshiftLL <x.Type> x x [log2(c-1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(isPowerOfTwo(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = log2(c - 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo(c+1) && int32(c)>=7
+ // result: (ADD a (SUBshiftLL <x.Type> x x [log2(c+1)]))
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(isPowerOfTwo(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = log2(c + 1)
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log2(c/3)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 3)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log2(c/5)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 5)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 2
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log2(c/7)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = log2(c / 7)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log2(c/9)])
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ x := v.Args[2]
+ if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = log2(c / 9)
+ v.AddArg(a)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = 3
+ v0.AddArg(x)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MSUBW_20(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (MSUBW (MOVDconst [c]) x y)
+ // cond:
+ // result: (ADDconst [c] (MNEGW <x.Type> x y))
+ for {
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ y := v.Args[2]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) (MOVDconst [d]))
+ // cond:
+ // result: (SUBconst [int64(int32(c)*int32(d))] a)
+ for {
+ _ = v.Args[2]
+ a := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v_2 := v.Args[2]
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := v_2.AuxInt
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64(int32(c) * int32(d))
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpARM64MUL_0(v *Value) bool {
// match: (MUL (NEG x) y)
// cond:
@@ -16794,6 +20235,111 @@ func rewriteValueARM64_OpARM64MVN_0(v *Value) bool {
v.AuxInt = ^c
return true
}
+ // match: (MVN x:(SLLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftLL [c] y)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64SLLconst {
+ break
+ }
+ c := x.AuxInt
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftLL)
+ v.AuxInt = c
+ v.AddArg(y)
+ return true
+ }
+ // match: (MVN x:(SRLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftRL [c] y)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64SRLconst {
+ break
+ }
+ c := x.AuxInt
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftRL)
+ v.AuxInt = c
+ v.AddArg(y)
+ return true
+ }
+ // match: (MVN x:(SRAconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftRA [c] y)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64SRAconst {
+ break
+ }
+ c := x.AuxInt
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftRA)
+ v.AuxInt = c
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftLL_0(v *Value) bool {
+ // match: (MVNshiftLL (MOVDconst [c]) [d])
+ // cond:
+ // result: (MOVDconst [^int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = ^int64(uint64(c) << uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftRA_0(v *Value) bool {
+ // match: (MVNshiftRA (MOVDconst [c]) [d])
+ // cond:
+ // result: (MOVDconst [^(c>>uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = ^(c >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftRL_0(v *Value) bool {
+ // match: (MVNshiftRL (MOVDconst [c]) [d])
+ // cond:
+ // result: (MOVDconst [^int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = ^int64(uint64(c) >> uint64(d))
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64NEG_0(v *Value) bool {
@@ -16842,6 +20388,111 @@ func rewriteValueARM64_OpARM64NEG_0(v *Value) bool {
v.AuxInt = -c
return true
}
+ // match: (NEG x:(SLLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftLL [c] y)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64SLLconst {
+ break
+ }
+ c := x.AuxInt
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftLL)
+ v.AuxInt = c
+ v.AddArg(y)
+ return true
+ }
+ // match: (NEG x:(SRLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftRL [c] y)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64SRLconst {
+ break
+ }
+ c := x.AuxInt
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftRL)
+ v.AuxInt = c
+ v.AddArg(y)
+ return true
+ }
+ // match: (NEG x:(SRAconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftRA [c] y)
+ for {
+ x := v.Args[0]
+ if x.Op != OpARM64SRAconst {
+ break
+ }
+ c := x.AuxInt
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftRA)
+ v.AuxInt = c
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftLL_0(v *Value) bool {
+ // match: (NEGshiftLL (MOVDconst [c]) [d])
+ // cond:
+ // result: (MOVDconst [-int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = -int64(uint64(c) << uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftRA_0(v *Value) bool {
+ // match: (NEGshiftRA (MOVDconst [c]) [d])
+ // cond:
+ // result: (MOVDconst [-(c>>uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = -(c >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftRL_0(v *Value) bool {
+ // match: (NEGshiftRL (MOVDconst [c]) [d])
+ // cond:
+ // result: (MOVDconst [-int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = -int64(uint64(c) >> uint64(d))
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64NotEqual_0(v *Value) bool {
@@ -17109,6 +20760,8 @@ func rewriteValueARM64_OpARM64OR_0(v *Value) bool {
func rewriteValueARM64_OpARM64OR_10(v *Value) bool {
b := v.Block
_ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (OR x1:(SRAconst [c] y) x0)
// cond: clobberIfDead(x1)
// result: (ORshiftRA x0 y [c])
@@ -17130,6 +20783,918 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool {
v.AddArg(y)
return true
}
+ // match: (OR (SLL x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> {cc} (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (ROR x (NEG <t> y))
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ t := v_0_1.Type
+ if v_0_1.AuxInt != 63 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_1.Type != typ.UInt64 {
+ break
+ }
+ cc := v_1.Aux
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL {
+ break
+ }
+ if v_1_0.Type != typ.UInt64 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ break
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB {
+ break
+ }
+ if v_1_0_1.Type != t {
+ break
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_0_1_0.AuxInt != 64 {
+ break
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_0_1_1.Type != t {
+ break
+ }
+ if v_1_0_1_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_0_1_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_1_1.AuxInt != 64 {
+ break
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_1_1_0.Type != t {
+ break
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_1_0_0.AuxInt != 64 {
+ break
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1_0_1.Type != t {
+ break
+ }
+ if v_1_1_0_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_1_0_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (OR (CSEL0 <typ.UInt64> {cc} (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))) (SLL x (ANDconst <t> [63] y)))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (ROR x (NEG <t> y))
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_0.Type != typ.UInt64 {
+ break
+ }
+ cc := v_0.Aux
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64SRL {
+ break
+ }
+ if v_0_0.Type != typ.UInt64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpARM64SUB {
+ break
+ }
+ t := v_0_0_1.Type
+ _ = v_0_0_1.Args[1]
+ v_0_0_1_0 := v_0_0_1.Args[0]
+ if v_0_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_0_1_0.AuxInt != 64 {
+ break
+ }
+ v_0_0_1_1 := v_0_0_1.Args[1]
+ if v_0_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_0_1_1.Type != t {
+ break
+ }
+ if v_0_0_1_1.AuxInt != 63 {
+ break
+ }
+ y := v_0_0_1_1.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_0_1.AuxInt != 64 {
+ break
+ }
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_0_1_0.Type != t {
+ break
+ }
+ _ = v_0_1_0.Args[1]
+ v_0_1_0_0 := v_0_1_0.Args[0]
+ if v_0_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_1_0_0.AuxInt != 64 {
+ break
+ }
+ v_0_1_0_1 := v_0_1_0.Args[1]
+ if v_0_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_1_0_1.Type != t {
+ break
+ }
+ if v_0_1_0_1.AuxInt != 63 {
+ break
+ }
+ if y != v_0_1_0_1.Args[0] {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SLL {
+ break
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1.Type != t {
+ break
+ }
+ if v_1_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (OR (SRL <typ.UInt64> x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> {cc} (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (ROR x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRL {
+ break
+ }
+ if v_0.Type != typ.UInt64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ t := v_0_1.Type
+ if v_0_1.AuxInt != 63 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_1.Type != typ.UInt64 {
+ break
+ }
+ cc := v_1.Aux
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ break
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB {
+ break
+ }
+ if v_1_0_1.Type != t {
+ break
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_0_1_0.AuxInt != 64 {
+ break
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_0_1_1.Type != t {
+ break
+ }
+ if v_1_0_1_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_0_1_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_1_1.AuxInt != 64 {
+ break
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_1_1_0.Type != t {
+ break
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_1_0_0.AuxInt != 64 {
+ break
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1_0_1.Type != t {
+ break
+ }
+ if v_1_1_0_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_1_0_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR (CSEL0 <typ.UInt64> {cc} (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))) (SRL <typ.UInt64> x (ANDconst <t> [63] y)))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (ROR x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_0.Type != typ.UInt64 {
+ break
+ }
+ cc := v_0.Aux
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpARM64SUB {
+ break
+ }
+ t := v_0_0_1.Type
+ _ = v_0_0_1.Args[1]
+ v_0_0_1_0 := v_0_0_1.Args[0]
+ if v_0_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_0_1_0.AuxInt != 64 {
+ break
+ }
+ v_0_0_1_1 := v_0_0_1.Args[1]
+ if v_0_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_0_1_1.Type != t {
+ break
+ }
+ if v_0_0_1_1.AuxInt != 63 {
+ break
+ }
+ y := v_0_0_1_1.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_0_1.AuxInt != 64 {
+ break
+ }
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_0_1_0.Type != t {
+ break
+ }
+ _ = v_0_1_0.Args[1]
+ v_0_1_0_0 := v_0_1_0.Args[0]
+ if v_0_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_1_0_0.AuxInt != 64 {
+ break
+ }
+ v_0_1_0_1 := v_0_1_0.Args[1]
+ if v_0_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_1_0_1.Type != t {
+ break
+ }
+ if v_0_1_0_1.AuxInt != 63 {
+ break
+ }
+ if y != v_0_1_0_1.Args[0] {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRL {
+ break
+ }
+ if v_1.Type != typ.UInt64 {
+ break
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1.Type != t {
+ break
+ }
+ if v_1_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR (SLL x (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> {cc} (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (RORW x (NEG <t> y))
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ t := v_0_1.Type
+ if v_0_1.AuxInt != 31 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_1.Type != typ.UInt32 {
+ break
+ }
+ cc := v_1.Aux
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL {
+ break
+ }
+ if v_1_0.Type != typ.UInt32 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ if x != v_1_0_0.Args[0] {
+ break
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB {
+ break
+ }
+ if v_1_0_1.Type != t {
+ break
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_0_1_0.AuxInt != 32 {
+ break
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_0_1_1.Type != t {
+ break
+ }
+ if v_1_0_1_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_0_1_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_1_1.AuxInt != 64 {
+ break
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_1_1_0.Type != t {
+ break
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_1_0_0.AuxInt != 32 {
+ break
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1_0_1.Type != t {
+ break
+ }
+ if v_1_1_0_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_1_0_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (OR (CSEL0 <typ.UInt32> {cc} (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))) (SLL x (ANDconst <t> [31] y)))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (RORW x (NEG <t> y))
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_0.Type != typ.UInt32 {
+ break
+ }
+ cc := v_0.Aux
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64SRL {
+ break
+ }
+ if v_0_0.Type != typ.UInt32 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpARM64SUB {
+ break
+ }
+ t := v_0_0_1.Type
+ _ = v_0_0_1.Args[1]
+ v_0_0_1_0 := v_0_0_1.Args[0]
+ if v_0_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_0_1_0.AuxInt != 32 {
+ break
+ }
+ v_0_0_1_1 := v_0_0_1.Args[1]
+ if v_0_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_0_1_1.Type != t {
+ break
+ }
+ if v_0_0_1_1.AuxInt != 31 {
+ break
+ }
+ y := v_0_0_1_1.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_0_1.AuxInt != 64 {
+ break
+ }
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_0_1_0.Type != t {
+ break
+ }
+ _ = v_0_1_0.Args[1]
+ v_0_1_0_0 := v_0_1_0.Args[0]
+ if v_0_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_1_0_0.AuxInt != 32 {
+ break
+ }
+ v_0_1_0_1 := v_0_1_0.Args[1]
+ if v_0_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_1_0_1.Type != t {
+ break
+ }
+ if v_0_1_0_1.AuxInt != 31 {
+ break
+ }
+ if y != v_0_1_0_1.Args[0] {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SLL {
+ break
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1.Type != t {
+ break
+ }
+ if v_1_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (OR (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> {cc} (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (RORW x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRL {
+ break
+ }
+ if v_0.Type != typ.UInt32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ t := v_0_1.Type
+ if v_0_1.AuxInt != 31 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_1.Type != typ.UInt32 {
+ break
+ }
+ cc := v_1.Aux
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ break
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB {
+ break
+ }
+ if v_1_0_1.Type != t {
+ break
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_0_1_0.AuxInt != 32 {
+ break
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_0_1_1.Type != t {
+ break
+ }
+ if v_1_0_1_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_0_1_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_1_1.AuxInt != 64 {
+ break
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_1_1_0.Type != t {
+ break
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_1_0_0.AuxInt != 32 {
+ break
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1_0_1.Type != t {
+ break
+ }
+ if v_1_1_0_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_1_0_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (OR (CSEL0 <typ.UInt32> {cc} (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))) (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (RORW x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_0.Type != typ.UInt32 {
+ break
+ }
+ cc := v_0.Aux
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpARM64SUB {
+ break
+ }
+ t := v_0_0_1.Type
+ _ = v_0_0_1.Args[1]
+ v_0_0_1_0 := v_0_0_1.Args[0]
+ if v_0_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_0_1_0.AuxInt != 32 {
+ break
+ }
+ v_0_0_1_1 := v_0_0_1.Args[1]
+ if v_0_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_0_1_1.Type != t {
+ break
+ }
+ if v_0_0_1_1.AuxInt != 31 {
+ break
+ }
+ y := v_0_0_1_1.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_0_1.AuxInt != 64 {
+ break
+ }
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_0_1_0.Type != t {
+ break
+ }
+ _ = v_0_1_0.Args[1]
+ v_0_1_0_0 := v_0_1_0.Args[0]
+ if v_0_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_1_0_0.AuxInt != 32 {
+ break
+ }
+ v_0_1_0_1 := v_0_1_0.Args[1]
+ if v_0_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_1_0_1.Type != t {
+ break
+ }
+ if v_0_1_0_1.AuxInt != 31 {
+ break
+ }
+ if y != v_0_1_0_1.Args[0] {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRL {
+ break
+ }
+ if v_1.Type != typ.UInt32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ if x != v_1_0.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1.Type != t {
+ break
+ }
+ if v_1_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
// match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y))
// cond: ac == ^((1<<uint(getARM64BFwidth(bfc))-1) << uint(getARM64BFlsb(bfc)))
// result: (BFI [bfc] y x)
@@ -17156,6 +21721,11 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool {
v.AddArg(x)
return true
}
+ return false
+}
+func rewriteValueARM64_OpARM64OR_20(v *Value) bool {
+ b := v.Block
+ _ = b
// match: (OR (ANDconst [ac] y) (UBFIZ [bfc] x))
// cond: ac == ^((1<<uint(getARM64BFwidth(bfc))-1) << uint(getARM64BFlsb(bfc)))
// result: (BFI [bfc] y x)
@@ -17822,11 +22392,6 @@ func rewriteValueARM64_OpARM64OR_10(v *Value) bool {
v0.AddArg(mem)
return true
}
- return false
-}
-func rewriteValueARM64_OpARM64OR_20(v *Value) bool {
- b := v.Block
- _ = b
// match: (OR <t> y3:(MOVDnop x3:(MOVBUloadidx ptr idx mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [1] idx) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)
// result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr idx mem)
@@ -18177,6 +22742,11 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool {
v0.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValueARM64_OpARM64OR_30(v *Value) bool {
+ b := v.Block
+ _ = b
// match: (OR <t> y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem))))
// cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)
// result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [i0] p) mem)
@@ -19733,11 +24303,6 @@ func rewriteValueARM64_OpARM64OR_20(v *Value) bool {
v0.AddArg(v1)
return true
}
- return false
-}
-func rewriteValueARM64_OpARM64OR_30(v *Value) bool {
- b := v.Block
- _ = b
// match: (OR <t> y3:(MOVDnop x3:(MOVBUload [3] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))))
// cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)
// result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
@@ -19986,6 +24551,11 @@ func rewriteValueARM64_OpARM64OR_30(v *Value) bool {
v0.AddArg(v1)
return true
}
+ return false
+}
+func rewriteValueARM64_OpARM64OR_40(v *Value) bool {
+ b := v.Block
+ _ = b
// match: (OR <t> y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))))
// cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)
// result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
@@ -21834,6 +26404,25 @@ func rewriteValueARM64_OpARM64ORconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (ORconst [c1] (ANDconst [c2] x))
+ // cond: c2|c1 == ^0
+ // result: (ORconst [c1] x)
+ for {
+ c1 := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c2|c1 == ^0) {
+ break
+ }
+ v.reset(OpARM64ORconst)
+ v.AuxInt = c1
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64ORshiftLL_0(v *Value) bool {
@@ -24183,6 +28772,44 @@ func rewriteValueARM64_OpARM64ORshiftRL_0(v *Value) bool {
}
return false
}
+func rewriteValueARM64_OpARM64RORWconst_0(v *Value) bool {
+ // match: (RORWconst [c] (RORWconst [d] x))
+ // cond:
+ // result: (RORWconst [(c+d)&31] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64RORWconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = (c + d) & 31
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64RORconst_0(v *Value) bool {
+ // match: (RORconst [c] (RORconst [d] x))
+ // cond:
+ // result: (RORconst [(c+d)&63] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64RORconst {
+ break
+ }
+ d := v_0.AuxInt
+ x := v_0.Args[0]
+ v.reset(OpARM64RORconst)
+ v.AuxInt = (c + d) & 63
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpARM64SLL_0(v *Value) bool {
// match: (SLL x (MOVDconst [c]))
// cond:
@@ -24859,7 +29486,7 @@ func rewriteValueARM64_OpARM64SUB_0(v *Value) bool {
return true
}
// match: (SUB a l:(MUL x y))
- // cond: l.Uses==1 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l)
+ // cond: l.Uses==1 && clobber(l)
// result: (MSUB a x y)
for {
_ = v.Args[1]
@@ -24871,7 +29498,7 @@ func rewriteValueARM64_OpARM64SUB_0(v *Value) bool {
_ = l.Args[1]
x := l.Args[0]
y := l.Args[1]
- if !(l.Uses == 1 && x.Op != OpARM64MOVDconst && y.Op != OpARM64MOVDconst && a.Op != OpARM64MOVDconst && clobber(l)) {
+ if !(l.Uses == 1 && clobber(l)) {
break
}
v.reset(OpARM64MSUB)
@@ -24881,7 +29508,7 @@ func rewriteValueARM64_OpARM64SUB_0(v *Value) bool {
return true
}
// match: (SUB a l:(MNEG x y))
- // cond: l.Uses==1 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l)
+ // cond: l.Uses==1 && clobber(l)
// result: (MADD a x y)
for {
_ = v.Args[1]
@@ -24893,7 +29520,7 @@ func rewriteValueARM64_OpARM64SUB_0(v *Value) bool {
_ = l.Args[1]
x := l.Args[0]
y := l.Args[1]
- if !(l.Uses == 1 && x.Op != OpARM64MOVDconst && y.Op != OpARM64MOVDconst && a.Op != OpARM64MOVDconst && clobber(l)) {
+ if !(l.Uses == 1 && clobber(l)) {
break
}
v.reset(OpARM64MADD)
@@ -24903,7 +29530,7 @@ func rewriteValueARM64_OpARM64SUB_0(v *Value) bool {
return true
}
// match: (SUB a l:(MULW x y))
- // cond: l.Uses==1 && a.Type.Size() != 8 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l)
+ // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
// result: (MSUBW a x y)
for {
_ = v.Args[1]
@@ -24915,7 +29542,7 @@ func rewriteValueARM64_OpARM64SUB_0(v *Value) bool {
_ = l.Args[1]
x := l.Args[0]
y := l.Args[1]
- if !(l.Uses == 1 && a.Type.Size() != 8 && x.Op != OpARM64MOVDconst && y.Op != OpARM64MOVDconst && a.Op != OpARM64MOVDconst && clobber(l)) {
+ if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
break
}
v.reset(OpARM64MSUBW)
@@ -24925,7 +29552,7 @@ func rewriteValueARM64_OpARM64SUB_0(v *Value) bool {
return true
}
// match: (SUB a l:(MNEGW x y))
- // cond: l.Uses==1 && a.Type.Size() != 8 && x.Op!=OpARM64MOVDconst && y.Op!=OpARM64MOVDconst && a.Op!=OpARM64MOVDconst && clobber(l)
+ // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
// result: (MADDW a x y)
for {
_ = v.Args[1]
@@ -24937,7 +29564,7 @@ func rewriteValueARM64_OpARM64SUB_0(v *Value) bool {
_ = l.Args[1]
x := l.Args[0]
y := l.Args[1]
- if !(l.Uses == 1 && a.Type.Size() != 8 && x.Op != OpARM64MOVDconst && y.Op != OpARM64MOVDconst && a.Op != OpARM64MOVDconst && clobber(l)) {
+ if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
break
}
v.reset(OpARM64MADDW)
@@ -25290,6 +29917,132 @@ func rewriteValueARM64_OpARM64TST_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (TST x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftLL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x0 := v.Args[0]
+ x1 := v.Args[1]
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64TSTshiftLL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (TST x1:(SLLconst [c] y) x0)
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftLL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x1 := v.Args[0]
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ x0 := v.Args[1]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64TSTshiftLL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (TST x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x0 := v.Args[0]
+ x1 := v.Args[1]
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64TSTshiftRL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (TST x1:(SRLconst [c] y) x0)
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRL x0 y [c])
+ for {
+ _ = v.Args[1]
+ x1 := v.Args[0]
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ x0 := v.Args[1]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64TSTshiftRL)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (TST x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRA x0 y [c])
+ for {
+ _ = v.Args[1]
+ x0 := v.Args[0]
+ x1 := v.Args[1]
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64TSTshiftRA)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (TST x1:(SRAconst [c] y) x0)
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRA x0 y [c])
+ for {
+ _ = v.Args[1]
+ x1 := v.Args[0]
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := x1.AuxInt
+ y := x1.Args[0]
+ x0 := v.Args[1]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64TSTshiftRA)
+ v.AuxInt = c
+ v.AddArg(x0)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64TSTW_0(v *Value) bool {
@@ -25429,6 +30182,132 @@ func rewriteValueARM64_OpARM64TSTconst_0(v *Value) bool {
}
return false
}
+func rewriteValueARM64_OpARM64TSTshiftLL_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (TSTshiftLL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (TSTconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftLL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (TSTconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64(uint64(c) << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftRA_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (TSTshiftRA (MOVDconst [c]) x [d])
+ // cond:
+ // result: (TSTconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRA x (MOVDconst [c]) [d])
+ // cond:
+ // result: (TSTconst x [c>>uint64(d)])
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = c >> uint64(d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftRL_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (TSTshiftRL (MOVDconst [c]) x [d])
+ // cond:
+ // result: (TSTconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v.Args[1]
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = d
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRL x (MOVDconst [c]) [d])
+ // cond:
+ // result: (TSTconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := v.AuxInt
+ _ = v.Args[1]
+ x := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64(uint64(c) >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpARM64UBFIZ_0(v *Value) bool {
// match: (UBFIZ [bfc] (SLLconst [sc] x))
// cond: sc < getARM64BFwidth(bfc)
@@ -25950,6 +30829,10 @@ func rewriteValueARM64_OpARM64XOR_0(v *Value) bool {
return false
}
func rewriteValueARM64_OpARM64XOR_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (XOR x1:(SRAconst [c] y) x0)
// cond: clobberIfDead(x1)
// result: (XORshiftRA x0 y [c])
@@ -25971,6 +30854,918 @@ func rewriteValueARM64_OpARM64XOR_10(v *Value) bool {
v.AddArg(y)
return true
}
+ // match: (XOR (SLL x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> {cc} (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (ROR x (NEG <t> y))
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ t := v_0_1.Type
+ if v_0_1.AuxInt != 63 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_1.Type != typ.UInt64 {
+ break
+ }
+ cc := v_1.Aux
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL {
+ break
+ }
+ if v_1_0.Type != typ.UInt64 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ break
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB {
+ break
+ }
+ if v_1_0_1.Type != t {
+ break
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_0_1_0.AuxInt != 64 {
+ break
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_0_1_1.Type != t {
+ break
+ }
+ if v_1_0_1_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_0_1_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_1_1.AuxInt != 64 {
+ break
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_1_1_0.Type != t {
+ break
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_1_0_0.AuxInt != 64 {
+ break
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1_0_1.Type != t {
+ break
+ }
+ if v_1_1_0_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_1_0_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XOR (CSEL0 <typ.UInt64> {cc} (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))) (SLL x (ANDconst <t> [63] y)))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (ROR x (NEG <t> y))
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_0.Type != typ.UInt64 {
+ break
+ }
+ cc := v_0.Aux
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64SRL {
+ break
+ }
+ if v_0_0.Type != typ.UInt64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpARM64SUB {
+ break
+ }
+ t := v_0_0_1.Type
+ _ = v_0_0_1.Args[1]
+ v_0_0_1_0 := v_0_0_1.Args[0]
+ if v_0_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_0_1_0.AuxInt != 64 {
+ break
+ }
+ v_0_0_1_1 := v_0_0_1.Args[1]
+ if v_0_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_0_1_1.Type != t {
+ break
+ }
+ if v_0_0_1_1.AuxInt != 63 {
+ break
+ }
+ y := v_0_0_1_1.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_0_1.AuxInt != 64 {
+ break
+ }
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_0_1_0.Type != t {
+ break
+ }
+ _ = v_0_1_0.Args[1]
+ v_0_1_0_0 := v_0_1_0.Args[0]
+ if v_0_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_1_0_0.AuxInt != 64 {
+ break
+ }
+ v_0_1_0_1 := v_0_1_0.Args[1]
+ if v_0_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_1_0_1.Type != t {
+ break
+ }
+ if v_0_1_0_1.AuxInt != 63 {
+ break
+ }
+ if y != v_0_1_0_1.Args[0] {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SLL {
+ break
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1.Type != t {
+ break
+ }
+ if v_1_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XOR (SRL <typ.UInt64> x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> {cc} (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (ROR x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRL {
+ break
+ }
+ if v_0.Type != typ.UInt64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ t := v_0_1.Type
+ if v_0_1.AuxInt != 63 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_1.Type != typ.UInt64 {
+ break
+ }
+ cc := v_1.Aux
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ break
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB {
+ break
+ }
+ if v_1_0_1.Type != t {
+ break
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_0_1_0.AuxInt != 64 {
+ break
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_0_1_1.Type != t {
+ break
+ }
+ if v_1_0_1_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_0_1_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_1_1.AuxInt != 64 {
+ break
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_1_1_0.Type != t {
+ break
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_1_0_0.AuxInt != 64 {
+ break
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1_0_1.Type != t {
+ break
+ }
+ if v_1_1_0_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_1_0_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR (CSEL0 <typ.UInt64> {cc} (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))) (SRL <typ.UInt64> x (ANDconst <t> [63] y)))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (ROR x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_0.Type != typ.UInt64 {
+ break
+ }
+ cc := v_0.Aux
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpARM64SUB {
+ break
+ }
+ t := v_0_0_1.Type
+ _ = v_0_0_1.Args[1]
+ v_0_0_1_0 := v_0_0_1.Args[0]
+ if v_0_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_0_1_0.AuxInt != 64 {
+ break
+ }
+ v_0_0_1_1 := v_0_0_1.Args[1]
+ if v_0_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_0_1_1.Type != t {
+ break
+ }
+ if v_0_0_1_1.AuxInt != 63 {
+ break
+ }
+ y := v_0_0_1_1.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_0_1.AuxInt != 64 {
+ break
+ }
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_0_1_0.Type != t {
+ break
+ }
+ _ = v_0_1_0.Args[1]
+ v_0_1_0_0 := v_0_1_0.Args[0]
+ if v_0_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_1_0_0.AuxInt != 64 {
+ break
+ }
+ v_0_1_0_1 := v_0_1_0.Args[1]
+ if v_0_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_1_0_1.Type != t {
+ break
+ }
+ if v_0_1_0_1.AuxInt != 63 {
+ break
+ }
+ if y != v_0_1_0_1.Args[0] {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRL {
+ break
+ }
+ if v_1.Type != typ.UInt64 {
+ break
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1.Type != t {
+ break
+ }
+ if v_1_1.AuxInt != 63 {
+ break
+ }
+ if y != v_1_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR (SLL x (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> {cc} (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (RORW x (NEG <t> y))
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ t := v_0_1.Type
+ if v_0_1.AuxInt != 31 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_1.Type != typ.UInt32 {
+ break
+ }
+ cc := v_1.Aux
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL {
+ break
+ }
+ if v_1_0.Type != typ.UInt32 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ if x != v_1_0_0.Args[0] {
+ break
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB {
+ break
+ }
+ if v_1_0_1.Type != t {
+ break
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_0_1_0.AuxInt != 32 {
+ break
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_0_1_1.Type != t {
+ break
+ }
+ if v_1_0_1_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_0_1_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_1_1.AuxInt != 64 {
+ break
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_1_1_0.Type != t {
+ break
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_1_0_0.AuxInt != 32 {
+ break
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1_0_1.Type != t {
+ break
+ }
+ if v_1_1_0_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_1_0_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XOR (CSEL0 <typ.UInt32> {cc} (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))) (SLL x (ANDconst <t> [31] y)))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (RORW x (NEG <t> y))
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_0.Type != typ.UInt32 {
+ break
+ }
+ cc := v_0.Aux
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64SRL {
+ break
+ }
+ if v_0_0.Type != typ.UInt32 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpARM64SUB {
+ break
+ }
+ t := v_0_0_1.Type
+ _ = v_0_0_1.Args[1]
+ v_0_0_1_0 := v_0_0_1.Args[0]
+ if v_0_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_0_1_0.AuxInt != 32 {
+ break
+ }
+ v_0_0_1_1 := v_0_0_1.Args[1]
+ if v_0_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_0_1_1.Type != t {
+ break
+ }
+ if v_0_0_1_1.AuxInt != 31 {
+ break
+ }
+ y := v_0_0_1_1.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_0_1.AuxInt != 64 {
+ break
+ }
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_0_1_0.Type != t {
+ break
+ }
+ _ = v_0_1_0.Args[1]
+ v_0_1_0_0 := v_0_1_0.Args[0]
+ if v_0_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_1_0_0.AuxInt != 32 {
+ break
+ }
+ v_0_1_0_1 := v_0_1_0.Args[1]
+ if v_0_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_1_0_1.Type != t {
+ break
+ }
+ if v_0_1_0_1.AuxInt != 31 {
+ break
+ }
+ if y != v_0_1_0_1.Args[0] {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SLL {
+ break
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1.Type != t {
+ break
+ }
+ if v_1_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XOR (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> {cc} (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (RORW x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64SRL {
+ break
+ }
+ if v_0.Type != typ.UInt32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ t := v_0_1.Type
+ if v_0_1.AuxInt != 31 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_1.Type != typ.UInt32 {
+ break
+ }
+ cc := v_1.Aux
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ break
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB {
+ break
+ }
+ if v_1_0_1.Type != t {
+ break
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_0_1_0.AuxInt != 32 {
+ break
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_0_1_1.Type != t {
+ break
+ }
+ if v_1_0_1_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_0_1_1.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_1_1.AuxInt != 64 {
+ break
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_1_1_0.Type != t {
+ break
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_1_1_0_0.AuxInt != 32 {
+ break
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1_0_1.Type != t {
+ break
+ }
+ if v_1_1_0_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_1_0_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (XOR (CSEL0 <typ.UInt32> {cc} (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))) (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)))
+ // cond: cc.(Op) == OpARM64LessThanU
+ // result: (RORW x y)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpARM64CSEL0 {
+ break
+ }
+ if v_0.Type != typ.UInt32 {
+ break
+ }
+ cc := v_0.Aux
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64SLL {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpARM64SUB {
+ break
+ }
+ t := v_0_0_1.Type
+ _ = v_0_0_1.Args[1]
+ v_0_0_1_0 := v_0_0_1.Args[0]
+ if v_0_0_1_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_0_1_0.AuxInt != 32 {
+ break
+ }
+ v_0_0_1_1 := v_0_0_1.Args[1]
+ if v_0_0_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_0_1_1.Type != t {
+ break
+ }
+ if v_0_0_1_1.AuxInt != 31 {
+ break
+ }
+ y := v_0_0_1_1.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64CMPconst {
+ break
+ }
+ if v_0_1.AuxInt != 64 {
+ break
+ }
+ v_0_1_0 := v_0_1.Args[0]
+ if v_0_1_0.Op != OpARM64SUB {
+ break
+ }
+ if v_0_1_0.Type != t {
+ break
+ }
+ _ = v_0_1_0.Args[1]
+ v_0_1_0_0 := v_0_1_0.Args[0]
+ if v_0_1_0_0.Op != OpARM64MOVDconst {
+ break
+ }
+ if v_0_1_0_0.AuxInt != 32 {
+ break
+ }
+ v_0_1_0_1 := v_0_1_0.Args[1]
+ if v_0_1_0_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_0_1_0_1.Type != t {
+ break
+ }
+ if v_0_1_0_1.AuxInt != 31 {
+ break
+ }
+ if y != v_0_1_0_1.Args[0] {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpARM64SRL {
+ break
+ }
+ if v_1.Type != typ.UInt32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ if x != v_1_0.Args[0] {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64ANDconst {
+ break
+ }
+ if v_1_1.Type != t {
+ break
+ }
+ if v_1_1.AuxInt != 31 {
+ break
+ }
+ if y != v_1_1.Args[0] {
+ break
+ }
+ if !(cc.(Op) == OpARM64LessThanU) {
+ break
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64XORconst_0(v *Value) bool {
@@ -26365,6 +32160,17 @@ func rewriteValueARM64_OpARM64XORshiftRL_0(v *Value) bool {
}
return false
}
+func rewriteValueARM64_OpAbs_0(v *Value) bool {
+ // match: (Abs x)
+ // cond:
+ // result: (FABSD x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FABSD)
+ v.AddArg(x)
+ return true
+ }
+}
func rewriteValueARM64_OpAdd16_0(v *Value) bool {
// match: (Add16 x y)
// cond:
@@ -30489,6 +36295,42 @@ func rewriteValueARM64_OpPopCount64_0(v *Value) bool {
return true
}
}
+func rewriteValueARM64_OpRotateLeft32_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (RotateLeft32 x y)
+ // cond:
+ // result: (RORW x (NEG <y.Type> y))
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64RORW)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRotateLeft64_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ // match: (RotateLeft64 x y)
+ // cond:
+ // result: (ROR x (NEG <y.Type> y))
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpARM64ROR)
+ v.AddArg(x)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+}
func rewriteValueARM64_OpRound_0(v *Value) bool {
// match: (Round x)
// cond:
@@ -30522,6 +36364,17 @@ func rewriteValueARM64_OpRound64F_0(v *Value) bool {
return true
}
}
+func rewriteValueARM64_OpRoundToEven_0(v *Value) bool {
+ // match: (RoundToEven x)
+ // cond:
+ // result: (FRINTND x)
+ for {
+ x := v.Args[0]
+ v.reset(OpARM64FRINTND)
+ v.AddArg(x)
+ return true
+ }
+}
func rewriteValueARM64_OpRsh16Ux16_0(v *Value) bool {
b := v.Block
_ = b
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index ba6a862989..9aff3106db 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -337,6 +337,8 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpMul64_0(v)
case OpMul64F:
return rewriteValuePPC64_OpMul64F_0(v)
+ case OpMul64uhilo:
+ return rewriteValuePPC64_OpMul64uhilo_0(v)
case OpMul8:
return rewriteValuePPC64_OpMul8_0(v)
case OpNeg16:
@@ -388,9 +390,9 @@ func rewriteValuePPC64(v *Value) bool {
case OpPPC64ADDconst:
return rewriteValuePPC64_OpPPC64ADDconst_0(v)
case OpPPC64AND:
- return rewriteValuePPC64_OpPPC64AND_0(v)
+ return rewriteValuePPC64_OpPPC64AND_0(v) || rewriteValuePPC64_OpPPC64AND_10(v)
case OpPPC64ANDconst:
- return rewriteValuePPC64_OpPPC64ANDconst_0(v)
+ return rewriteValuePPC64_OpPPC64ANDconst_0(v) || rewriteValuePPC64_OpPPC64ANDconst_10(v)
case OpPPC64CMP:
return rewriteValuePPC64_OpPPC64CMP_0(v)
case OpPPC64CMPU:
@@ -449,46 +451,66 @@ func rewriteValuePPC64(v *Value) bool {
return rewriteValuePPC64_OpPPC64MFVSRD_0(v)
case OpPPC64MOVBZload:
return rewriteValuePPC64_OpPPC64MOVBZload_0(v)
+ case OpPPC64MOVBZloadidx:
+ return rewriteValuePPC64_OpPPC64MOVBZloadidx_0(v)
case OpPPC64MOVBZreg:
- return rewriteValuePPC64_OpPPC64MOVBZreg_0(v)
+ return rewriteValuePPC64_OpPPC64MOVBZreg_0(v) || rewriteValuePPC64_OpPPC64MOVBZreg_10(v)
case OpPPC64MOVBreg:
- return rewriteValuePPC64_OpPPC64MOVBreg_0(v)
+ return rewriteValuePPC64_OpPPC64MOVBreg_0(v) || rewriteValuePPC64_OpPPC64MOVBreg_10(v)
case OpPPC64MOVBstore:
return rewriteValuePPC64_OpPPC64MOVBstore_0(v) || rewriteValuePPC64_OpPPC64MOVBstore_10(v) || rewriteValuePPC64_OpPPC64MOVBstore_20(v)
+ case OpPPC64MOVBstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v) || rewriteValuePPC64_OpPPC64MOVBstoreidx_10(v)
case OpPPC64MOVBstorezero:
return rewriteValuePPC64_OpPPC64MOVBstorezero_0(v)
case OpPPC64MOVDload:
return rewriteValuePPC64_OpPPC64MOVDload_0(v)
+ case OpPPC64MOVDloadidx:
+ return rewriteValuePPC64_OpPPC64MOVDloadidx_0(v)
case OpPPC64MOVDstore:
return rewriteValuePPC64_OpPPC64MOVDstore_0(v)
+ case OpPPC64MOVDstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVDstoreidx_0(v)
case OpPPC64MOVDstorezero:
return rewriteValuePPC64_OpPPC64MOVDstorezero_0(v)
case OpPPC64MOVHBRstore:
return rewriteValuePPC64_OpPPC64MOVHBRstore_0(v)
case OpPPC64MOVHZload:
return rewriteValuePPC64_OpPPC64MOVHZload_0(v)
+ case OpPPC64MOVHZloadidx:
+ return rewriteValuePPC64_OpPPC64MOVHZloadidx_0(v)
case OpPPC64MOVHZreg:
- return rewriteValuePPC64_OpPPC64MOVHZreg_0(v)
+ return rewriteValuePPC64_OpPPC64MOVHZreg_0(v) || rewriteValuePPC64_OpPPC64MOVHZreg_10(v)
case OpPPC64MOVHload:
return rewriteValuePPC64_OpPPC64MOVHload_0(v)
+ case OpPPC64MOVHloadidx:
+ return rewriteValuePPC64_OpPPC64MOVHloadidx_0(v)
case OpPPC64MOVHreg:
- return rewriteValuePPC64_OpPPC64MOVHreg_0(v)
+ return rewriteValuePPC64_OpPPC64MOVHreg_0(v) || rewriteValuePPC64_OpPPC64MOVHreg_10(v)
case OpPPC64MOVHstore:
return rewriteValuePPC64_OpPPC64MOVHstore_0(v)
+ case OpPPC64MOVHstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVHstoreidx_0(v)
case OpPPC64MOVHstorezero:
return rewriteValuePPC64_OpPPC64MOVHstorezero_0(v)
case OpPPC64MOVWBRstore:
return rewriteValuePPC64_OpPPC64MOVWBRstore_0(v)
case OpPPC64MOVWZload:
return rewriteValuePPC64_OpPPC64MOVWZload_0(v)
+ case OpPPC64MOVWZloadidx:
+ return rewriteValuePPC64_OpPPC64MOVWZloadidx_0(v)
case OpPPC64MOVWZreg:
- return rewriteValuePPC64_OpPPC64MOVWZreg_0(v)
+ return rewriteValuePPC64_OpPPC64MOVWZreg_0(v) || rewriteValuePPC64_OpPPC64MOVWZreg_10(v) || rewriteValuePPC64_OpPPC64MOVWZreg_20(v)
case OpPPC64MOVWload:
return rewriteValuePPC64_OpPPC64MOVWload_0(v)
+ case OpPPC64MOVWloadidx:
+ return rewriteValuePPC64_OpPPC64MOVWloadidx_0(v)
case OpPPC64MOVWreg:
- return rewriteValuePPC64_OpPPC64MOVWreg_0(v)
+ return rewriteValuePPC64_OpPPC64MOVWreg_0(v) || rewriteValuePPC64_OpPPC64MOVWreg_10(v)
case OpPPC64MOVWstore:
return rewriteValuePPC64_OpPPC64MOVWstore_0(v)
+ case OpPPC64MOVWstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVWstoreidx_0(v)
case OpPPC64MOVWstorezero:
return rewriteValuePPC64_OpPPC64MOVWstorezero_0(v)
case OpPPC64MTVSRD:
@@ -544,7 +566,7 @@ func rewriteValuePPC64(v *Value) bool {
case OpRsh32Ux32:
return rewriteValuePPC64_OpRsh32Ux32_0(v)
case OpRsh32Ux64:
- return rewriteValuePPC64_OpRsh32Ux64_0(v)
+ return rewriteValuePPC64_OpRsh32Ux64_0(v) || rewriteValuePPC64_OpRsh32Ux64_10(v)
case OpRsh32Ux8:
return rewriteValuePPC64_OpRsh32Ux8_0(v)
case OpRsh32x16:
@@ -552,7 +574,7 @@ func rewriteValuePPC64(v *Value) bool {
case OpRsh32x32:
return rewriteValuePPC64_OpRsh32x32_0(v)
case OpRsh32x64:
- return rewriteValuePPC64_OpRsh32x64_0(v)
+ return rewriteValuePPC64_OpRsh32x64_0(v) || rewriteValuePPC64_OpRsh32x64_10(v)
case OpRsh32x8:
return rewriteValuePPC64_OpRsh32x8_0(v)
case OpRsh64Ux16:
@@ -560,7 +582,7 @@ func rewriteValuePPC64(v *Value) bool {
case OpRsh64Ux32:
return rewriteValuePPC64_OpRsh64Ux32_0(v)
case OpRsh64Ux64:
- return rewriteValuePPC64_OpRsh64Ux64_0(v)
+ return rewriteValuePPC64_OpRsh64Ux64_0(v) || rewriteValuePPC64_OpRsh64Ux64_10(v)
case OpRsh64Ux8:
return rewriteValuePPC64_OpRsh64Ux8_0(v)
case OpRsh64x16:
@@ -568,7 +590,7 @@ func rewriteValuePPC64(v *Value) bool {
case OpRsh64x32:
return rewriteValuePPC64_OpRsh64x32_0(v)
case OpRsh64x64:
- return rewriteValuePPC64_OpRsh64x64_0(v)
+ return rewriteValuePPC64_OpRsh64x64_0(v) || rewriteValuePPC64_OpRsh64x64_10(v)
case OpRsh64x8:
return rewriteValuePPC64_OpRsh64x8_0(v)
case OpRsh8Ux16:
@@ -3070,6 +3092,21 @@ func rewriteValuePPC64_OpLsh16x16_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Lsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Lsh16x16 x y)
// cond:
// result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
for {
@@ -3136,6 +3173,21 @@ func rewriteValuePPC64_OpLsh16x32_0(v *Value) bool {
return true
}
// match: (Lsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Lsh16x32 x y)
// cond:
// result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
for {
@@ -3219,6 +3271,21 @@ func rewriteValuePPC64_OpLsh16x64_0(v *Value) bool {
return true
}
// match: (Lsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Lsh16x64 x y)
// cond:
// result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
for {
@@ -3245,6 +3312,21 @@ func rewriteValuePPC64_OpLsh16x8_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Lsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Lsh16x8 x y)
// cond:
// result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
for {
@@ -3273,6 +3355,21 @@ func rewriteValuePPC64_OpLsh32x16_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Lsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Lsh32x16 x y)
// cond:
// result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
for {
@@ -3339,6 +3436,21 @@ func rewriteValuePPC64_OpLsh32x32_0(v *Value) bool {
return true
}
// match: (Lsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Lsh32x32 x y)
// cond:
// result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
for {
@@ -3421,6 +3533,21 @@ func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (Lsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
// match: (Lsh32x64 x (AND y (MOVDconst [31])))
// cond:
// result: (SLW x (ANDconst <typ.Int32> [31] y))
@@ -3527,6 +3654,21 @@ func rewriteValuePPC64_OpLsh32x8_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Lsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Lsh32x8 x y)
// cond:
// result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
for {
@@ -3555,6 +3697,21 @@ func rewriteValuePPC64_OpLsh64x16_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Lsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Lsh64x16 x y)
// cond:
// result: (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
for {
@@ -3621,6 +3778,21 @@ func rewriteValuePPC64_OpLsh64x32_0(v *Value) bool {
return true
}
// match: (Lsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Lsh64x32 x y)
// cond:
// result: (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
for {
@@ -3703,6 +3875,21 @@ func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
// match: (Lsh64x64 x (AND y (MOVDconst [63])))
// cond:
// result: (SLD x (ANDconst <typ.Int64> [63] y))
@@ -3809,6 +3996,21 @@ func rewriteValuePPC64_OpLsh64x8_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Lsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Lsh64x8 x y)
// cond:
// result: (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
for {
@@ -3837,6 +4039,21 @@ func rewriteValuePPC64_OpLsh8x16_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Lsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Lsh8x16 x y)
// cond:
// result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
for {
@@ -3903,6 +4120,21 @@ func rewriteValuePPC64_OpLsh8x32_0(v *Value) bool {
return true
}
// match: (Lsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Lsh8x32 x y)
// cond:
// result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
for {
@@ -3986,6 +4218,21 @@ func rewriteValuePPC64_OpLsh8x64_0(v *Value) bool {
return true
}
// match: (Lsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Lsh8x64 x y)
// cond:
// result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
for {
@@ -4012,6 +4259,21 @@ func rewriteValuePPC64_OpLsh8x8_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Lsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Lsh8x8 x y)
// cond:
// result: (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
for {
@@ -4569,6 +4831,20 @@ func rewriteValuePPC64_OpMul64F_0(v *Value) bool {
return true
}
}
+func rewriteValuePPC64_OpMul64uhilo_0(v *Value) bool {
+ // match: (Mul64uhilo x y)
+ // cond:
+ // result: (LoweredMuluhilo x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ v.reset(OpPPC64LoweredMuluhilo)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+}
func rewriteValuePPC64_OpMul8_0(v *Value) bool {
// match: (Mul8 x y)
// cond:
@@ -5533,6 +5809,95 @@ func rewriteValuePPC64_OpPPC64AND_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (AND (MOVDconst [c]) y:(MOVWZreg _))
+ // cond: c&0xFFFFFFFF == 0xFFFFFFFF
+ // result: y
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ y := v.Args[1]
+ if y.Op != OpPPC64MOVWZreg {
+ break
+ }
+ if !(c&0xFFFFFFFF == 0xFFFFFFFF) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND y:(MOVWZreg _) (MOVDconst [c]))
+ // cond: c&0xFFFFFFFF == 0xFFFFFFFF
+ // result: y
+ for {
+ _ = v.Args[1]
+ y := v.Args[0]
+ if y.Op != OpPPC64MOVWZreg {
+ break
+ }
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ if !(c&0xFFFFFFFF == 0xFFFFFFFF) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
+ // match: (AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x))
+ // cond:
+ // result: (MOVWZreg x)
+ for {
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ if v_0.AuxInt != 0xFFFFFFFF {
+ break
+ }
+ y := v.Args[1]
+ if y.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (AND y:(MOVWreg x) (MOVDconst [0xFFFFFFFF]))
+ // cond:
+ // result: (MOVWZreg x)
+ for {
+ _ = v.Args[1]
+ y := v.Args[0]
+ if y.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := y.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ if v_1.AuxInt != 0xFFFFFFFF {
+ break
+ }
+ v.reset(OpPPC64MOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64AND_10(v *Value) bool {
// match: (AND (MOVDconst [c]) x:(MOVBZload _ _))
// cond:
// result: (ANDconst [c&0xFF] x)
@@ -5673,6 +6038,22 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool {
v.AddArg(y)
return true
}
+ // match: (ANDconst [0xFF] y:(MOVBreg _))
+ // cond:
+ // result: y
+ for {
+ if v.AuxInt != 0xFF {
+ break
+ }
+ y := v.Args[0]
+ if y.Op != OpPPC64MOVBreg {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = y.Type
+ v.AddArg(y)
+ return true
+ }
// match: (ANDconst [c] y:(MOVHZreg _))
// cond: c&0xFFFF == 0xFFFF
// result: y
@@ -5690,16 +6071,15 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool {
v.AddArg(y)
return true
}
- // match: (ANDconst [c] y:(MOVWZreg _))
- // cond: c&0xFFFFFFFF == 0xFFFFFFFF
+ // match: (ANDconst [0xFFFF] y:(MOVHreg _))
+ // cond:
// result: y
for {
- c := v.AuxInt
- y := v.Args[0]
- if y.Op != OpPPC64MOVWZreg {
+ if v.AuxInt != 0xFFFF {
break
}
- if !(c&0xFFFFFFFF == 0xFFFFFFFF) {
+ y := v.Args[0]
+ if y.Op != OpPPC64MOVHreg {
break
}
v.reset(OpCopy)
@@ -5707,6 +6087,21 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool {
v.AddArg(y)
return true
}
+ // match: (ANDconst [c] (MOVBreg x))
+ // cond:
+ // result: (ANDconst [c&0xFF] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = c & 0xFF
+ v.AddArg(x)
+ return true
+ }
// match: (ANDconst [c] (MOVBZreg x))
// cond:
// result: (ANDconst [c&0xFF] x)
@@ -5722,6 +6117,24 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (ANDconst [c] (MOVHreg x))
+ // cond:
+ // result: (ANDconst [c&0xFFFF] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = c & 0xFFFF
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ANDconst_10(v *Value) bool {
// match: (ANDconst [c] (MOVHZreg x))
// cond:
// result: (ANDconst [c&0xFFFF] x)
@@ -5737,6 +6150,21 @@ func rewriteValuePPC64_OpPPC64ANDconst_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (ANDconst [c] (MOVWreg x))
+ // cond:
+ // result: (ANDconst [c&0xFFFFFFFF] x)
+ for {
+ c := v.AuxInt
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = c & 0xFFFFFFFF
+ v.AddArg(x)
+ return true
+ }
// match: (ANDconst [c] (MOVWZreg x))
// cond:
// result: (ANDconst [c&0xFFFFFFFF] x)
@@ -6257,7 +6685,7 @@ func rewriteValuePPC64_OpPPC64Equal_0(v *Value) bool {
func rewriteValuePPC64_OpPPC64FABS_0(v *Value) bool {
// match: (FABS (FMOVDconst [x]))
// cond:
- // result: (FMOVDconst [f2i(math.Abs(i2f(x)))])
+ // result: (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FMOVDconst {
@@ -6265,7 +6693,7 @@ func rewriteValuePPC64_OpPPC64FABS_0(v *Value) bool {
}
x := v_0.AuxInt
v.reset(OpPPC64FMOVDconst)
- v.AuxInt = f2i(math.Abs(i2f(x)))
+ v.AuxInt = auxFrom64F(math.Abs(auxTo64F(x)))
return true
}
return false
@@ -6355,7 +6783,7 @@ func rewriteValuePPC64_OpPPC64FADDS_0(v *Value) bool {
func rewriteValuePPC64_OpPPC64FCEIL_0(v *Value) bool {
// match: (FCEIL (FMOVDconst [x]))
// cond:
- // result: (FMOVDconst [f2i(math.Ceil(i2f(x)))])
+ // result: (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FMOVDconst {
@@ -6363,7 +6791,7 @@ func rewriteValuePPC64_OpPPC64FCEIL_0(v *Value) bool {
}
x := v_0.AuxInt
v.reset(OpPPC64FMOVDconst)
- v.AuxInt = f2i(math.Ceil(i2f(x)))
+ v.AuxInt = auxFrom64F(math.Ceil(auxTo64F(x)))
return true
}
return false
@@ -6371,7 +6799,7 @@ func rewriteValuePPC64_OpPPC64FCEIL_0(v *Value) bool {
func rewriteValuePPC64_OpPPC64FFLOOR_0(v *Value) bool {
// match: (FFLOOR (FMOVDconst [x]))
// cond:
- // result: (FMOVDconst [f2i(math.Floor(i2f(x)))])
+ // result: (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FMOVDconst {
@@ -6379,7 +6807,7 @@ func rewriteValuePPC64_OpPPC64FFLOOR_0(v *Value) bool {
}
x := v_0.AuxInt
v.reset(OpPPC64FMOVDconst)
- v.AuxInt = f2i(math.Floor(i2f(x)))
+ v.AuxInt = auxFrom64F(math.Floor(auxTo64F(x)))
return true
}
return false
@@ -6681,7 +7109,7 @@ func rewriteValuePPC64_OpPPC64FNEG_0(v *Value) bool {
func rewriteValuePPC64_OpPPC64FSQRT_0(v *Value) bool {
// match: (FSQRT (FMOVDconst [x]))
// cond:
- // result: (FMOVDconst [f2i(math.Sqrt(i2f(x)))])
+ // result: (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FMOVDconst {
@@ -6689,7 +7117,7 @@ func rewriteValuePPC64_OpPPC64FSQRT_0(v *Value) bool {
}
x := v_0.AuxInt
v.reset(OpPPC64FMOVDconst)
- v.AuxInt = f2i(math.Sqrt(i2f(x)))
+ v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(x)))
return true
}
return false
@@ -6741,7 +7169,7 @@ func rewriteValuePPC64_OpPPC64FSUBS_0(v *Value) bool {
func rewriteValuePPC64_OpPPC64FTRUNC_0(v *Value) bool {
// match: (FTRUNC (FMOVDconst [x]))
// cond:
- // result: (FMOVDconst [f2i(math.Trunc(i2f(x)))])
+ // result: (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))])
for {
v_0 := v.Args[0]
if v_0.Op != OpPPC64FMOVDconst {
@@ -6749,7 +7177,7 @@ func rewriteValuePPC64_OpPPC64FTRUNC_0(v *Value) bool {
}
x := v_0.AuxInt
v.reset(OpPPC64FMOVDconst)
- v.AuxInt = f2i(math.Trunc(i2f(x)))
+ v.AuxInt = auxFrom64F(math.Trunc(auxTo64F(x)))
return true
}
return false
@@ -7058,9 +7486,84 @@ func rewriteValuePPC64_OpPPC64MOVBZload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBZload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVBZloadidx ptr idx mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ _ = v.Args[1]
+ p := v.Args[0]
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ _ = p.Args[1]
+ ptr := p.Args[0]
+ idx := p.Args[1]
+ mem := v.Args[1]
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVBZloadidx)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBZloadidx_0(v *Value) bool {
+ // match: (MOVBZloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c)
+ // result: (MOVBZload [c] ptr mem)
+ for {
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBZloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c)
+ // result: (MOVBZload [c] ptr mem)
+ for {
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVBZreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFF
// result: y
@@ -7078,6 +7581,81 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool {
v.AddArg(y)
return true
}
+ // match: (MOVBZreg (SRWconst [c] (MOVBZreg x)))
+ // cond:
+ // result: (SRWconst [c] (MOVBZreg x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVBZreg (SRWconst [c] x))
+ // cond: sizeof(x.Type) == 8
+ // result: (SRWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) == 8) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (SRDconst [c] x))
+ // cond: c>=56
+ // result: (SRDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c >= 56) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (SRWconst [c] x))
+ // cond: c>=24
+ // result: (SRWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c >= 24) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
// match: (MOVBZreg y:(MOVBZreg _))
// cond:
// result: y
@@ -7118,6 +7696,40 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (MOVBZreg x:(MOVBZloadidx _ _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVBZloadidx {
+ break
+ }
+ _ = x.Args[2]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg x:(Arg <t>))
+ // cond: is8BitInt(t) && !isSigned(t)
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBZreg_10(v *Value) bool {
// match: (MOVBZreg (MOVDconst [c]))
// cond:
// result: (MOVDconst [int64(uint8(c))])
@@ -7134,6 +7746,10 @@ func rewriteValuePPC64_OpPPC64MOVBZreg_0(v *Value) bool {
return false
}
func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVBreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0x7F
// result: y
@@ -7151,6 +7767,117 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool {
v.AddArg(y)
return true
}
+ // match: (MOVBreg (SRAWconst [c] (MOVBreg x)))
+ // cond:
+ // result: (SRAWconst [c] (MOVBreg x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVBreg (SRAWconst [c] x))
+ // cond: sizeof(x.Type) == 8
+ // result: (SRAWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) == 8) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRDconst [c] x))
+ // cond: c>56
+ // result: (SRDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c > 56) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRDconst [c] x))
+ // cond: c==56
+ // result: (SRADconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c == 56) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRWconst [c] x))
+ // cond: c>24
+ // result: (SRWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c > 24) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRWconst [c] x))
+ // cond: c==24
+ // result: (SRAWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c == 24) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
// match: (MOVBreg y:(MOVBreg _))
// cond:
// result: y
@@ -7177,6 +7904,26 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (MOVBreg x:(Arg <t>))
+ // cond: is8BitInt(t) && isSigned(t)
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBreg_10(v *Value) bool {
// match: (MOVBreg (MOVDconst [c]))
// cond:
// result: (MOVDconst [int64(int8(c))])
@@ -7193,10 +7940,6 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool {
return false
}
func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool {
- b := v.Block
- _ = b
- typ := &b.Func.Config.Types
- _ = typ
// match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
// cond: is16Bit(off1+off2)
// result: (MOVBstore [off1+off2] {sym} x val mem)
@@ -7250,8 +7993,8 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
- // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
- // cond: c == 0
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // cond:
// result: (MOVBstorezero [off] {sym} ptr mem)
for {
off := v.AuxInt
@@ -7262,11 +8005,10 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool {
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(c == 0) {
+ if v_1.AuxInt != 0 {
break
}
+ mem := v.Args[2]
v.reset(OpPPC64MOVBstorezero)
v.AuxInt = off
v.Aux = sym
@@ -7274,6 +8016,32 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVBstore [off] {sym} p:(ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil && p.Uses == 1
+ // result: (MOVBstoreidx ptr idx val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ p := v.Args[0]
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ _ = p.Args[1]
+ ptr := p.Args[0]
+ idx := p.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(off == 0 && sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
// cond:
// result: (MOVBstore [off] {sym} ptr x mem)
@@ -7406,6 +8174,15 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHreg x) [c]) mem)
// cond: c <= 8
// result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
@@ -7439,15 +8216,6 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool {
- b := v.Block
- _ = b
- config := b.Func.Config
- _ = config
- typ := &b.Func.Config.Types
- _ = typ
// match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHZreg x) [c]) mem)
// cond: c <= 8
// result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
@@ -7873,6 +8641,15 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool {
v.AddArg(mem)
return true
}
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstore_20(v *Value) bool {
+ b := v.Block
+ _ = b
+ config := b.Func.Config
+ _ = config
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVBstore [i7] {s} p (SRDconst w [56]) x0:(MOVBstore [i6] {s} p (SRDconst w [48]) x1:(MOVBstore [i5] {s} p (SRDconst w [40]) x2:(MOVBstore [i4] {s} p (SRDconst w [32]) x3:(MOVWstore [i0] {s} p w mem)))))
// cond: !config.BigEndian && i0%4 == 0 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3)
// result: (MOVDstore [i0] {s} p w mem)
@@ -7982,15 +8759,6 @@ func rewriteValuePPC64_OpPPC64MOVBstore_10(v *Value) bool {
v.AddArg(mem)
return true
}
- return false
-}
-func rewriteValuePPC64_OpPPC64MOVBstore_20(v *Value) bool {
- b := v.Block
- _ = b
- config := b.Func.Config
- _ = config
- typ := &b.Func.Config.Types
- _ = typ
// match: (MOVBstore [i7] {s} p w x0:(MOVBstore [i6] {s} p (SRDconst w [8]) x1:(MOVBstore [i5] {s} p (SRDconst w [16]) x2:(MOVBstore [i4] {s} p (SRDconst w [24]) x3:(MOVBstore [i3] {s} p (SRDconst w [32]) x4:(MOVBstore [i2] {s} p (SRDconst w [40]) x5:(MOVBstore [i1] {s} p (SRDconst w [48]) x6:(MOVBstore [i0] {s} p (SRDconst w [56]) mem))))))))
// cond: !config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
// result: (MOVDBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
@@ -8170,6 +8938,350 @@ func rewriteValuePPC64_OpPPC64MOVBstore_20(v *Value) bool {
}
return false
}
+func rewriteValuePPC64_OpPPC64MOVBstoreidx_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is16Bit(c)
+ // result: (MOVBstore [c] ptr val mem)
+ for {
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: is16Bit(c)
+ // result: (MOVBstore [c] ptr val mem)
+ for {
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx [off] {sym} ptr idx (MOVBreg x) mem)
+ // cond:
+ // result: (MOVBstoreidx [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v.Args[3]
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx [off] {sym} ptr idx (MOVBZreg x) mem)
+ // cond:
+ // result: (MOVBstoreidx [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v.Args[3]
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx [off] {sym} ptr idx (MOVHreg x) mem)
+ // cond:
+ // result: (MOVBstoreidx [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v.Args[3]
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx [off] {sym} ptr idx (MOVHZreg x) mem)
+ // cond:
+ // result: (MOVBstoreidx [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v.Args[3]
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx [off] {sym} ptr idx (MOVWreg x) mem)
+ // cond:
+ // result: (MOVBstoreidx [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v.Args[3]
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx [off] {sym} ptr idx (MOVWZreg x) mem)
+ // cond:
+ // result: (MOVBstoreidx [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v.Args[3]
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVHreg x) [c]) mem)
+ // cond: c <= 8
+ // result: (MOVBstoreidx [off] {sym} ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v.Args[3]
+ if !(c <= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVHZreg x) [c]) mem)
+ // cond: c <= 8
+ // result: (MOVBstoreidx [off] {sym} ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v.Args[3]
+ if !(c <= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstoreidx_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
+ // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVWreg x) [c]) mem)
+ // cond: c <= 24
+ // result: (MOVBstoreidx [off] {sym} ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v.Args[3]
+ if !(c <= 24) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOVWZreg x) [c]) mem)
+ // cond: c <= 24
+ // result: (MOVBstoreidx [off] {sym} ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_2.AuxInt
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v.Args[3]
+ if !(c <= 24) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = c
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64MOVBstorezero_0(v *Value) bool {
// match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
@@ -8299,6 +9411,77 @@ func rewriteValuePPC64_OpPPC64MOVDload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVDload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVDloadidx ptr idx mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ _ = v.Args[1]
+ p := v.Args[0]
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ _ = p.Args[1]
+ ptr := p.Args[0]
+ idx := p.Args[1]
+ mem := v.Args[1]
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVDloadidx)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDloadidx_0(v *Value) bool {
+ // match: (MOVDloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c)
+ // result: (MOVDload [c] ptr mem)
+ for {
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c)
+ // result: (MOVDload [c] ptr mem)
+ for {
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool {
@@ -8377,8 +9560,8 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
- // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
- // cond: c == 0
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // cond:
// result: (MOVDstorezero [off] {sym} ptr mem)
for {
off := v.AuxInt
@@ -8389,11 +9572,10 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool {
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(c == 0) {
+ if v_1.AuxInt != 0 {
break
}
+ mem := v.Args[2]
v.reset(OpPPC64MOVDstorezero)
v.AuxInt = off
v.Aux = sym
@@ -8401,6 +9583,81 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVDstore [off] {sym} p:(ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil && p.Uses == 1
+ // result: (MOVDstoreidx ptr idx val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ p := v.Args[0]
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ _ = p.Args[1]
+ ptr := p.Args[0]
+ idx := p.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(off == 0 && sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVDstoreidx)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstoreidx_0(v *Value) bool {
+ // match: (MOVDstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is16Bit(c)
+ // result: (MOVDstore [c] ptr val mem)
+ for {
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVDstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: is16Bit(c)
+ // result: (MOVDstore [c] ptr val mem)
+ for {
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValuePPC64_OpPPC64MOVDstorezero_0(v *Value) bool {
@@ -8588,9 +9845,84 @@ func rewriteValuePPC64_OpPPC64MOVHZload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVHZload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVHZloadidx ptr idx mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ _ = v.Args[1]
+ p := v.Args[0]
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ _ = p.Args[1]
+ ptr := p.Args[0]
+ idx := p.Args[1]
+ mem := v.Args[1]
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVHZloadidx)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHZloadidx_0(v *Value) bool {
+ // match: (MOVHZloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c)
+ // result: (MOVHZload [c] ptr mem)
+ for {
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHZloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c)
+ // result: (MOVHZload [c] ptr mem)
+ for {
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVHZreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFFFF
// result: y
@@ -8608,6 +9940,102 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool {
v.AddArg(y)
return true
}
+ // match: (MOVHZreg (SRWconst [c] (MOVBZreg x)))
+ // cond:
+ // result: (SRWconst [c] (MOVBZreg x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] (MOVHZreg x)))
+ // cond:
+ // result: (SRWconst [c] (MOVHZreg x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] x))
+ // cond: sizeof(x.Type) <= 16
+ // result: (SRWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg (SRDconst [c] x))
+ // cond: c>=48
+ // result: (SRDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c >= 48) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] x))
+ // cond: c>=16
+ // result: (SRWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c >= 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
// match: (MOVHZreg y:(MOVHZreg _))
// cond:
// result: y
@@ -8661,6 +10089,37 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool {
v.AddArg(x)
return true
}
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHZreg_10(v *Value) bool {
+ // match: (MOVHZreg x:(MOVBZload _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVBZload {
+ break
+ }
+ _ = x.Args[1]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVBZloadidx _ _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVBZloadidx {
+ break
+ }
+ _ = x.Args[2]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
// match: (MOVHZreg x:(MOVHZload _ _))
// cond:
// result: x
@@ -8675,6 +10134,37 @@ func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (MOVHZreg x:(MOVHZloadidx _ _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVHZloadidx {
+ break
+ }
+ _ = x.Args[2]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t)) && !isSigned(t)
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t)) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
// match: (MOVHZreg (MOVDconst [c]))
// cond:
// result: (MOVDconst [int64(uint16(c))])
@@ -8740,9 +10230,84 @@ func rewriteValuePPC64_OpPPC64MOVHload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVHload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVHloadidx ptr idx mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ _ = v.Args[1]
+ p := v.Args[0]
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ _ = p.Args[1]
+ ptr := p.Args[0]
+ idx := p.Args[1]
+ mem := v.Args[1]
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVHloadidx)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHloadidx_0(v *Value) bool {
+ // match: (MOVHloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c)
+ // result: (MOVHload [c] ptr mem)
+ for {
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c)
+ // result: (MOVHload [c] ptr mem)
+ for {
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVHreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0x7FFF
// result: y
@@ -8760,6 +10325,138 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool {
v.AddArg(y)
return true
}
+ // match: (MOVHreg (SRAWconst [c] (MOVBreg x)))
+ // cond:
+ // result: (SRAWconst [c] (MOVBreg x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHreg (SRAWconst [c] (MOVHreg x)))
+ // cond:
+ // result: (SRAWconst [c] (MOVHreg x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHreg (SRAWconst [c] x))
+ // cond: sizeof(x.Type) <= 16
+ // result: (SRAWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRDconst [c] x))
+ // cond: c>48
+ // result: (SRDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c > 48) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRDconst [c] x))
+ // cond: c==48
+ // result: (SRADconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c == 48) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRWconst [c] x))
+ // cond: c>16
+ // result: (SRWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c > 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRWconst [c] x))
+ // cond: c==16
+ // result: (SRAWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c == 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
// match: (MOVHreg y:(MOVHreg _))
// cond:
// result: y
@@ -8786,6 +10483,9 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool {
v.AddArg(y)
return true
}
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHreg_10(v *Value) bool {
// match: (MOVHreg y:(MOVHZreg x))
// cond:
// result: (MOVHreg x)
@@ -8813,6 +10513,37 @@ func rewriteValuePPC64_OpPPC64MOVHreg_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (MOVHreg x:(MOVHloadidx _ _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVHloadidx {
+ break
+ }
+ _ = x.Args[2]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t)) && isSigned(t)
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t)) && isSigned(t)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
// match: (MOVHreg (MOVDconst [c]))
// cond:
// result: (MOVDconst [int64(int16(c))])
@@ -8886,8 +10617,8 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
- // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
- // cond: c == 0
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // cond:
// result: (MOVHstorezero [off] {sym} ptr mem)
for {
off := v.AuxInt
@@ -8898,11 +10629,10 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool {
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(c == 0) {
+ if v_1.AuxInt != 0 {
break
}
+ mem := v.Args[2]
v.reset(OpPPC64MOVHstorezero)
v.AuxInt = off
v.Aux = sym
@@ -8910,6 +10640,32 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVHstore [off] {sym} p:(ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil && p.Uses == 1
+ // result: (MOVHstoreidx ptr idx val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ p := v.Args[0]
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ _ = p.Args[1]
+ ptr := p.Args[0]
+ idx := p.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(off == 0 && sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
// cond:
// result: (MOVHstore [off] {sym} ptr x mem)
@@ -9086,6 +10842,151 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool {
}
return false
}
+func rewriteValuePPC64_OpPPC64MOVHstoreidx_0(v *Value) bool {
+ // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is16Bit(c)
+ // result: (MOVHstore [c] ptr val mem)
+ for {
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: is16Bit(c)
+ // result: (MOVHstore [c] ptr val mem)
+ for {
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstoreidx [off] {sym} ptr idx (MOVHreg x) mem)
+ // cond:
+ // result: (MOVHstoreidx [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v.Args[3]
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstoreidx [off] {sym} ptr idx (MOVHZreg x) mem)
+ // cond:
+ // result: (MOVHstoreidx [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v.Args[3]
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstoreidx [off] {sym} ptr idx (MOVWreg x) mem)
+ // cond:
+ // result: (MOVHstoreidx [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v.Args[3]
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVHstoreidx [off] {sym} ptr idx (MOVWZreg x) mem)
+ // cond:
+ // result: (MOVHstoreidx [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v.Args[3]
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64MOVHstorezero_0(v *Value) bool {
// match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
@@ -9231,9 +11132,84 @@ func rewriteValuePPC64_OpPPC64MOVWZload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVWZload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVWZloadidx ptr idx mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ _ = v.Args[1]
+ p := v.Args[0]
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ _ = p.Args[1]
+ ptr := p.Args[0]
+ idx := p.Args[1]
+ mem := v.Args[1]
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVWZloadidx)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWZloadidx_0(v *Value) bool {
+ // match: (MOVWZloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c)
+ // result: (MOVWZload [c] ptr mem)
+ for {
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWZloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c)
+ // result: (MOVWZload [c] ptr mem)
+ for {
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVWZreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFFFFFFFF
// result: y
@@ -9295,6 +11271,105 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool {
v.AddArg(y)
return true
}
+ // match: (MOVWZreg (SRWconst [c] (MOVBZreg x)))
+ // cond:
+ // result: (SRWconst [c] (MOVBZreg x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWZreg (SRWconst [c] (MOVHZreg x)))
+ // cond:
+ // result: (SRWconst [c] (MOVHZreg x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWZreg (SRWconst [c] (MOVWZreg x)))
+ // cond:
+ // result: (SRWconst [c] (MOVWZreg x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWZreg (SRWconst [c] x))
+ // cond: sizeof(x.Type) <= 32
+ // result: (SRWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 32) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg (SRDconst [c] x))
+ // cond: c>=32
+ // result: (SRDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c >= 32) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
// match: (MOVWZreg y:(MOVWZreg _))
// cond:
// result: y
@@ -9321,6 +11396,9 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool {
v.AddArg(y)
return true
}
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWZreg_10(v *Value) bool {
// match: (MOVWZreg y:(MOVBZreg _))
// cond:
// result: y
@@ -9375,6 +11453,123 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (MOVWZreg x:(MOVBZload _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVBZload {
+ break
+ }
+ _ = x.Args[1]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVBZloadidx _ _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVBZloadidx {
+ break
+ }
+ _ = x.Args[2]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVHZload _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVHZload {
+ break
+ }
+ _ = x.Args[1]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVHZloadidx _ _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVHZloadidx {
+ break
+ }
+ _ = x.Args[2]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVWZload _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVWZload {
+ break
+ }
+ _ = x.Args[1]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVWZloadidx _ _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVWZloadidx {
+ break
+ }
+ _ = x.Args[2]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWZreg_20(v *Value) bool {
+ // match: (MOVWZreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64(uint32(c))
+ return true
+ }
return false
}
func rewriteValuePPC64_OpPPC64MOVWload_0(v *Value) bool {
@@ -9427,9 +11622,84 @@ func rewriteValuePPC64_OpPPC64MOVWload_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVWload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVWloadidx ptr idx mem)
+ for {
+ if v.AuxInt != 0 {
+ break
+ }
+ sym := v.Aux
+ _ = v.Args[1]
+ p := v.Args[0]
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ _ = p.Args[1]
+ ptr := p.Args[0]
+ idx := p.Args[1]
+ mem := v.Args[1]
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVWloadidx)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWloadidx_0(v *Value) bool {
+ // match: (MOVWloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c)
+ // result: (MOVWload [c] ptr mem)
+ for {
+ _ = v.Args[2]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ mem := v.Args[2]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c)
+ // result: (MOVWload [c] ptr mem)
+ for {
+ _ = v.Args[2]
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ mem := v.Args[2]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (MOVWreg y:(ANDconst [c] _))
// cond: uint64(c) <= 0xFFFF
// result: y
@@ -9491,6 +11761,123 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool {
v.AddArg(y)
return true
}
+ // match: (MOVWreg (SRAWconst [c] (MOVBreg x)))
+ // cond:
+ // result: (SRAWconst [c] (MOVBreg x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWreg (SRAWconst [c] (MOVHreg x)))
+ // cond:
+ // result: (SRAWconst [c] (MOVHreg x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWreg (SRAWconst [c] (MOVWreg x)))
+ // cond:
+ // result: (SRAWconst [c] (MOVWreg x))
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := v_0.AuxInt
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = c
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWreg (SRAWconst [c] x))
+ // cond: sizeof(x.Type) <= 32
+ // result: (SRAWconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 32) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (SRDconst [c] x))
+ // cond: c>32
+ // result: (SRDconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c > 32) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (SRDconst [c] x))
+ // cond: c==32
+ // result: (SRADconst [c] x)
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := v_0.AuxInt
+ x := v_0.Args[0]
+ if !(c == 32) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = c
+ v.AddArg(x)
+ return true
+ }
// match: (MOVWreg y:(MOVWreg _))
// cond:
// result: y
@@ -9504,6 +11891,9 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool {
v.AddArg(y)
return true
}
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWreg_10(v *Value) bool {
// match: (MOVWreg y:(MOVHreg _))
// cond:
// result: y
@@ -9543,6 +11933,92 @@ func rewriteValuePPC64_OpPPC64MOVWreg_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (MOVWreg x:(MOVHload _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVHload {
+ break
+ }
+ _ = x.Args[1]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHloadidx _ _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVHloadidx {
+ break
+ }
+ _ = x.Args[2]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVWload {
+ break
+ }
+ _ = x.Args[1]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWloadidx _ _ _))
+ // cond:
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpPPC64MOVWloadidx {
+ break
+ }
+ _ = x.Args[2]
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)
+ // result: x
+ for {
+ x := v.Args[0]
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVDconst [c]))
+ // cond:
+ // result: (MOVDconst [int64(int32(c))])
+ for {
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64(int32(c))
+ return true
+ }
return false
}
func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool {
@@ -9599,8 +12075,8 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
- // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
- // cond: c == 0
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // cond:
// result: (MOVWstorezero [off] {sym} ptr mem)
for {
off := v.AuxInt
@@ -9611,11 +12087,10 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool {
if v_1.Op != OpPPC64MOVDconst {
break
}
- c := v_1.AuxInt
- mem := v.Args[2]
- if !(c == 0) {
+ if v_1.AuxInt != 0 {
break
}
+ mem := v.Args[2]
v.reset(OpPPC64MOVWstorezero)
v.AuxInt = off
v.Aux = sym
@@ -9623,6 +12098,32 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool {
v.AddArg(mem)
return true
}
+ // match: (MOVWstore [off] {sym} p:(ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil && p.Uses == 1
+ // result: (MOVWstoreidx ptr idx val mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[2]
+ p := v.Args[0]
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ _ = p.Args[1]
+ ptr := p.Args[0]
+ idx := p.Args[1]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(off == 0 && sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVWstoreidx)
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
// match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
// cond:
// result: (MOVWstore [off] {sym} ptr x mem)
@@ -9669,6 +12170,103 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool {
}
return false
}
+func rewriteValuePPC64_OpPPC64MOVWstoreidx_0(v *Value) bool {
+ // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is16Bit(c)
+ // result: (MOVWstore [c] ptr val mem)
+ for {
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_1.AuxInt
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: is16Bit(c)
+ // result: (MOVWstore [c] ptr val mem)
+ for {
+ _ = v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := v_0.AuxInt
+ ptr := v.Args[1]
+ val := v.Args[2]
+ mem := v.Args[3]
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = c
+ v.AddArg(ptr)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx [off] {sym} ptr idx (MOVWreg x) mem)
+ // cond:
+ // result: (MOVWstoreidx [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v.Args[3]
+ v.reset(OpPPC64MOVWstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVWstoreidx [off] {sym} ptr idx (MOVWZreg x) mem)
+ // cond:
+ // result: (MOVWstoreidx [off] {sym} ptr idx x mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ _ = v.Args[3]
+ ptr := v.Args[0]
+ idx := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v.Args[3]
+ v.reset(OpPPC64MOVWstoreidx)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(idx)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64MOVWstorezero_0(v *Value) bool {
// match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
// cond: is16Bit(off1+off2)
@@ -24104,6 +26702,23 @@ func rewriteValuePPC64_OpRsh16Ux16_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh16Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh16Ux16 x y)
// cond:
// result: (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
for {
@@ -24176,6 +26791,23 @@ func rewriteValuePPC64_OpRsh16Ux32_0(v *Value) bool {
return true
}
// match: (Rsh16Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh16Ux32 x y)
// cond:
// result: (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
for {
@@ -24265,6 +26897,23 @@ func rewriteValuePPC64_OpRsh16Ux64_0(v *Value) bool {
return true
}
// match: (Rsh16Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh16Ux64 x y)
// cond:
// result: (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
for {
@@ -24293,6 +26942,23 @@ func rewriteValuePPC64_OpRsh16Ux8_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh16Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh16Ux8 x y)
// cond:
// result: (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
for {
@@ -24323,6 +26989,23 @@ func rewriteValuePPC64_OpRsh16x16_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh16x16 x y)
// cond:
// result: (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
for {
@@ -24395,6 +27078,23 @@ func rewriteValuePPC64_OpRsh16x32_0(v *Value) bool {
return true
}
// match: (Rsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh16x32 x y)
// cond:
// result: (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
for {
@@ -24488,6 +27188,23 @@ func rewriteValuePPC64_OpRsh16x64_0(v *Value) bool {
return true
}
// match: (Rsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh16x64 x y)
// cond:
// result: (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
for {
@@ -24516,6 +27233,23 @@ func rewriteValuePPC64_OpRsh16x8_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh16x8 x y)
// cond:
// result: (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
for {
@@ -24546,6 +27280,21 @@ func rewriteValuePPC64_OpRsh32Ux16_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh32Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh32Ux16 x y)
// cond:
// result: (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
for {
@@ -24612,6 +27361,21 @@ func rewriteValuePPC64_OpRsh32Ux32_0(v *Value) bool {
return true
}
// match: (Rsh32Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh32Ux32 x y)
// cond:
// result: (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
for {
@@ -24694,6 +27458,21 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (Rsh32Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
// match: (Rsh32Ux64 x (AND y (MOVDconst [31])))
// cond:
// result: (SRW x (ANDconst <typ.Int32> [31] y))
@@ -24918,6 +27697,13 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool {
v.AddArg(v0)
return true
}
+ return false
+}
+func rewriteValuePPC64_OpRsh32Ux64_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32Ux64 x y)
// cond:
// result: (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
@@ -24945,6 +27731,21 @@ func rewriteValuePPC64_OpRsh32Ux8_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh32Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh32Ux8 x y)
// cond:
// result: (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
for {
@@ -24973,6 +27774,21 @@ func rewriteValuePPC64_OpRsh32x16_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh32x16 x y)
// cond:
// result: (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
for {
@@ -25039,6 +27855,21 @@ func rewriteValuePPC64_OpRsh32x32_0(v *Value) bool {
return true
}
// match: (Rsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh32x32 x y)
// cond:
// result: (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
for {
@@ -25123,6 +27954,21 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (Rsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
// match: (Rsh32x64 x (AND y (MOVDconst [31])))
// cond:
// result: (SRAW x (ANDconst <typ.Int32> [31] y))
@@ -25347,6 +28193,13 @@ func rewriteValuePPC64_OpRsh32x64_0(v *Value) bool {
v.AddArg(v0)
return true
}
+ return false
+}
+func rewriteValuePPC64_OpRsh32x64_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh32x64 x y)
// cond:
// result: (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
@@ -25374,6 +28227,21 @@ func rewriteValuePPC64_OpRsh32x8_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh32x8 x y)
// cond:
// result: (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
for {
@@ -25402,6 +28270,21 @@ func rewriteValuePPC64_OpRsh64Ux16_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh64Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh64Ux16 x y)
// cond:
// result: (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
for {
@@ -25468,6 +28351,21 @@ func rewriteValuePPC64_OpRsh64Ux32_0(v *Value) bool {
return true
}
// match: (Rsh64Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh64Ux32 x y)
// cond:
// result: (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
for {
@@ -25550,6 +28448,21 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
// match: (Rsh64Ux64 x (AND y (MOVDconst [63])))
// cond:
// result: (SRD x (ANDconst <typ.Int64> [63] y))
@@ -25774,6 +28687,13 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool {
v.AddArg(v0)
return true
}
+ return false
+}
+func rewriteValuePPC64_OpRsh64Ux64_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64Ux64 x y)
// cond:
// result: (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
@@ -25801,6 +28721,21 @@ func rewriteValuePPC64_OpRsh64Ux8_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh64Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh64Ux8 x y)
// cond:
// result: (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
for {
@@ -25829,6 +28764,21 @@ func rewriteValuePPC64_OpRsh64x16_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh64x16 x y)
// cond:
// result: (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
for {
@@ -25895,6 +28845,21 @@ func rewriteValuePPC64_OpRsh64x32_0(v *Value) bool {
return true
}
// match: (Rsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh64x32 x y)
// cond:
// result: (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
for {
@@ -25979,6 +28944,21 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
// match: (Rsh64x64 x (AND y (MOVDconst [63])))
// cond:
// result: (SRAD x (ANDconst <typ.Int64> [63] y))
@@ -26203,6 +29183,13 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool {
v.AddArg(v0)
return true
}
+ return false
+}
+func rewriteValuePPC64_OpRsh64x64_10(v *Value) bool {
+ b := v.Block
+ _ = b
+ typ := &b.Func.Config.Types
+ _ = typ
// match: (Rsh64x64 x y)
// cond:
// result: (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
@@ -26230,6 +29217,21 @@ func rewriteValuePPC64_OpRsh64x8_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg(x)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh64x8 x y)
// cond:
// result: (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
for {
@@ -26258,6 +29260,23 @@ func rewriteValuePPC64_OpRsh8Ux16_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh8Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh8Ux16 x y)
// cond:
// result: (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
for {
@@ -26330,6 +29349,23 @@ func rewriteValuePPC64_OpRsh8Ux32_0(v *Value) bool {
return true
}
// match: (Rsh8Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh8Ux32 x y)
// cond:
// result: (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
for {
@@ -26419,6 +29455,23 @@ func rewriteValuePPC64_OpRsh8Ux64_0(v *Value) bool {
return true
}
// match: (Rsh8Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh8Ux64 x y)
// cond:
// result: (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
for {
@@ -26447,6 +29500,23 @@ func rewriteValuePPC64_OpRsh8Ux8_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh8Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh8Ux8 x y)
// cond:
// result: (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
for {
@@ -26477,6 +29547,23 @@ func rewriteValuePPC64_OpRsh8x16_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh8x16 x y)
// cond:
// result: (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
for {
@@ -26549,6 +29636,23 @@ func rewriteValuePPC64_OpRsh8x32_0(v *Value) bool {
return true
}
// match: (Rsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh8x32 x y)
// cond:
// result: (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
for {
@@ -26642,6 +29746,23 @@ func rewriteValuePPC64_OpRsh8x64_0(v *Value) bool {
return true
}
// match: (Rsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh8x64 x y)
// cond:
// result: (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
for {
@@ -26670,6 +29791,23 @@ func rewriteValuePPC64_OpRsh8x8_0(v *Value) bool {
typ := &b.Func.Config.Types
_ = typ
// match: (Rsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ _ = v.Args[1]
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Rsh8x8 x y)
// cond:
// result: (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
for {
@@ -27043,69 +30181,141 @@ func rewriteValuePPC64_OpTrunc_0(v *Value) bool {
}
func rewriteValuePPC64_OpTrunc16to8_0(v *Value) bool {
// match: (Trunc16to8 x)
- // cond:
+ // cond: isSigned(x.Type)
// result: (MOVBreg x)
for {
x := v.Args[0]
+ if !(isSigned(x.Type)) {
+ break
+ }
v.reset(OpPPC64MOVBreg)
v.AddArg(x)
return true
}
+ // match: (Trunc16to8 x)
+ // cond:
+ // result: (MOVBZreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
}
func rewriteValuePPC64_OpTrunc32to16_0(v *Value) bool {
// match: (Trunc32to16 x)
- // cond:
+ // cond: isSigned(x.Type)
// result: (MOVHreg x)
for {
x := v.Args[0]
+ if !(isSigned(x.Type)) {
+ break
+ }
v.reset(OpPPC64MOVHreg)
v.AddArg(x)
return true
}
+ // match: (Trunc32to16 x)
+ // cond:
+ // result: (MOVHZreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
}
func rewriteValuePPC64_OpTrunc32to8_0(v *Value) bool {
// match: (Trunc32to8 x)
- // cond:
+ // cond: isSigned(x.Type)
// result: (MOVBreg x)
for {
x := v.Args[0]
+ if !(isSigned(x.Type)) {
+ break
+ }
v.reset(OpPPC64MOVBreg)
v.AddArg(x)
return true
}
+ // match: (Trunc32to8 x)
+ // cond:
+ // result: (MOVBZreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
}
func rewriteValuePPC64_OpTrunc64to16_0(v *Value) bool {
// match: (Trunc64to16 x)
- // cond:
+ // cond: isSigned(x.Type)
// result: (MOVHreg x)
for {
x := v.Args[0]
+ if !(isSigned(x.Type)) {
+ break
+ }
v.reset(OpPPC64MOVHreg)
v.AddArg(x)
return true
}
+ // match: (Trunc64to16 x)
+ // cond:
+ // result: (MOVHZreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
}
func rewriteValuePPC64_OpTrunc64to32_0(v *Value) bool {
// match: (Trunc64to32 x)
- // cond:
+ // cond: isSigned(x.Type)
// result: (MOVWreg x)
for {
x := v.Args[0]
+ if !(isSigned(x.Type)) {
+ break
+ }
v.reset(OpPPC64MOVWreg)
v.AddArg(x)
return true
}
+ // match: (Trunc64to32 x)
+ // cond:
+ // result: (MOVWZreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v.AddArg(x)
+ return true
+ }
}
func rewriteValuePPC64_OpTrunc64to8_0(v *Value) bool {
// match: (Trunc64to8 x)
- // cond:
+ // cond: isSigned(x.Type)
// result: (MOVBreg x)
for {
x := v.Args[0]
+ if !(isSigned(x.Type)) {
+ break
+ }
v.reset(OpPPC64MOVBreg)
v.AddArg(x)
return true
}
+ // match: (Trunc64to8 x)
+ // cond:
+ // result: (MOVBZreg x)
+ for {
+ x := v.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
}
func rewriteValuePPC64_OpWB_0(v *Value) bool {
// match: (WB {fn} destptr srcptr mem)
diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go
index c07651ef0e..b92556db90 100644
--- a/src/cmd/compile/internal/ssa/rewriteWasm.go
+++ b/src/cmd/compile/internal/ssa/rewriteWasm.go
@@ -5071,7 +5071,7 @@ func rewriteValueWasm_OpWasmF64Add_0(v *Value) bool {
_ = typ
// match: (F64Add (F64Const [x]) (F64Const [y]))
// cond:
- // result: (F64Const [f2i(i2f(x) + i2f(y))])
+ // result: (F64Const [auxFrom64F(auxTo64F(x) + auxTo64F(y))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -5085,7 +5085,7 @@ func rewriteValueWasm_OpWasmF64Add_0(v *Value) bool {
}
y := v_1.AuxInt
v.reset(OpWasmF64Const)
- v.AuxInt = f2i(i2f(x) + i2f(y))
+ v.AuxInt = auxFrom64F(auxTo64F(x) + auxTo64F(y))
return true
}
// match: (F64Add (F64Const [x]) y)
@@ -5115,7 +5115,7 @@ func rewriteValueWasm_OpWasmF64Mul_0(v *Value) bool {
_ = typ
// match: (F64Mul (F64Const [x]) (F64Const [y]))
// cond:
- // result: (F64Const [f2i(i2f(x) * i2f(y))])
+ // result: (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -5129,7 +5129,7 @@ func rewriteValueWasm_OpWasmF64Mul_0(v *Value) bool {
}
y := v_1.AuxInt
v.reset(OpWasmF64Const)
- v.AuxInt = f2i(i2f(x) * i2f(y))
+ v.AuxInt = auxFrom64F(auxTo64F(x) * auxTo64F(y))
return true
}
// match: (F64Mul (F64Const [x]) y)
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index 81bebede46..2f239faa49 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -2409,7 +2409,7 @@ func rewriteValuegeneric_OpAdd32_30(v *Value) bool {
func rewriteValuegeneric_OpAdd32F_0(v *Value) bool {
// match: (Add32F (Const32F [c]) (Const32F [d]))
// cond:
- // result: (Const32F [f2i(float64(i2f32(c) + i2f32(d)))])
+ // result: (Const32F [auxFrom32F(auxTo32F(c) + auxTo32F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -2423,12 +2423,12 @@ func rewriteValuegeneric_OpAdd32F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConst32F)
- v.AuxInt = f2i(float64(i2f32(c) + i2f32(d)))
+ v.AuxInt = auxFrom32F(auxTo32F(c) + auxTo32F(d))
return true
}
// match: (Add32F (Const32F [d]) (Const32F [c]))
// cond:
- // result: (Const32F [f2i(float64(i2f32(c) + i2f32(d)))])
+ // result: (Const32F [auxFrom32F(auxTo32F(c) + auxTo32F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -2442,43 +2442,7 @@ func rewriteValuegeneric_OpAdd32F_0(v *Value) bool {
}
c := v_1.AuxInt
v.reset(OpConst32F)
- v.AuxInt = f2i(float64(i2f32(c) + i2f32(d)))
- return true
- }
- // match: (Add32F x (Const32F [0]))
- // cond:
- // result: x
- for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst32F {
- break
- }
- if v_1.AuxInt != 0 {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (Add32F (Const32F [0]) x)
- // cond:
- // result: x
- for {
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpConst32F {
- break
- }
- if v_0.AuxInt != 0 {
- break
- }
- x := v.Args[1]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.AuxInt = auxFrom32F(auxTo32F(c) + auxTo32F(d))
return true
}
return false
@@ -3454,7 +3418,7 @@ func rewriteValuegeneric_OpAdd64_30(v *Value) bool {
func rewriteValuegeneric_OpAdd64F_0(v *Value) bool {
// match: (Add64F (Const64F [c]) (Const64F [d]))
// cond:
- // result: (Const64F [f2i(i2f(c) + i2f(d))])
+ // result: (Const64F [auxFrom64F(auxTo64F(c) + auxTo64F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -3468,12 +3432,12 @@ func rewriteValuegeneric_OpAdd64F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConst64F)
- v.AuxInt = f2i(i2f(c) + i2f(d))
+ v.AuxInt = auxFrom64F(auxTo64F(c) + auxTo64F(d))
return true
}
// match: (Add64F (Const64F [d]) (Const64F [c]))
// cond:
- // result: (Const64F [f2i(i2f(c) + i2f(d))])
+ // result: (Const64F [auxFrom64F(auxTo64F(c) + auxTo64F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -3487,43 +3451,7 @@ func rewriteValuegeneric_OpAdd64F_0(v *Value) bool {
}
c := v_1.AuxInt
v.reset(OpConst64F)
- v.AuxInt = f2i(i2f(c) + i2f(d))
- return true
- }
- // match: (Add64F x (Const64F [0]))
- // cond:
- // result: x
- for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64F {
- break
- }
- if v_1.AuxInt != 0 {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
- return true
- }
- // match: (Add64F (Const64F [0]) x)
- // cond:
- // result: x
- for {
- _ = v.Args[1]
- v_0 := v.Args[0]
- if v_0.Op != OpConst64F {
- break
- }
- if v_0.AuxInt != 0 {
- break
- }
- x := v.Args[1]
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.AuxInt = auxFrom64F(auxTo64F(c) + auxTo64F(d))
return true
}
return false
@@ -7566,7 +7494,7 @@ func rewriteValuegeneric_OpConvert_0(v *Value) bool {
func rewriteValuegeneric_OpCvt32Fto32_0(v *Value) bool {
// match: (Cvt32Fto32 (Const32F [c]))
// cond:
- // result: (Const32 [int64(int32(i2f(c)))])
+ // result: (Const32 [int64(int32(auxTo32F(c)))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst32F {
@@ -7574,7 +7502,7 @@ func rewriteValuegeneric_OpCvt32Fto32_0(v *Value) bool {
}
c := v_0.AuxInt
v.reset(OpConst32)
- v.AuxInt = int64(int32(i2f(c)))
+ v.AuxInt = int64(int32(auxTo32F(c)))
return true
}
return false
@@ -7582,7 +7510,7 @@ func rewriteValuegeneric_OpCvt32Fto32_0(v *Value) bool {
func rewriteValuegeneric_OpCvt32Fto64_0(v *Value) bool {
// match: (Cvt32Fto64 (Const32F [c]))
// cond:
- // result: (Const64 [int64(i2f(c))])
+ // result: (Const64 [int64(auxTo32F(c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst32F {
@@ -7590,7 +7518,7 @@ func rewriteValuegeneric_OpCvt32Fto64_0(v *Value) bool {
}
c := v_0.AuxInt
v.reset(OpConst64)
- v.AuxInt = int64(i2f(c))
+ v.AuxInt = int64(auxTo32F(c))
return true
}
return false
@@ -7614,7 +7542,7 @@ func rewriteValuegeneric_OpCvt32Fto64F_0(v *Value) bool {
func rewriteValuegeneric_OpCvt32to32F_0(v *Value) bool {
// match: (Cvt32to32F (Const32 [c]))
// cond:
- // result: (Const32F [f2i(float64(float32(int32(c))))])
+ // result: (Const32F [auxFrom32F(float32(int32(c)))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst32 {
@@ -7622,7 +7550,7 @@ func rewriteValuegeneric_OpCvt32to32F_0(v *Value) bool {
}
c := v_0.AuxInt
v.reset(OpConst32F)
- v.AuxInt = f2i(float64(float32(int32(c))))
+ v.AuxInt = auxFrom32F(float32(int32(c)))
return true
}
return false
@@ -7630,7 +7558,7 @@ func rewriteValuegeneric_OpCvt32to32F_0(v *Value) bool {
func rewriteValuegeneric_OpCvt32to64F_0(v *Value) bool {
// match: (Cvt32to64F (Const32 [c]))
// cond:
- // result: (Const64F [f2i(float64(int32(c)))])
+ // result: (Const64F [auxFrom64F(float64(int32(c)))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst32 {
@@ -7638,7 +7566,7 @@ func rewriteValuegeneric_OpCvt32to64F_0(v *Value) bool {
}
c := v_0.AuxInt
v.reset(OpConst64F)
- v.AuxInt = f2i(float64(int32(c)))
+ v.AuxInt = auxFrom64F(float64(int32(c)))
return true
}
return false
@@ -7646,7 +7574,7 @@ func rewriteValuegeneric_OpCvt32to64F_0(v *Value) bool {
func rewriteValuegeneric_OpCvt64Fto32_0(v *Value) bool {
// match: (Cvt64Fto32 (Const64F [c]))
// cond:
- // result: (Const32 [int64(int32(i2f(c)))])
+ // result: (Const32 [int64(int32(auxTo64F(c)))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64F {
@@ -7654,7 +7582,7 @@ func rewriteValuegeneric_OpCvt64Fto32_0(v *Value) bool {
}
c := v_0.AuxInt
v.reset(OpConst32)
- v.AuxInt = int64(int32(i2f(c)))
+ v.AuxInt = int64(int32(auxTo64F(c)))
return true
}
return false
@@ -7662,7 +7590,7 @@ func rewriteValuegeneric_OpCvt64Fto32_0(v *Value) bool {
func rewriteValuegeneric_OpCvt64Fto32F_0(v *Value) bool {
// match: (Cvt64Fto32F (Const64F [c]))
// cond:
- // result: (Const32F [f2i(float64(i2f32(c)))])
+ // result: (Const32F [auxFrom32F(float32(auxTo64F(c)))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64F {
@@ -7670,7 +7598,7 @@ func rewriteValuegeneric_OpCvt64Fto32F_0(v *Value) bool {
}
c := v_0.AuxInt
v.reset(OpConst32F)
- v.AuxInt = f2i(float64(i2f32(c)))
+ v.AuxInt = auxFrom32F(float32(auxTo64F(c)))
return true
}
return false
@@ -7678,7 +7606,7 @@ func rewriteValuegeneric_OpCvt64Fto32F_0(v *Value) bool {
func rewriteValuegeneric_OpCvt64Fto64_0(v *Value) bool {
// match: (Cvt64Fto64 (Const64F [c]))
// cond:
- // result: (Const64 [int64(i2f(c))])
+ // result: (Const64 [int64(auxTo64F(c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64F {
@@ -7686,7 +7614,7 @@ func rewriteValuegeneric_OpCvt64Fto64_0(v *Value) bool {
}
c := v_0.AuxInt
v.reset(OpConst64)
- v.AuxInt = int64(i2f(c))
+ v.AuxInt = int64(auxTo64F(c))
return true
}
return false
@@ -7694,7 +7622,7 @@ func rewriteValuegeneric_OpCvt64Fto64_0(v *Value) bool {
func rewriteValuegeneric_OpCvt64to32F_0(v *Value) bool {
// match: (Cvt64to32F (Const64 [c]))
// cond:
- // result: (Const32F [f2i(float64(float32(c)))])
+ // result: (Const32F [auxFrom32F(float32(c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
@@ -7702,7 +7630,7 @@ func rewriteValuegeneric_OpCvt64to32F_0(v *Value) bool {
}
c := v_0.AuxInt
v.reset(OpConst32F)
- v.AuxInt = f2i(float64(float32(c)))
+ v.AuxInt = auxFrom32F(float32(c))
return true
}
return false
@@ -7710,7 +7638,7 @@ func rewriteValuegeneric_OpCvt64to32F_0(v *Value) bool {
func rewriteValuegeneric_OpCvt64to64F_0(v *Value) bool {
// match: (Cvt64to64F (Const64 [c]))
// cond:
- // result: (Const64F [f2i(float64(c))])
+ // result: (Const64F [auxFrom64F(float64(c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64 {
@@ -7718,7 +7646,7 @@ func rewriteValuegeneric_OpCvt64to64F_0(v *Value) bool {
}
c := v_0.AuxInt
v.reset(OpConst64F)
- v.AuxInt = f2i(float64(c))
+ v.AuxInt = auxFrom64F(float64(c))
return true
}
return false
@@ -8342,7 +8270,7 @@ func rewriteValuegeneric_OpDiv32F_0(v *Value) bool {
_ = b
// match: (Div32F (Const32F [c]) (Const32F [d]))
// cond:
- // result: (Const32F [f2i(float64(i2f32(c) / i2f32(d)))])
+ // result: (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -8356,12 +8284,12 @@ func rewriteValuegeneric_OpDiv32F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConst32F)
- v.AuxInt = f2i(float64(i2f32(c) / i2f32(d)))
+ v.AuxInt = auxFrom32F(auxTo32F(c) / auxTo32F(d))
return true
}
// match: (Div32F x (Const32F <t> [c]))
- // cond: reciprocalExact32(float32(i2f(c)))
- // result: (Mul32F x (Const32F <t> [f2i(1/i2f(c))]))
+ // cond: reciprocalExact32(auxTo32F(c))
+ // result: (Mul32F x (Const32F <t> [auxFrom32F(1/auxTo32F(c))]))
for {
_ = v.Args[1]
x := v.Args[0]
@@ -8371,13 +8299,13 @@ func rewriteValuegeneric_OpDiv32F_0(v *Value) bool {
}
t := v_1.Type
c := v_1.AuxInt
- if !(reciprocalExact32(float32(i2f(c)))) {
+ if !(reciprocalExact32(auxTo32F(c))) {
break
}
v.reset(OpMul32F)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpConst32F, t)
- v0.AuxInt = f2i(1 / i2f(c))
+ v0.AuxInt = auxFrom32F(1 / auxTo32F(c))
v.AddArg(v0)
return true
}
@@ -8866,7 +8794,7 @@ func rewriteValuegeneric_OpDiv64F_0(v *Value) bool {
_ = b
// match: (Div64F (Const64F [c]) (Const64F [d]))
// cond:
- // result: (Const64F [f2i(i2f(c) / i2f(d))])
+ // result: (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -8880,12 +8808,12 @@ func rewriteValuegeneric_OpDiv64F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConst64F)
- v.AuxInt = f2i(i2f(c) / i2f(d))
+ v.AuxInt = auxFrom64F(auxTo64F(c) / auxTo64F(d))
return true
}
// match: (Div64F x (Const64F <t> [c]))
- // cond: reciprocalExact64(i2f(c))
- // result: (Mul64F x (Const64F <t> [f2i(1/i2f(c))]))
+ // cond: reciprocalExact64(auxTo64F(c))
+ // result: (Mul64F x (Const64F <t> [auxFrom64F(1/auxTo64F(c))]))
for {
_ = v.Args[1]
x := v.Args[0]
@@ -8895,13 +8823,13 @@ func rewriteValuegeneric_OpDiv64F_0(v *Value) bool {
}
t := v_1.Type
c := v_1.AuxInt
- if !(reciprocalExact64(i2f(c))) {
+ if !(reciprocalExact64(auxTo64F(c))) {
break
}
v.reset(OpMul64F)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpConst64F, t)
- v0.AuxInt = f2i(1 / i2f(c))
+ v0.AuxInt = auxFrom64F(1 / auxTo64F(c))
v.AddArg(v0)
return true
}
@@ -9802,7 +9730,7 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool {
func rewriteValuegeneric_OpEq32F_0(v *Value) bool {
// match: (Eq32F (Const32F [c]) (Const32F [d]))
// cond:
- // result: (ConstBool [b2i(i2f(c) == i2f(d))])
+ // result: (ConstBool [b2i(auxTo32F(c) == auxTo32F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -9816,12 +9744,12 @@ func rewriteValuegeneric_OpEq32F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) == i2f(d))
+ v.AuxInt = b2i(auxTo32F(c) == auxTo32F(d))
return true
}
// match: (Eq32F (Const32F [d]) (Const32F [c]))
// cond:
- // result: (ConstBool [b2i(i2f(c) == i2f(d))])
+ // result: (ConstBool [b2i(auxTo32F(c) == auxTo32F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -9835,7 +9763,7 @@ func rewriteValuegeneric_OpEq32F_0(v *Value) bool {
}
c := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) == i2f(d))
+ v.AuxInt = b2i(auxTo32F(c) == auxTo32F(d))
return true
}
return false
@@ -10081,7 +10009,7 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool {
func rewriteValuegeneric_OpEq64F_0(v *Value) bool {
// match: (Eq64F (Const64F [c]) (Const64F [d]))
// cond:
- // result: (ConstBool [b2i(i2f(c) == i2f(d))])
+ // result: (ConstBool [b2i(auxTo64F(c) == auxTo64F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -10095,12 +10023,12 @@ func rewriteValuegeneric_OpEq64F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) == i2f(d))
+ v.AuxInt = b2i(auxTo64F(c) == auxTo64F(d))
return true
}
// match: (Eq64F (Const64F [d]) (Const64F [c]))
// cond:
- // result: (ConstBool [b2i(i2f(c) == i2f(d))])
+ // result: (ConstBool [b2i(auxTo64F(c) == auxTo64F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -10114,7 +10042,7 @@ func rewriteValuegeneric_OpEq64F_0(v *Value) bool {
}
c := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) == i2f(d))
+ v.AuxInt = b2i(auxTo64F(c) == auxTo64F(d))
return true
}
return false
@@ -11077,7 +11005,7 @@ func rewriteValuegeneric_OpGeq32_0(v *Value) bool {
func rewriteValuegeneric_OpGeq32F_0(v *Value) bool {
// match: (Geq32F (Const32F [c]) (Const32F [d]))
// cond:
- // result: (ConstBool [b2i(i2f(c) >= i2f(d))])
+ // result: (ConstBool [b2i(auxTo32F(c) >= auxTo32F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -11091,7 +11019,7 @@ func rewriteValuegeneric_OpGeq32F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) >= i2f(d))
+ v.AuxInt = b2i(auxTo32F(c) >= auxTo32F(d))
return true
}
return false
@@ -11143,7 +11071,7 @@ func rewriteValuegeneric_OpGeq64_0(v *Value) bool {
func rewriteValuegeneric_OpGeq64F_0(v *Value) bool {
// match: (Geq64F (Const64F [c]) (Const64F [d]))
// cond:
- // result: (ConstBool [b2i(i2f(c) >= i2f(d))])
+ // result: (ConstBool [b2i(auxTo64F(c) >= auxTo64F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -11157,7 +11085,7 @@ func rewriteValuegeneric_OpGeq64F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) >= i2f(d))
+ v.AuxInt = b2i(auxTo64F(c) >= auxTo64F(d))
return true
}
return false
@@ -11297,7 +11225,7 @@ func rewriteValuegeneric_OpGreater32_0(v *Value) bool {
func rewriteValuegeneric_OpGreater32F_0(v *Value) bool {
// match: (Greater32F (Const32F [c]) (Const32F [d]))
// cond:
- // result: (ConstBool [b2i(i2f(c) > i2f(d))])
+ // result: (ConstBool [b2i(auxTo32F(c) > auxTo32F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -11311,7 +11239,7 @@ func rewriteValuegeneric_OpGreater32F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) > i2f(d))
+ v.AuxInt = b2i(auxTo32F(c) > auxTo32F(d))
return true
}
return false
@@ -11363,7 +11291,7 @@ func rewriteValuegeneric_OpGreater64_0(v *Value) bool {
func rewriteValuegeneric_OpGreater64F_0(v *Value) bool {
// match: (Greater64F (Const64F [c]) (Const64F [d]))
// cond:
- // result: (ConstBool [b2i(i2f(c) > i2f(d))])
+ // result: (ConstBool [b2i(auxTo64F(c) > auxTo64F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -11377,7 +11305,7 @@ func rewriteValuegeneric_OpGreater64F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) > i2f(d))
+ v.AuxInt = b2i(auxTo64F(c) > auxTo64F(d))
return true
}
return false
@@ -12945,7 +12873,7 @@ func rewriteValuegeneric_OpLeq32_0(v *Value) bool {
func rewriteValuegeneric_OpLeq32F_0(v *Value) bool {
// match: (Leq32F (Const32F [c]) (Const32F [d]))
// cond:
- // result: (ConstBool [b2i(i2f(c) <= i2f(d))])
+ // result: (ConstBool [b2i(auxTo32F(c) <= auxTo32F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -12959,7 +12887,7 @@ func rewriteValuegeneric_OpLeq32F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) <= i2f(d))
+ v.AuxInt = b2i(auxTo32F(c) <= auxTo32F(d))
return true
}
return false
@@ -13011,7 +12939,7 @@ func rewriteValuegeneric_OpLeq64_0(v *Value) bool {
func rewriteValuegeneric_OpLeq64F_0(v *Value) bool {
// match: (Leq64F (Const64F [c]) (Const64F [d]))
// cond:
- // result: (ConstBool [b2i(i2f(c) <= i2f(d))])
+ // result: (ConstBool [b2i(auxTo64F(c) <= auxTo64F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -13025,7 +12953,7 @@ func rewriteValuegeneric_OpLeq64F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) <= i2f(d))
+ v.AuxInt = b2i(auxTo64F(c) <= auxTo64F(d))
return true
}
return false
@@ -13165,7 +13093,7 @@ func rewriteValuegeneric_OpLess32_0(v *Value) bool {
func rewriteValuegeneric_OpLess32F_0(v *Value) bool {
// match: (Less32F (Const32F [c]) (Const32F [d]))
// cond:
- // result: (ConstBool [b2i(i2f(c) < i2f(d))])
+ // result: (ConstBool [b2i(auxTo32F(c) < auxTo32F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -13179,7 +13107,7 @@ func rewriteValuegeneric_OpLess32F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) < i2f(d))
+ v.AuxInt = b2i(auxTo32F(c) < auxTo32F(d))
return true
}
return false
@@ -13231,7 +13159,7 @@ func rewriteValuegeneric_OpLess64_0(v *Value) bool {
func rewriteValuegeneric_OpLess64F_0(v *Value) bool {
// match: (Less64F (Const64F [c]) (Const64F [d]))
// cond:
- // result: (ConstBool [b2i(i2f(c) < i2f(d))])
+ // result: (ConstBool [b2i(auxTo64F(c) < auxTo64F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -13245,7 +13173,7 @@ func rewriteValuegeneric_OpLess64F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) < i2f(d))
+ v.AuxInt = b2i(auxTo64F(c) < auxTo64F(d))
return true
}
return false
@@ -13483,7 +13411,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool {
}
// match: (Load <t1> p1 (Store {t2} p2 (Const32 [x]) _))
// cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1)
- // result: (Const32F [f2i(extend32Fto64F(math.Float32frombits(uint32(x))))])
+ // result: (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
for {
t1 := v.Type
_ = v.Args[1]
@@ -13504,7 +13432,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool {
break
}
v.reset(OpConst32F)
- v.AuxInt = f2i(extend32Fto64F(math.Float32frombits(uint32(x))))
+ v.AuxInt = auxFrom32F(math.Float32frombits(uint32(x)))
return true
}
// match: (Load <t1> p1 (Store {t2} p2 (Const64F [x]) _))
@@ -13535,7 +13463,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool {
}
// match: (Load <t1> p1 (Store {t2} p2 (Const32F [x]) _))
// cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1)
- // result: (Const32 [int64(int32(math.Float32bits(truncate64Fto32F(i2f(x)))))])
+ // result: (Const32 [int64(int32(math.Float32bits(auxTo32F(x))))])
for {
t1 := v.Type
_ = v.Args[1]
@@ -13556,7 +13484,7 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool {
break
}
v.reset(OpConst32)
- v.AuxInt = int64(int32(math.Float32bits(truncate64Fto32F(i2f(x)))))
+ v.AuxInt = int64(int32(math.Float32bits(auxTo32F(x))))
return true
}
// match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ mem:(Zero [n] p3 _)))
@@ -16223,9 +16151,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
v.AddArg(v0)
return true
}
- // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _)))
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _)))
// cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && o2 == sizeof(t3) && n == sizeof(t2) + sizeof(t3)
- // result: (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1 (Store {t3} (OffPtr <t3.(*types.Type)> [0] dst) d2 mem))
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
for {
n := v.AuxInt
t1 := v.Aux
@@ -16242,6 +16170,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
if op2.Op != OpOffPtr {
break
}
+ tt2 := op2.Type
o2 := op2.AuxInt
p2 := op2.Args[0]
d1 := mem.Args[1]
@@ -16255,6 +16184,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
if op3.Op != OpOffPtr {
break
}
+ tt3 := op3.Type
if op3.AuxInt != 0 {
break
}
@@ -16265,14 +16195,14 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
}
v.reset(OpStore)
v.Aux = t2
- v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
v0.AuxInt = o2
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(d1)
v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = t3
- v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
v2.AuxInt = 0
v2.AddArg(dst)
v1.AddArg(v2)
@@ -16281,9 +16211,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
v.AddArg(v1)
return true
}
- // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [0] p4) d3 _))))
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _))))
// cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) + sizeof(t4)
- // result: (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1 (Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2 (Store {t4} (OffPtr <t4.(*types.Type)> [0] dst) d3 mem)))
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
for {
n := v.AuxInt
t1 := v.Aux
@@ -16300,6 +16230,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
if op2.Op != OpOffPtr {
break
}
+ tt2 := op2.Type
o2 := op2.AuxInt
p2 := op2.Args[0]
d1 := mem.Args[1]
@@ -16313,6 +16244,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
if op3.Op != OpOffPtr {
break
}
+ tt3 := op3.Type
o3 := op3.AuxInt
p3 := op3.Args[0]
d2 := mem_2.Args[1]
@@ -16326,6 +16258,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
if op4.Op != OpOffPtr {
break
}
+ tt4 := op4.Type
if op4.AuxInt != 0 {
break
}
@@ -16336,21 +16269,21 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
}
v.reset(OpStore)
v.Aux = t2
- v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
v0.AuxInt = o2
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(d1)
v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = t3
- v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
v2.AuxInt = o3
v2.AddArg(dst)
v1.AddArg(v2)
v1.AddArg(d2)
v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v3.Aux = t4
- v4 := b.NewValue0(v.Pos, OpOffPtr, t4.(*types.Type))
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
v4.AuxInt = 0
v4.AddArg(dst)
v3.AddArg(v4)
@@ -16360,9 +16293,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
v.AddArg(v1)
return true
}
- // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [o4] p4) d3 (Store {t5} op5:(OffPtr [0] p5) d4 _)))))
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3 (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _)))))
// cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == sizeof(t5) && o3-o4 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) + sizeof(t4) + sizeof(t5)
- // result: (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1 (Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2 (Store {t4} (OffPtr <t4.(*types.Type)> [o4] dst) d3 (Store {t5} (OffPtr <t5.(*types.Type)> [0] dst) d4 mem))))
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
for {
n := v.AuxInt
t1 := v.Aux
@@ -16379,6 +16312,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
if op2.Op != OpOffPtr {
break
}
+ tt2 := op2.Type
o2 := op2.AuxInt
p2 := op2.Args[0]
d1 := mem.Args[1]
@@ -16392,6 +16326,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
if op3.Op != OpOffPtr {
break
}
+ tt3 := op3.Type
o3 := op3.AuxInt
p3 := op3.Args[0]
d2 := mem_2.Args[1]
@@ -16405,6 +16340,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
if op4.Op != OpOffPtr {
break
}
+ tt4 := op4.Type
o4 := op4.AuxInt
p4 := op4.Args[0]
d3 := mem_2_2.Args[1]
@@ -16418,6 +16354,7 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
if op5.Op != OpOffPtr {
break
}
+ tt5 := op5.Type
if op5.AuxInt != 0 {
break
}
@@ -16428,28 +16365,28 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
}
v.reset(OpStore)
v.Aux = t2
- v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
v0.AuxInt = o2
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(d1)
v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = t3
- v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
v2.AuxInt = o3
v2.AddArg(dst)
v1.AddArg(v2)
v1.AddArg(d2)
v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v3.Aux = t4
- v4 := b.NewValue0(v.Pos, OpOffPtr, t4.(*types.Type))
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
v4.AuxInt = o4
v4.AddArg(dst)
v3.AddArg(v4)
v3.AddArg(d3)
v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v5.Aux = t5
- v6 := b.NewValue0(v.Pos, OpOffPtr, t5.(*types.Type))
+ v6 := b.NewValue0(v.Pos, OpOffPtr, tt5)
v6.AuxInt = 0
v6.AddArg(dst)
v5.AddArg(v6)
@@ -16465,9 +16402,9 @@ func rewriteValuegeneric_OpMove_0(v *Value) bool {
func rewriteValuegeneric_OpMove_10(v *Value) bool {
b := v.Block
_ = b
- // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [0] p3) d2 _))))
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _))))
// cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && o2 == sizeof(t3) && n == sizeof(t2) + sizeof(t3)
- // result: (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1 (Store {t3} (OffPtr <t3.(*types.Type)> [0] dst) d2 mem))
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
for {
n := v.AuxInt
t1 := v.Aux
@@ -16488,6 +16425,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool {
if op2.Op != OpOffPtr {
break
}
+ tt2 := op2.Type
o2 := op2.AuxInt
p2 := op2.Args[0]
d1 := mem_0.Args[1]
@@ -16501,6 +16439,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool {
if op3.Op != OpOffPtr {
break
}
+ tt3 := op3.Type
if op3.AuxInt != 0 {
break
}
@@ -16511,14 +16450,14 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool {
}
v.reset(OpStore)
v.Aux = t2
- v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
v0.AuxInt = o2
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(d1)
v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = t3
- v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
v2.AuxInt = 0
v2.AddArg(dst)
v1.AddArg(v2)
@@ -16527,9 +16466,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool {
v.AddArg(v1)
return true
}
- // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [0] p4) d3 _)))))
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _)))))
// cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) + sizeof(t4)
- // result: (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1 (Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2 (Store {t4} (OffPtr <t4.(*types.Type)> [0] dst) d3 mem)))
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
for {
n := v.AuxInt
t1 := v.Aux
@@ -16550,6 +16489,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool {
if op2.Op != OpOffPtr {
break
}
+ tt2 := op2.Type
o2 := op2.AuxInt
p2 := op2.Args[0]
d1 := mem_0.Args[1]
@@ -16563,6 +16503,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool {
if op3.Op != OpOffPtr {
break
}
+ tt3 := op3.Type
o3 := op3.AuxInt
p3 := op3.Args[0]
d2 := mem_0_2.Args[1]
@@ -16576,6 +16517,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool {
if op4.Op != OpOffPtr {
break
}
+ tt4 := op4.Type
if op4.AuxInt != 0 {
break
}
@@ -16586,21 +16528,21 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool {
}
v.reset(OpStore)
v.Aux = t2
- v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
v0.AuxInt = o2
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(d1)
v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = t3
- v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
v2.AuxInt = o3
v2.AddArg(dst)
v1.AddArg(v2)
v1.AddArg(d2)
v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v3.Aux = t4
- v4 := b.NewValue0(v.Pos, OpOffPtr, t4.(*types.Type))
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
v4.AuxInt = 0
v4.AddArg(dst)
v3.AddArg(v4)
@@ -16610,9 +16552,9 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool {
v.AddArg(v1)
return true
}
- // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr [o2] p2) d1 (Store {t3} op3:(OffPtr [o3] p3) d2 (Store {t4} op4:(OffPtr [o4] p4) d3 (Store {t5} op5:(OffPtr [0] p5) d4 _))))))
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3 (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _))))))
// cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && alignof(t2) <= alignof(t1) && alignof(t3) <= alignof(t1) && alignof(t4) <= alignof(t1) && alignof(t5) <= alignof(t1) && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == sizeof(t5) && o3-o4 == sizeof(t4) && o2-o3 == sizeof(t3) && n == sizeof(t2) + sizeof(t3) + sizeof(t4) + sizeof(t5)
- // result: (Store {t2} (OffPtr <t2.(*types.Type)> [o2] dst) d1 (Store {t3} (OffPtr <t3.(*types.Type)> [o3] dst) d2 (Store {t4} (OffPtr <t4.(*types.Type)> [o4] dst) d3 (Store {t5} (OffPtr <t5.(*types.Type)> [0] dst) d4 mem))))
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
for {
n := v.AuxInt
t1 := v.Aux
@@ -16633,6 +16575,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool {
if op2.Op != OpOffPtr {
break
}
+ tt2 := op2.Type
o2 := op2.AuxInt
p2 := op2.Args[0]
d1 := mem_0.Args[1]
@@ -16646,6 +16589,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool {
if op3.Op != OpOffPtr {
break
}
+ tt3 := op3.Type
o3 := op3.AuxInt
p3 := op3.Args[0]
d2 := mem_0_2.Args[1]
@@ -16659,6 +16603,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool {
if op4.Op != OpOffPtr {
break
}
+ tt4 := op4.Type
o4 := op4.AuxInt
p4 := op4.Args[0]
d3 := mem_0_2_2.Args[1]
@@ -16672,6 +16617,7 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool {
if op5.Op != OpOffPtr {
break
}
+ tt5 := op5.Type
if op5.AuxInt != 0 {
break
}
@@ -16682,28 +16628,28 @@ func rewriteValuegeneric_OpMove_10(v *Value) bool {
}
v.reset(OpStore)
v.Aux = t2
- v0 := b.NewValue0(v.Pos, OpOffPtr, t2.(*types.Type))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
v0.AuxInt = o2
v0.AddArg(dst)
v.AddArg(v0)
v.AddArg(d1)
v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v1.Aux = t3
- v2 := b.NewValue0(v.Pos, OpOffPtr, t3.(*types.Type))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
v2.AuxInt = o3
v2.AddArg(dst)
v1.AddArg(v2)
v1.AddArg(d2)
v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v3.Aux = t4
- v4 := b.NewValue0(v.Pos, OpOffPtr, t4.(*types.Type))
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
v4.AuxInt = o4
v4.AddArg(dst)
v3.AddArg(v4)
v3.AddArg(d3)
v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
v5.Aux = t5
- v6 := b.NewValue0(v.Pos, OpOffPtr, t5.(*types.Type))
+ v6 := b.NewValue0(v.Pos, OpOffPtr, tt5)
v6.AuxInt = 0
v6.AddArg(dst)
v5.AddArg(v6)
@@ -18320,7 +18266,7 @@ func rewriteValuegeneric_OpMul32_10(v *Value) bool {
func rewriteValuegeneric_OpMul32F_0(v *Value) bool {
// match: (Mul32F (Const32F [c]) (Const32F [d]))
// cond:
- // result: (Const32F [f2i(float64(i2f32(c) * i2f32(d)))])
+ // result: (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -18334,12 +18280,12 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConst32F)
- v.AuxInt = f2i(float64(i2f32(c) * i2f32(d)))
+ v.AuxInt = auxFrom32F(auxTo32F(c) * auxTo32F(d))
return true
}
// match: (Mul32F (Const32F [d]) (Const32F [c]))
// cond:
- // result: (Const32F [f2i(float64(i2f32(c) * i2f32(d)))])
+ // result: (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -18353,10 +18299,10 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool {
}
c := v_1.AuxInt
v.reset(OpConst32F)
- v.AuxInt = f2i(float64(i2f32(c) * i2f32(d)))
+ v.AuxInt = auxFrom32F(auxTo32F(c) * auxTo32F(d))
return true
}
- // match: (Mul32F x (Const32F [f2i(1)]))
+ // match: (Mul32F x (Const32F [auxFrom64F(1)]))
// cond:
// result: x
for {
@@ -18366,7 +18312,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool {
if v_1.Op != OpConst32F {
break
}
- if v_1.AuxInt != f2i(1) {
+ if v_1.AuxInt != auxFrom64F(1) {
break
}
v.reset(OpCopy)
@@ -18374,7 +18320,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (Mul32F (Const32F [f2i(1)]) x)
+ // match: (Mul32F (Const32F [auxFrom64F(1)]) x)
// cond:
// result: x
for {
@@ -18383,7 +18329,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool {
if v_0.Op != OpConst32F {
break
}
- if v_0.AuxInt != f2i(1) {
+ if v_0.AuxInt != auxFrom64F(1) {
break
}
x := v.Args[1]
@@ -18392,7 +18338,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (Mul32F x (Const32F [f2i(-1)]))
+ // match: (Mul32F x (Const32F [auxFrom32F(-1)]))
// cond:
// result: (Neg32F x)
for {
@@ -18402,14 +18348,14 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool {
if v_1.Op != OpConst32F {
break
}
- if v_1.AuxInt != f2i(-1) {
+ if v_1.AuxInt != auxFrom32F(-1) {
break
}
v.reset(OpNeg32F)
v.AddArg(x)
return true
}
- // match: (Mul32F (Const32F [f2i(-1)]) x)
+ // match: (Mul32F (Const32F [auxFrom32F(-1)]) x)
// cond:
// result: (Neg32F x)
for {
@@ -18418,7 +18364,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool {
if v_0.Op != OpConst32F {
break
}
- if v_0.AuxInt != f2i(-1) {
+ if v_0.AuxInt != auxFrom32F(-1) {
break
}
x := v.Args[1]
@@ -18426,7 +18372,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (Mul32F x (Const32F [f2i(2)]))
+ // match: (Mul32F x (Const32F [auxFrom32F(2)]))
// cond:
// result: (Add32F x x)
for {
@@ -18436,7 +18382,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool {
if v_1.Op != OpConst32F {
break
}
- if v_1.AuxInt != f2i(2) {
+ if v_1.AuxInt != auxFrom32F(2) {
break
}
v.reset(OpAdd32F)
@@ -18444,7 +18390,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (Mul32F (Const32F [f2i(2)]) x)
+ // match: (Mul32F (Const32F [auxFrom32F(2)]) x)
// cond:
// result: (Add32F x x)
for {
@@ -18453,7 +18399,7 @@ func rewriteValuegeneric_OpMul32F_0(v *Value) bool {
if v_0.Op != OpConst32F {
break
}
- if v_0.AuxInt != f2i(2) {
+ if v_0.AuxInt != auxFrom32F(2) {
break
}
x := v.Args[1]
@@ -19001,7 +18947,7 @@ func rewriteValuegeneric_OpMul64_10(v *Value) bool {
func rewriteValuegeneric_OpMul64F_0(v *Value) bool {
// match: (Mul64F (Const64F [c]) (Const64F [d]))
// cond:
- // result: (Const64F [f2i(i2f(c) * i2f(d))])
+ // result: (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -19015,12 +18961,12 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConst64F)
- v.AuxInt = f2i(i2f(c) * i2f(d))
+ v.AuxInt = auxFrom64F(auxTo64F(c) * auxTo64F(d))
return true
}
// match: (Mul64F (Const64F [d]) (Const64F [c]))
// cond:
- // result: (Const64F [f2i(i2f(c) * i2f(d))])
+ // result: (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -19034,10 +18980,10 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool {
}
c := v_1.AuxInt
v.reset(OpConst64F)
- v.AuxInt = f2i(i2f(c) * i2f(d))
+ v.AuxInt = auxFrom64F(auxTo64F(c) * auxTo64F(d))
return true
}
- // match: (Mul64F x (Const64F [f2i(1)]))
+ // match: (Mul64F x (Const64F [auxFrom64F(1)]))
// cond:
// result: x
for {
@@ -19047,7 +18993,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool {
if v_1.Op != OpConst64F {
break
}
- if v_1.AuxInt != f2i(1) {
+ if v_1.AuxInt != auxFrom64F(1) {
break
}
v.reset(OpCopy)
@@ -19055,7 +19001,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (Mul64F (Const64F [f2i(1)]) x)
+ // match: (Mul64F (Const64F [auxFrom64F(1)]) x)
// cond:
// result: x
for {
@@ -19064,7 +19010,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool {
if v_0.Op != OpConst64F {
break
}
- if v_0.AuxInt != f2i(1) {
+ if v_0.AuxInt != auxFrom64F(1) {
break
}
x := v.Args[1]
@@ -19073,7 +19019,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (Mul64F x (Const64F [f2i(-1)]))
+ // match: (Mul64F x (Const64F [auxFrom64F(-1)]))
// cond:
// result: (Neg64F x)
for {
@@ -19083,14 +19029,14 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool {
if v_1.Op != OpConst64F {
break
}
- if v_1.AuxInt != f2i(-1) {
+ if v_1.AuxInt != auxFrom64F(-1) {
break
}
v.reset(OpNeg64F)
v.AddArg(x)
return true
}
- // match: (Mul64F (Const64F [f2i(-1)]) x)
+ // match: (Mul64F (Const64F [auxFrom64F(-1)]) x)
// cond:
// result: (Neg64F x)
for {
@@ -19099,7 +19045,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool {
if v_0.Op != OpConst64F {
break
}
- if v_0.AuxInt != f2i(-1) {
+ if v_0.AuxInt != auxFrom64F(-1) {
break
}
x := v.Args[1]
@@ -19107,7 +19053,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (Mul64F x (Const64F [f2i(2)]))
+ // match: (Mul64F x (Const64F [auxFrom64F(2)]))
// cond:
// result: (Add64F x x)
for {
@@ -19117,7 +19063,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool {
if v_1.Op != OpConst64F {
break
}
- if v_1.AuxInt != f2i(2) {
+ if v_1.AuxInt != auxFrom64F(2) {
break
}
v.reset(OpAdd64F)
@@ -19125,7 +19071,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (Mul64F (Const64F [f2i(2)]) x)
+ // match: (Mul64F (Const64F [auxFrom64F(2)]) x)
// cond:
// result: (Add64F x x)
for {
@@ -19134,7 +19080,7 @@ func rewriteValuegeneric_OpMul64F_0(v *Value) bool {
if v_0.Op != OpConst64F {
break
}
- if v_0.AuxInt != f2i(2) {
+ if v_0.AuxInt != auxFrom64F(2) {
break
}
x := v.Args[1]
@@ -19585,19 +19531,19 @@ func rewriteValuegeneric_OpNeg32_0(v *Value) bool {
}
func rewriteValuegeneric_OpNeg32F_0(v *Value) bool {
// match: (Neg32F (Const32F [c]))
- // cond: i2f(c) != 0
- // result: (Const32F [f2i(-i2f(c))])
+ // cond: auxTo32F(c) != 0
+ // result: (Const32F [auxFrom32F(-auxTo32F(c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst32F {
break
}
c := v_0.AuxInt
- if !(i2f(c) != 0) {
+ if !(auxTo32F(c) != 0) {
break
}
v.reset(OpConst32F)
- v.AuxInt = f2i(-i2f(c))
+ v.AuxInt = auxFrom32F(-auxTo32F(c))
return true
}
return false
@@ -19636,19 +19582,19 @@ func rewriteValuegeneric_OpNeg64_0(v *Value) bool {
}
func rewriteValuegeneric_OpNeg64F_0(v *Value) bool {
// match: (Neg64F (Const64F [c]))
- // cond: i2f(c) != 0
- // result: (Const64F [f2i(-i2f(c))])
+ // cond: auxTo64F(c) != 0
+ // result: (Const64F [auxFrom64F(-auxTo64F(c))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64F {
break
}
c := v_0.AuxInt
- if !(i2f(c) != 0) {
+ if !(auxTo64F(c) != 0) {
break
}
v.reset(OpConst64F)
- v.AuxInt = f2i(-i2f(c))
+ v.AuxInt = auxFrom64F(-auxTo64F(c))
return true
}
return false
@@ -20164,7 +20110,7 @@ func rewriteValuegeneric_OpNeq32_0(v *Value) bool {
func rewriteValuegeneric_OpNeq32F_0(v *Value) bool {
// match: (Neq32F (Const32F [c]) (Const32F [d]))
// cond:
- // result: (ConstBool [b2i(i2f(c) != i2f(d))])
+ // result: (ConstBool [b2i(auxTo32F(c) != auxTo32F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -20178,12 +20124,12 @@ func rewriteValuegeneric_OpNeq32F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) != i2f(d))
+ v.AuxInt = b2i(auxTo32F(c) != auxTo32F(d))
return true
}
// match: (Neq32F (Const32F [d]) (Const32F [c]))
// cond:
- // result: (ConstBool [b2i(i2f(c) != i2f(d))])
+ // result: (ConstBool [b2i(auxTo32F(c) != auxTo32F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -20197,7 +20143,7 @@ func rewriteValuegeneric_OpNeq32F_0(v *Value) bool {
}
c := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) != i2f(d))
+ v.AuxInt = b2i(auxTo32F(c) != auxTo32F(d))
return true
}
return false
@@ -20443,7 +20389,7 @@ func rewriteValuegeneric_OpNeq64_0(v *Value) bool {
func rewriteValuegeneric_OpNeq64F_0(v *Value) bool {
// match: (Neq64F (Const64F [c]) (Const64F [d]))
// cond:
- // result: (ConstBool [b2i(i2f(c) != i2f(d))])
+ // result: (ConstBool [b2i(auxTo64F(c) != auxTo64F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -20457,12 +20403,12 @@ func rewriteValuegeneric_OpNeq64F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) != i2f(d))
+ v.AuxInt = b2i(auxTo64F(c) != auxTo64F(d))
return true
}
// match: (Neq64F (Const64F [d]) (Const64F [c]))
// cond:
- // result: (ConstBool [b2i(i2f(c) != i2f(d))])
+ // result: (ConstBool [b2i(auxTo64F(c) != auxTo64F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -20476,7 +20422,7 @@ func rewriteValuegeneric_OpNeq64F_0(v *Value) bool {
}
c := v_1.AuxInt
v.reset(OpConstBool)
- v.AuxInt = b2i(i2f(c) != i2f(d))
+ v.AuxInt = b2i(auxTo64F(c) != auxTo64F(d))
return true
}
return false
@@ -21412,7 +21358,7 @@ func rewriteValuegeneric_OpNilCheck_0(v *Value) bool {
return true
}
// match: (NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _)
- // cond: isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
+ // cond: isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")
// result: (Invalid)
for {
_ = v.Args[1]
@@ -21435,14 +21381,14 @@ func rewriteValuegeneric_OpNilCheck_0(v *Value) bool {
break
}
sym := v_0_1.Aux
- if !(isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")) {
+ if !(isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
break
}
v.reset(OpInvalid)
return true
}
// match: (NilCheck (OffPtr (Load (OffPtr [c] (SP)) (StaticCall {sym} _))) _)
- // cond: isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")
+ // cond: isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")
// result: (Invalid)
for {
_ = v.Args[1]
@@ -21469,7 +21415,7 @@ func rewriteValuegeneric_OpNilCheck_0(v *Value) bool {
break
}
sym := v_0_0_1.Aux
- if !(isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")) {
+ if !(isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
break
}
v.reset(OpInvalid)
@@ -27601,7 +27547,7 @@ func rewriteValuegeneric_OpSlicemask_0(v *Value) bool {
func rewriteValuegeneric_OpSqrt_0(v *Value) bool {
// match: (Sqrt (Const64F [c]))
// cond:
- // result: (Const64F [f2i(math.Sqrt(i2f(c)))])
+ // result: (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
for {
v_0 := v.Args[0]
if v_0.Op != OpConst64F {
@@ -27609,7 +27555,7 @@ func rewriteValuegeneric_OpSqrt_0(v *Value) bool {
}
c := v_0.AuxInt
v.reset(OpConst64F)
- v.AuxInt = f2i(math.Sqrt(i2f(c)))
+ v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(c)))
return true
}
return false
@@ -29824,7 +29770,7 @@ func rewriteValuegeneric_OpSub32_10(v *Value) bool {
func rewriteValuegeneric_OpSub32F_0(v *Value) bool {
// match: (Sub32F (Const32F [c]) (Const32F [d]))
// cond:
- // result: (Const32F [f2i(float64(i2f32(c) - i2f32(d)))])
+ // result: (Const32F [auxFrom32F(auxTo32F(c) - auxTo32F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -29838,25 +29784,7 @@ func rewriteValuegeneric_OpSub32F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConst32F)
- v.AuxInt = f2i(float64(i2f32(c) - i2f32(d)))
- return true
- }
- // match: (Sub32F x (Const32F [0]))
- // cond:
- // result: x
- for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst32F {
- break
- }
- if v_1.AuxInt != 0 {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.AuxInt = auxFrom32F(auxTo32F(c) - auxTo32F(d))
return true
}
return false
@@ -30248,7 +30176,7 @@ func rewriteValuegeneric_OpSub64_10(v *Value) bool {
func rewriteValuegeneric_OpSub64F_0(v *Value) bool {
// match: (Sub64F (Const64F [c]) (Const64F [d]))
// cond:
- // result: (Const64F [f2i(i2f(c) - i2f(d))])
+ // result: (Const64F [auxFrom64F(auxTo64F(c) - auxTo64F(d))])
for {
_ = v.Args[1]
v_0 := v.Args[0]
@@ -30262,25 +30190,7 @@ func rewriteValuegeneric_OpSub64F_0(v *Value) bool {
}
d := v_1.AuxInt
v.reset(OpConst64F)
- v.AuxInt = f2i(i2f(c) - i2f(d))
- return true
- }
- // match: (Sub64F x (Const64F [0]))
- // cond:
- // result: x
- for {
- _ = v.Args[1]
- x := v.Args[0]
- v_1 := v.Args[1]
- if v_1.Op != OpConst64F {
- break
- }
- if v_1.AuxInt != 0 {
- break
- }
- v.reset(OpCopy)
- v.Type = x.Type
- v.AddArg(x)
+ v.AuxInt = auxFrom64F(auxTo64F(c) - auxTo64F(d))
return true
}
return false
diff --git a/src/cmd/compile/internal/ssa/softfloat.go b/src/cmd/compile/internal/ssa/softfloat.go
index 39829b046c..b41819c6ad 100644
--- a/src/cmd/compile/internal/ssa/softfloat.go
+++ b/src/cmd/compile/internal/ssa/softfloat.go
@@ -25,7 +25,7 @@ func softfloat(f *Func) {
case OpConst32F:
v.Op = OpConst32
v.Type = f.Config.Types.UInt32
- v.AuxInt = int64(int32(math.Float32bits(i2f32(v.AuxInt))))
+ v.AuxInt = int64(int32(math.Float32bits(auxTo32F(v.AuxInt))))
case OpConst64F:
v.Op = OpConst64
v.Type = f.Config.Types.UInt64
diff --git a/src/cmd/compile/internal/ssa/stmtlines_test.go b/src/cmd/compile/internal/ssa/stmtlines_test.go
index 1081f83f6d..c0fc7adab5 100644
--- a/src/cmd/compile/internal/ssa/stmtlines_test.go
+++ b/src/cmd/compile/internal/ssa/stmtlines_test.go
@@ -62,6 +62,9 @@ func TestStmtLines(t *testing.T) {
if pkgname == "runtime" {
continue
}
+ if e.Val(dwarf.AttrStmtList) == nil {
+ continue
+ }
lrdr, err := dw.LineReader(e)
must(err)
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts b/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts
index ad2719185e..6586f243e2 100644
--- a/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts
+++ b/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts
@@ -19,7 +19,7 @@ dy = <Optimized out, as expected>
65: if len(os.Args) > 1 {
73: scanner := bufio.NewScanner(reader)
74: for scanner.Scan() { //gdb-opt=(scanner/A)
-scanner = (struct bufio.Scanner *) <A>
+scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@@ -29,7 +29,7 @@ i = 1
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
-scanner = (struct bufio.Scanner *) <A>
+scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@@ -39,7 +39,7 @@ i = 1
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
-scanner = (struct bufio.Scanner *) <A>
+scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@@ -49,7 +49,7 @@ i = 1
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
-scanner = (struct bufio.Scanner *) <A>
+scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@@ -59,7 +59,7 @@ i = 2
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
-scanner = (struct bufio.Scanner *) <A>
+scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@@ -69,7 +69,7 @@ i = 2
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
-scanner = (struct bufio.Scanner *) <A>
+scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@@ -79,7 +79,7 @@ i = 2
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
-scanner = (struct bufio.Scanner *) <A>
+scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@@ -89,7 +89,7 @@ i = 4
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
-scanner = (struct bufio.Scanner *) <A>
+scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@@ -99,7 +99,7 @@ i = 4
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
-scanner = (struct bufio.Scanner *) <A>
+scanner = (bufio.Scanner *) <A>
75: s := scanner.Text()
76: i, err := strconv.ParseInt(s, 10, 64)
77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
@@ -109,7 +109,7 @@ i = 5
81: hist = ensure(int(i), hist)
82: hist[int(i)]++
74: for scanner.Scan() { //gdb-opt=(scanner/A)
-scanner = (struct bufio.Scanner *) <A>
+scanner = (bufio.Scanner *) <A>
86: for i, a := range hist {
87: if a == 0 { //gdb-opt=(a,n,t)
a = 0
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index d367cd1944..25f8f826e6 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -817,7 +817,7 @@ func (t *Type) ChanArgs() *Type {
return t.Extra.(ChanArgs).T
}
-// FuncArgs returns the channel type for TFUNCARGS type t.
+// FuncArgs returns the func type for TFUNCARGS type t.
func (t *Type) FuncArgs() *Type {
t.wantEtype(TFUNCARGS)
return t.Extra.(FuncArgs).T
diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go
index a53b63ab92..e0bb4418ec 100644
--- a/src/cmd/compile/internal/x86/ssa.go
+++ b/src/cmd/compile/internal/x86/ssa.go
@@ -547,22 +547,22 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.Op386ADDLconstmodify:
- var p *obj.Prog = nil
sc := v.AuxValAndOff()
- off := sc.Off()
val := sc.Val()
- if val == 1 {
- p = s.Prog(x86.AINCL)
- } else if val == -1 {
- p = s.Prog(x86.ADECL)
- } else {
- p = s.Prog(v.Op.Asm())
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = val
+ if val == 1 || val == -1 {
+ var p *obj.Prog
+ if val == 1 {
+ p = s.Prog(x86.AINCL)
+ } else {
+ p = s.Prog(x86.ADECL)
+ }
+ off := sc.Off()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux2(&p.To, v, off)
+ break
}
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = v.Args[0].Reg()
- gc.AddAux2(&p.To, v, off)
+ fallthrough
case ssa.Op386ANDLconstmodify, ssa.Op386ORLconstmodify, ssa.Op386XORLconstmodify:
sc := v.AuxValAndOff()
off := sc.Off()
diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go
index d4f9dc4fbb..49f4a5e6a7 100644
--- a/src/cmd/dist/build.go
+++ b/src/cmd/dist/build.go
@@ -69,6 +69,7 @@ var okgoarch = []string{
"ppc64le",
"riscv64",
"s390x",
+ "sparc64",
"wasm",
}
@@ -86,6 +87,7 @@ var okgoos = []string{
"openbsd",
"plan9",
"windows",
+ "aix",
}
// find reports the first index of p in l[0:n], or else -1.
@@ -1387,6 +1389,7 @@ func checkNotStale(goBinary string, targets ...string) {
// single point of truth for supported platforms. This list is used
// by 'go tool dist list'.
var cgoEnabled = map[string]bool{
+ "aix/ppc64": false,
"darwin/386": true,
"darwin/amd64": true,
"darwin/arm": true,
@@ -1407,6 +1410,7 @@ var cgoEnabled = map[string]bool{
"linux/mips64le": true,
"linux/riscv64": true,
"linux/s390x": true,
+ "linux/sparc64": true,
"android/386": true,
"android/amd64": true,
"android/arm": true,
diff --git a/src/cmd/dist/buildruntime.go b/src/cmd/dist/buildruntime.go
index acf2230cb4..10d1552c94 100644
--- a/src/cmd/dist/buildruntime.go
+++ b/src/cmd/dist/buildruntime.go
@@ -87,6 +87,10 @@ func mkzbootstrap(file string) {
// stack guard size. Larger multipliers are used for non-optimized
// builds that have larger stack frames.
func stackGuardMultiplier() int {
+ // On AIX, a larger stack is needed for syscalls
+ if goos == "aix" {
+ return 2
+ }
for _, s := range strings.Split(os.Getenv("GO_GCFLAGS"), " ") {
if s == "-N" {
return 2
diff --git a/src/cmd/dist/main.go b/src/cmd/dist/main.go
index 37e37e2733..bf08869afb 100644
--- a/src/cmd/dist/main.go
+++ b/src/cmd/dist/main.go
@@ -81,6 +81,9 @@ func main() {
}
case "windows":
exe = ".exe"
+ case "aix":
+ // uname -m doesn't work under AIX
+ gohostarch = "ppc64"
}
sysinit()
diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go
index 4cd854773f..2d7f7bd2f9 100644
--- a/src/cmd/dist/test.go
+++ b/src/cmd/dist/test.go
@@ -1469,8 +1469,10 @@ func (t *tester) packageHasBenchmarks(pkg string) bool {
// because cmd/dist has to be buildable by Go 1.4.
func raceDetectorSupported(goos, goarch string) bool {
switch goos {
- case "linux", "darwin", "freebsd", "netbsd", "windows":
+ case "linux":
return goarch == "amd64" || goarch == "ppc64le"
+ case "darwin", "freebsd", "netbsd", "windows":
+ return goarch == "amd64"
default:
return false
}
diff --git a/src/cmd/fix/main.go b/src/cmd/fix/main.go
index f06abae171..f54a5e0d96 100644
--- a/src/cmd/fix/main.go
+++ b/src/cmd/fix/main.go
@@ -52,7 +52,7 @@ func usage() {
fmt.Fprintf(os.Stderr, "\n%s\n", f.name)
}
desc := strings.TrimSpace(f.desc)
- desc = strings.Replace(desc, "\n", "\n\t", -1)
+ desc = strings.ReplaceAll(desc, "\n", "\n\t")
fmt.Fprintf(os.Stderr, "\t%s\n", desc)
}
os.Exit(2)
diff --git a/src/cmd/fix/typecheck.go b/src/cmd/fix/typecheck.go
index eafb626c74..66e0cdcec0 100644
--- a/src/cmd/fix/typecheck.go
+++ b/src/cmd/fix/typecheck.go
@@ -193,12 +193,12 @@ func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, ass
var params, results []string
for _, p := range fn.Type.Params.List {
t := gofmt(p.Type)
- t = strings.Replace(t, "_Ctype_", "C.", -1)
+ t = strings.ReplaceAll(t, "_Ctype_", "C.")
params = append(params, t)
}
for _, r := range fn.Type.Results.List {
t := gofmt(r.Type)
- t = strings.Replace(t, "_Ctype_", "C.", -1)
+ t = strings.ReplaceAll(t, "_Ctype_", "C.")
results = append(results, t)
}
cfg.External["C."+fn.Name.Name[7:]] = joinFunc(params, results)
diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go
index 35cabcac14..9528ca2984 100644
--- a/src/cmd/go/alldocs.go
+++ b/src/cmd/go/alldocs.go
@@ -144,7 +144,7 @@
// link against shared libraries previously created with
// -buildmode=shared.
// -mod mode
-// module download mode to use: readonly, release, or vendor.
+// module download mode to use: readonly or vendor.
// See 'go help modules' for more.
// -pkgdir dir
// install and load all packages from dir instead of the usual locations.
@@ -1449,6 +1449,12 @@
// The directory where the go command will write
// temporary source files, packages, and binaries.
//
+// Each entry in the GOFLAGS list must be a standalone flag.
+// Because the entries are space-separated, flag values must
+// not contain spaces. In some cases, you can provide multiple flag
+// values instead: for example, to set '-ldflags=-s -w'
+// you can use 'GOFLAGS=-ldflags=-s -ldflags=-w'.
+//
// Environment variables for use with cgo:
//
// CC
diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go
index 85dcada9ec..73d7663ad6 100644
--- a/src/cmd/go/go_test.go
+++ b/src/cmd/go/go_test.go
@@ -1074,6 +1074,8 @@ func testMove(t *testing.T, vcs, url, base, config string) {
defer tg.cleanup()
tg.parallel()
tg.tempDir("src")
+ tg.must(os.Mkdir(tg.path(".hg"), 0700))
+ tg.must(ioutil.WriteFile(filepath.Join(tg.path(".hg"), "hgrc"), nil, 0600))
tg.setenv("GOPATH", tg.path("."))
tg.run("get", "-d", url)
tg.run("get", "-d", "-u", url)
@@ -1088,7 +1090,7 @@ func testMove(t *testing.T, vcs, url, base, config string) {
path := tg.path(filepath.Join("src", config))
data, err := ioutil.ReadFile(path)
tg.must(err)
- data = bytes.Replace(data, []byte(base), []byte(base+"XXX"), -1)
+ data = bytes.ReplaceAll(data, []byte(base), []byte(base+"XXX"))
tg.must(ioutil.WriteFile(path, data, 0644))
}
if vcs == "git" {
@@ -1185,6 +1187,7 @@ func TestImportCycle(t *testing.T) {
}
func TestListImportMap(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not have standard packages")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
@@ -1418,6 +1421,7 @@ func TestRelativeGOBINFail(t *testing.T) {
defer tg.cleanup()
tg.tempFile("triv.go", `package main; func main() {}`)
tg.setenv("GOBIN", ".")
+ tg.cd(tg.path("."))
tg.runFail("install")
tg.grepStderr("cannot install, GOBIN must be an absolute path", "go install must fail if $GOBIN is a relative path")
}
@@ -1729,20 +1733,23 @@ func TestGoListDeps(t *testing.T) {
tg.run("list", "-deps", "p1")
tg.grepStdout("p1/p2/p3/p4", "-deps p1 does not mention p4")
- // Check the list is in dependency order.
- tg.run("list", "-deps", "math")
- want := "internal/cpu\nunsafe\nmath\n"
- out := tg.stdout.String()
- if !strings.Contains(out, "internal/cpu") {
- // Some systems don't use internal/cpu.
- want = "unsafe\nmath\n"
- }
- if tg.stdout.String() != want {
- t.Fatalf("list -deps math: wrong order\nhave %q\nwant %q", tg.stdout.String(), want)
+ if runtime.Compiler != "gccgo" {
+ // Check the list is in dependency order.
+ tg.run("list", "-deps", "math")
+ want := "internal/cpu\nunsafe\nmath\n"
+ out := tg.stdout.String()
+ if !strings.Contains(out, "internal/cpu") {
+ // Some systems don't use internal/cpu.
+ want = "unsafe\nmath\n"
+ }
+ if tg.stdout.String() != want {
+ t.Fatalf("list -deps math: wrong order\nhave %q\nwant %q", tg.stdout.String(), want)
+ }
}
}
func TestGoListTest(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not have standard packages")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
@@ -1815,6 +1822,7 @@ func TestGoListCompiledCgo(t *testing.T) {
}
func TestGoListExport(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not have standard packages")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
@@ -2051,6 +2059,7 @@ func TestGoTestCpuprofileLeavesBinaryBehind(t *testing.T) {
}
func TestGoTestCpuprofileDashOControlsBinaryLocation(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@@ -2107,6 +2116,7 @@ func TestGoTestDashCDashOControlsBinaryLocation(t *testing.T) {
}
func TestGoTestDashOWritesBinary(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no standard packages")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@@ -2350,14 +2360,14 @@ func TestShadowingLogic(t *testing.T) {
// The math in root1 is not "math" because the standard math is.
tg.run("list", "-f", "({{.ImportPath}}) ({{.ConflictDir}})", "./testdata/shadow/root1/src/math")
- pwdForwardSlash := strings.Replace(pwd, string(os.PathSeparator), "/", -1)
+ pwdForwardSlash := strings.ReplaceAll(pwd, string(os.PathSeparator), "/")
if !strings.HasPrefix(pwdForwardSlash, "/") {
pwdForwardSlash = "/" + pwdForwardSlash
}
// The output will have makeImportValid applies, but we only
// bother to deal with characters we might reasonably see.
for _, r := range " :" {
- pwdForwardSlash = strings.Replace(pwdForwardSlash, string(r), "_", -1)
+ pwdForwardSlash = strings.ReplaceAll(pwdForwardSlash, string(r), "_")
}
want := "(_" + pwdForwardSlash + "/testdata/shadow/root1/src/math) (" + filepath.Join(runtime.GOROOT(), "src", "math") + ")"
if strings.TrimSpace(tg.getStdout()) != want {
@@ -2402,6 +2412,7 @@ func checkCoverage(tg *testgoData, data string) {
}
func TestCoverageRuns(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@@ -2413,6 +2424,7 @@ func TestCoverageRuns(t *testing.T) {
}
func TestCoverageDotImport(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no cover tool")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
@@ -2425,6 +2437,7 @@ func TestCoverageDotImport(t *testing.T) {
// Check that coverage analysis uses set mode.
// Also check that coverage profiles merge correctly.
func TestCoverageUsesSetMode(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@@ -2455,6 +2468,7 @@ func TestCoverageUsesAtomicModeForRace(t *testing.T) {
if !canRace {
t.Skip("skipping because race detector not supported")
}
+ skipIfGccgo(t, "gccgo has no cover tool")
tg := testgo(t)
defer tg.cleanup()
@@ -2472,6 +2486,7 @@ func TestCoverageUsesAtomicModeForRace(t *testing.T) {
}
func TestCoverageSyncAtomicImport(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@@ -2493,6 +2508,7 @@ func TestCoverageDepLoop(t *testing.T) {
}
func TestCoverageImportMainLoop(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no cover tool")
tg := testgo(t)
defer tg.cleanup()
tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
@@ -2503,6 +2519,7 @@ func TestCoverageImportMainLoop(t *testing.T) {
}
func TestCoveragePattern(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@@ -2518,6 +2535,7 @@ func TestCoveragePattern(t *testing.T) {
}
func TestCoverageErrorLine(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@@ -2539,7 +2557,7 @@ func TestCoverageErrorLine(t *testing.T) {
// It's OK that stderr2 drops the character position in the error,
// because of the //line directive (see golang.org/issue/22662).
- stderr = strings.Replace(stderr, "p.go:4:2:", "p.go:4:", -1)
+ stderr = strings.ReplaceAll(stderr, "p.go:4:2:", "p.go:4:")
if stderr != stderr2 {
t.Logf("test -cover changed error messages:\nbefore:\n%s\n\nafter:\n%s", stderr, stderr2)
t.Skip("golang.org/issue/22660")
@@ -2561,6 +2579,7 @@ func TestTestBuildFailureOutput(t *testing.T) {
}
func TestCoverageFunc(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
tg := testgo(t)
defer tg.cleanup()
@@ -2576,6 +2595,7 @@ func TestCoverageFunc(t *testing.T) {
// Issue 24588.
func TestCoverageDashC(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no cover tool")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
@@ -2684,6 +2704,7 @@ func main() {
}
func TestCoverageWithCgo(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no cover tool")
tooSlow(t)
if !canCgo {
t.Skip("skipping because cgo not enabled")
@@ -5164,6 +5185,7 @@ func TestCacheCoverage(t *testing.T) {
}
func TestCacheVet(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no standard packages")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
@@ -6082,6 +6104,7 @@ func TestNoRelativeTmpdir(t *testing.T) {
// Issue 24704.
func TestLinkerTmpDirIsDeleted(t *testing.T) {
+ skipIfGccgo(t, "gccgo does not use cmd/link")
if !canCgo {
t.Skip("skipping because cgo not enabled")
}
@@ -6129,6 +6152,7 @@ func TestLinkerTmpDirIsDeleted(t *testing.T) {
}
func testCDAndGOPATHAreDifferent(tg *testgoData, cd, gopath string) {
+ skipIfGccgo(tg.t, "gccgo does not support -ldflags -X")
tg.setenv("GOPATH", gopath)
tg.tempDir("dir")
@@ -6155,7 +6179,7 @@ func TestCDAndGOPATHAreDifferent(t *testing.T) {
testCDAndGOPATHAreDifferent(tg, cd, gopath)
if runtime.GOOS == "windows" {
- testCDAndGOPATHAreDifferent(tg, cd, strings.Replace(gopath, `\`, `/`, -1))
+ testCDAndGOPATHAreDifferent(tg, cd, strings.ReplaceAll(gopath, `\`, `/`))
testCDAndGOPATHAreDifferent(tg, cd, strings.ToUpper(gopath))
testCDAndGOPATHAreDifferent(tg, cd, strings.ToLower(gopath))
}
@@ -6184,6 +6208,7 @@ func TestGoBuildDashODevNull(t *testing.T) {
// Issue 25093.
func TestCoverpkgTestOnly(t *testing.T) {
+ skipIfGccgo(t, "gccgo has no cover tool")
tg := testgo(t)
defer tg.cleanup()
tg.parallel()
diff --git a/src/cmd/go/internal/clean/clean.go b/src/cmd/go/internal/clean/clean.go
index d023592eed..b12bd981a7 100644
--- a/src/cmd/go/internal/clean/clean.go
+++ b/src/cmd/go/internal/clean/clean.go
@@ -112,9 +112,10 @@ func runClean(cmd *base.Command, args []string) {
}
}
+ var b work.Builder
+ b.Print = fmt.Print
+
if cleanCache {
- var b work.Builder
- b.Print = fmt.Print
dir := cache.DefaultDir()
if dir != "off" {
// Remove the cache subdirectories but not the top cache directory.
@@ -156,8 +157,13 @@ func runClean(cmd *base.Command, args []string) {
if modfetch.PkgMod == "" {
base.Fatalf("go clean -modcache: no module cache")
}
- if err := removeAll(modfetch.PkgMod); err != nil {
- base.Errorf("go clean -modcache: %v", err)
+ if cfg.BuildN || cfg.BuildX {
+ b.Showcmd("", "rm -rf %s", modfetch.PkgMod)
+ }
+ if !cfg.BuildN {
+ if err := removeAll(modfetch.PkgMod); err != nil {
+ base.Errorf("go clean -modcache: %v", err)
+ }
}
}
}
diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go
index afadbade38..85a42e0519 100644
--- a/src/cmd/go/internal/envcmd/env.go
+++ b/src/cmd/go/internal/envcmd/env.go
@@ -203,7 +203,7 @@ func runEnv(cmd *base.Command, args []string) {
fmt.Printf("%s=\"%s\"\n", e.Name, e.Value)
case "plan9":
if strings.IndexByte(e.Value, '\x00') < 0 {
- fmt.Printf("%s='%s'\n", e.Name, strings.Replace(e.Value, "'", "''", -1))
+ fmt.Printf("%s='%s'\n", e.Name, strings.ReplaceAll(e.Value, "'", "''"))
} else {
v := strings.Split(e.Value, "\x00")
fmt.Printf("%s=(", e.Name)
diff --git a/src/cmd/go/internal/get/vcs.go b/src/cmd/go/internal/get/vcs.go
index 0f7b623ec3..173934b84e 100644
--- a/src/cmd/go/internal/get/vcs.go
+++ b/src/cmd/go/internal/get/vcs.go
@@ -964,7 +964,7 @@ func matchGoImport(imports []metaImport, importPath string) (metaImport, error)
// expand rewrites s to replace {k} with match[k] for each key k in match.
func expand(match map[string]string, s string) string {
for k, v := range match {
- s = strings.Replace(s, "{"+k+"}", v, -1)
+ s = strings.ReplaceAll(s, "{"+k+"}", v)
}
return s
}
diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go
index aff4ce12f6..e2c4e61615 100644
--- a/src/cmd/go/internal/help/helpdoc.go
+++ b/src/cmd/go/internal/help/helpdoc.go
@@ -507,6 +507,12 @@ General-purpose environment variables:
The directory where the go command will write
temporary source files, packages, and binaries.
+Each entry in the GOFLAGS list must be a standalone flag.
+Because the entries are space-separated, flag values must
+not contain spaces. In some cases, you can provide multiple flag
+values instead: for example, to set '-ldflags=-s -w'
+you can use 'GOFLAGS=-ldflags=-s -ldflags=-w'.
+
Environment variables for use with cgo:
CC
diff --git a/src/cmd/go/internal/modconv/convert_test.go b/src/cmd/go/internal/modconv/convert_test.go
index ad27abb8ef..4d55d73f21 100644
--- a/src/cmd/go/internal/modconv/convert_test.go
+++ b/src/cmd/go/internal/modconv/convert_test.go
@@ -146,7 +146,7 @@ func TestConvertLegacyConfig(t *testing.T) {
}
for _, tt := range tests {
- t.Run(strings.Replace(tt.path, "/", "_", -1)+"_"+tt.vers, func(t *testing.T) {
+ t.Run(strings.ReplaceAll(tt.path, "/", "_")+"_"+tt.vers, func(t *testing.T) {
f, err := modfile.Parse("golden", []byte(tt.gomod), nil)
if err != nil {
t.Fatal(err)
diff --git a/src/cmd/go/internal/modfetch/codehost/codehost.go b/src/cmd/go/internal/modfetch/codehost/codehost.go
index 4103ddc717..4205cd26bd 100644
--- a/src/cmd/go/internal/modfetch/codehost/codehost.go
+++ b/src/cmd/go/internal/modfetch/codehost/codehost.go
@@ -185,7 +185,7 @@ func (e *RunError) Error() string {
text := e.Cmd + ": " + e.Err.Error()
stderr := bytes.TrimRight(e.Stderr, "\n")
if len(stderr) > 0 {
- text += ":\n\t" + strings.Replace(string(stderr), "\n", "\n\t", -1)
+ text += ":\n\t" + strings.ReplaceAll(string(stderr), "\n", "\n\t")
}
return text
}
diff --git a/src/cmd/go/internal/modfetch/coderepo_test.go b/src/cmd/go/internal/modfetch/coderepo_test.go
index 79b82786cb..73c4bd2cca 100644
--- a/src/cmd/go/internal/modfetch/coderepo_test.go
+++ b/src/cmd/go/internal/modfetch/coderepo_test.go
@@ -423,7 +423,7 @@ func TestCodeRepo(t *testing.T) {
}
}
}
- t.Run(strings.Replace(tt.path, "/", "_", -1)+"/"+tt.rev, f)
+ t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.rev, f)
if strings.HasPrefix(tt.path, vgotest1git) {
for _, alt := range altVgotests {
// Note: Communicating with f through tt; should be cleaned up.
@@ -442,7 +442,7 @@ func TestCodeRepo(t *testing.T) {
tt.rev = remap(tt.rev, m)
tt.gomoderr = remap(tt.gomoderr, m)
tt.ziperr = remap(tt.ziperr, m)
- t.Run(strings.Replace(tt.path, "/", "_", -1)+"/"+tt.rev, f)
+ t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.rev, f)
tt = old
}
}
@@ -473,9 +473,9 @@ func remap(name string, m map[string]string) string {
}
}
for k, v := range m {
- name = strings.Replace(name, k, v, -1)
+ name = strings.ReplaceAll(name, k, v)
if codehost.AllHex(k) {
- name = strings.Replace(name, k[:12], v[:12], -1)
+ name = strings.ReplaceAll(name, k[:12], v[:12])
}
}
return name
@@ -505,11 +505,11 @@ var codeRepoVersionsTests = []struct {
},
{
path: "gopkg.in/russross/blackfriday.v2",
- versions: []string{"v2.0.0"},
+ versions: []string{"v2.0.0", "v2.0.1"},
},
{
path: "gopkg.in/natefinch/lumberjack.v2",
- versions: nil,
+ versions: []string{"v2.0.0"},
},
}
@@ -522,7 +522,7 @@ func TestCodeRepoVersions(t *testing.T) {
}
defer os.RemoveAll(tmpdir)
for _, tt := range codeRepoVersionsTests {
- t.Run(strings.Replace(tt.path, "/", "_", -1), func(t *testing.T) {
+ t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) {
repo, err := Lookup(tt.path)
if err != nil {
t.Fatalf("Lookup(%q): %v", tt.path, err)
@@ -570,7 +570,7 @@ func TestLatest(t *testing.T) {
}
defer os.RemoveAll(tmpdir)
for _, tt := range latestTests {
- name := strings.Replace(tt.path, "/", "_", -1)
+ name := strings.ReplaceAll(tt.path, "/", "_")
t.Run(name, func(t *testing.T) {
repo, err := Lookup(tt.path)
if err != nil {
diff --git a/src/cmd/go/internal/modfetch/fetch.go b/src/cmd/go/internal/modfetch/fetch.go
index 2e26bac434..8485932b42 100644
--- a/src/cmd/go/internal/modfetch/fetch.go
+++ b/src/cmd/go/internal/modfetch/fetch.go
@@ -123,7 +123,7 @@ func downloadZip(mod module.Version, target string) error {
for _, f := range z.File {
if !strings.HasPrefix(f.Name, prefix) {
z.Close()
- return fmt.Errorf("zip for %s has unexpected file %s", prefix[:len(prefix)-1], f.Name)
+ return fmt.Errorf("zip for %s has unexpected file %s", prefix, f.Name)
}
}
z.Close()
diff --git a/src/cmd/go/internal/modfetch/proxy.go b/src/cmd/go/internal/modfetch/proxy.go
index 5f856b80d2..7c78502f31 100644
--- a/src/cmd/go/internal/modfetch/proxy.go
+++ b/src/cmd/go/internal/modfetch/proxy.go
@@ -248,5 +248,5 @@ func (p *proxyRepo) Zip(version string, tmpdir string) (tmpfile string, err erro
// That is, it escapes things like ? and # (which really shouldn't appear anyway).
// It does not escape / to %2F: our REST API is designed so that / can be left as is.
func pathEscape(s string) string {
- return strings.Replace(url.PathEscape(s), "%2F", "/", -1)
+ return strings.ReplaceAll(url.PathEscape(s), "%2F", "/")
}
diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go
index cebb802db9..acee4a91e7 100644
--- a/src/cmd/go/internal/modload/build.go
+++ b/src/cmd/go/internal/modload/build.go
@@ -14,6 +14,7 @@ import (
"cmd/go/internal/search"
"encoding/hex"
"fmt"
+ "internal/goroot"
"os"
"path/filepath"
"strings"
@@ -30,13 +31,11 @@ func isStandardImportPath(path string) bool {
func findStandardImportPath(path string) string {
if search.IsStandardImportPath(path) {
- dir := filepath.Join(cfg.GOROOT, "src", path)
- if _, err := os.Stat(dir); err == nil {
- return dir
+ if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
+ return filepath.Join(cfg.GOROOT, "src", path)
}
- dir = filepath.Join(cfg.GOROOT, "src/vendor", path)
- if _, err := os.Stat(dir); err == nil {
- return dir
+ if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, "vendor/"+path) {
+ return filepath.Join(cfg.GOROOT, "src/vendor", path)
}
}
return ""
@@ -232,11 +231,16 @@ func findModule(target, path string) module.Version {
}
func ModInfoProg(info string) []byte {
+ // Inject a variable with the debug information as runtime/debug.modinfo,
+ // but compile it in package main so that it is specific to the binary.
+ // Populate it in an init func so that it will work with go:linkname,
+ // but use a string constant instead of the name 'string' in case
+ // package main shadows the built-in 'string' with some local declaration.
return []byte(fmt.Sprintf(`
package main
import _ "unsafe"
//go:linkname __debug_modinfo__ runtime/debug.modinfo
- var __debug_modinfo__ string
+ var __debug_modinfo__ = ""
func init() {
__debug_modinfo__ = %q
}
diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go
index 12d9407f6e..44c2a23726 100644
--- a/src/cmd/go/internal/modload/import.go
+++ b/src/cmd/go/internal/modload/import.go
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"go/build"
+ "internal/goroot"
"os"
"path/filepath"
"strings"
@@ -60,8 +61,8 @@ func Import(path string) (m module.Version, dir string, err error) {
if strings.HasPrefix(path, "golang_org/") {
return module.Version{}, filepath.Join(cfg.GOROOT, "src/vendor", path), nil
}
- dir := filepath.Join(cfg.GOROOT, "src", path)
- if _, err := os.Stat(dir); err == nil {
+ if goroot.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) {
+ dir := filepath.Join(cfg.GOROOT, "src", path)
return module.Version{}, dir, nil
}
}
diff --git a/src/cmd/go/internal/modload/import_test.go b/src/cmd/go/internal/modload/import_test.go
index 3f4ddab436..9422a3d960 100644
--- a/src/cmd/go/internal/modload/import_test.go
+++ b/src/cmd/go/internal/modload/import_test.go
@@ -45,7 +45,7 @@ func TestImport(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
for _, tt := range importTests {
- t.Run(strings.Replace(tt.path, "/", "_", -1), func(t *testing.T) {
+ t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) {
// Note that there is no build list, so Import should always fail.
m, dir, err := Import(tt.path)
if err == nil {
diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go
index 3b550f1db7..4071341313 100644
--- a/src/cmd/go/internal/modload/query.go
+++ b/src/cmd/go/internal/modload/query.go
@@ -207,7 +207,7 @@ func matchSemverPrefix(p, v string) bool {
// If multiple modules with revisions matching the query provide the requested
// package, QueryPackage picks the one with the longest module path.
//
-// If the path is in the the main module and the query is "latest",
+// If the path is in the main module and the query is "latest",
// QueryPackage returns Target as the version.
func QueryPackage(path, query string, allowed func(module.Version) bool) (module.Version, *modfetch.RevInfo, error) {
if _, ok := dirInModule(path, Target.Path, ModRoot, true); ok {
@@ -221,7 +221,7 @@ func QueryPackage(path, query string, allowed func(module.Version) bool) (module
}
finalErr := errMissing
- for p := path; p != "."; p = pathpkg.Dir(p) {
+ for p := path; p != "." && p != "/"; p = pathpkg.Dir(p) {
info, err := Query(p, query, allowed)
if err != nil {
if _, ok := err.(*codehost.VCSError); ok {
diff --git a/src/cmd/go/internal/modload/query_test.go b/src/cmd/go/internal/modload/query_test.go
index 7f3ffabef7..9b07383217 100644
--- a/src/cmd/go/internal/modload/query_test.go
+++ b/src/cmd/go/internal/modload/query_test.go
@@ -132,7 +132,7 @@ func TestQuery(t *testing.T) {
ok, _ := path.Match(allow, m.Version)
return ok
}
- t.Run(strings.Replace(tt.path, "/", "_", -1)+"/"+tt.query+"/"+allow, func(t *testing.T) {
+ t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.query+"/"+allow, func(t *testing.T) {
info, err := Query(tt.path, tt.query, allowed)
if tt.err != "" {
if err != nil && err.Error() == tt.err {
diff --git a/src/cmd/go/internal/search/search.go b/src/cmd/go/internal/search/search.go
index 60ae73696b..0ca60e7349 100644
--- a/src/cmd/go/internal/search/search.go
+++ b/src/cmd/go/internal/search/search.go
@@ -275,7 +275,7 @@ func MatchPattern(pattern string) func(name string) bool {
case strings.HasSuffix(re, `/\.\.\.`):
re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?`
}
- re = strings.Replace(re, `\.\.\.`, `[^`+vendorChar+`]*`, -1)
+ re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`)
reg := regexp.MustCompile(`^` + re + `$`)
@@ -353,7 +353,7 @@ func CleanPatterns(patterns []string) []string {
// as a courtesy to Windows developers, rewrite \ to /
// in command-line arguments. Handles .\... and so on.
if filepath.Separator == '\\' {
- a = strings.Replace(a, `\`, `/`, -1)
+ a = strings.ReplaceAll(a, `\`, `/`)
}
// Put argument in canonical form, but preserve leading ./.
diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go
index ed41ce5d07..145b87513a 100644
--- a/src/cmd/go/internal/work/build.go
+++ b/src/cmd/go/internal/work/build.go
@@ -99,7 +99,7 @@ and test commands:
link against shared libraries previously created with
-buildmode=shared.
-mod mode
- module download mode to use: readonly, release, or vendor.
+ module download mode to use: readonly or vendor.
See 'go help modules' for more.
-pkgdir dir
install and load all packages from dir instead of the usual locations.
@@ -398,10 +398,10 @@ func libname(args []string, pkgs []*load.Package) (string, error) {
arg = bp.ImportPath
}
}
- appendName(strings.Replace(arg, "/", "-", -1))
+ appendName(strings.ReplaceAll(arg, "/", "-"))
} else {
for _, pkg := range pkgs {
- appendName(strings.Replace(pkg.ImportPath, "/", "-", -1))
+ appendName(strings.ReplaceAll(pkg.ImportPath, "/", "-"))
}
}
} else if haveNonMeta { // have both meta package and a non-meta one
diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go
index f6b79711f9..8b97e8b75b 100644
--- a/src/cmd/go/internal/work/buildid.go
+++ b/src/cmd/go/internal/work/buildid.go
@@ -348,8 +348,12 @@ func (b *Builder) gccgoBuildIDELFFile(a *Action) (string, error) {
}
fmt.Fprintf(&buf, "\n")
if cfg.Goos != "solaris" {
- fmt.Fprintf(&buf, "\t"+`.section .note.GNU-stack,"",@progbits`+"\n")
- fmt.Fprintf(&buf, "\t"+`.section .note.GNU-split-stack,"",@progbits`+"\n")
+ secType := "@progbits"
+ if cfg.Goarch == "arm" {
+ secType = "%progbits"
+ }
+ fmt.Fprintf(&buf, "\t"+`.section .note.GNU-stack,"",%s`+"\n", secType)
+ fmt.Fprintf(&buf, "\t"+`.section .note.GNU-split-stack,"",%s`+"\n", secType)
}
if cfg.BuildN || cfg.BuildX {
diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go
index 01414a3d57..158f5f3b17 100644
--- a/src/cmd/go/internal/work/exec.go
+++ b/src/cmd/go/internal/work/exec.go
@@ -1705,14 +1705,14 @@ func (b *Builder) fmtcmd(dir string, format string, args ...interface{}) string
if dir[len(dir)-1] == filepath.Separator {
dot += string(filepath.Separator)
}
- cmd = strings.Replace(" "+cmd, " "+dir, dot, -1)[1:]
+ cmd = strings.ReplaceAll(" "+cmd, " "+dir, dot)[1:]
if b.scriptDir != dir {
b.scriptDir = dir
cmd = "cd " + dir + "\n" + cmd
}
}
if b.WorkDir != "" {
- cmd = strings.Replace(cmd, b.WorkDir, "$WORK", -1)
+ cmd = strings.ReplaceAll(cmd, b.WorkDir, "$WORK")
}
return cmd
}
@@ -1754,10 +1754,10 @@ func (b *Builder) showOutput(a *Action, dir, desc, out string) {
prefix := "# " + desc
suffix := "\n" + out
if reldir := base.ShortPath(dir); reldir != dir {
- suffix = strings.Replace(suffix, " "+dir, " "+reldir, -1)
- suffix = strings.Replace(suffix, "\n"+dir, "\n"+reldir, -1)
+ suffix = strings.ReplaceAll(suffix, " "+dir, " "+reldir)
+ suffix = strings.ReplaceAll(suffix, "\n"+dir, "\n"+reldir)
}
- suffix = strings.Replace(suffix, " "+b.WorkDir, " $WORK", -1)
+ suffix = strings.ReplaceAll(suffix, " "+b.WorkDir, " $WORK")
if a != nil && a.output != nil {
a.output = append(a.output, prefix...)
diff --git a/src/cmd/go/internal/work/security.go b/src/cmd/go/internal/work/security.go
index d5d126123a..1a401b8981 100644
--- a/src/cmd/go/internal/work/security.go
+++ b/src/cmd/go/internal/work/security.go
@@ -89,7 +89,9 @@ var validCompilerFlags = []*regexp.Regexp{
re(`-m32`),
re(`-m64`),
re(`-m(abi|arch|cpu|fpu|tune)=([^@\-].*)`),
+ re(`-m(no-)?v?aes`),
re(`-marm`),
+ re(`-m(no-)?avx[0-9a-z]*`),
re(`-mfloat-abi=([^@\-].*)`),
re(`-mfpmath=[0-9a-z,+]*`),
re(`-m(no-)?avx[0-9a-z.]*`),
@@ -100,6 +102,7 @@ var validCompilerFlags = []*regexp.Regexp{
re(`-miphoneos-version-min=(.+)`),
re(`-mnop-fun-dllimport`),
re(`-m(no-)?sse[0-9.]*`),
+ re(`-m(no-)?ssse3`),
re(`-mthumb(-interwork)?`),
re(`-mthreads`),
re(`-mwindows`),
@@ -170,6 +173,7 @@ var validLinkerFlags = []*regexp.Regexp{
re(`-Wl,-e[=,][a-zA-Z0-9]*`),
re(`-Wl,--enable-new-dtags`),
re(`-Wl,--end-group`),
+ re(`-Wl,--(no-)?export-dynamic`),
re(`-Wl,-framework,[^,@\-][^,]+`),
re(`-Wl,-headerpad_max_install_names`),
re(`-Wl,--no-undefined`),
diff --git a/src/cmd/go/main.go b/src/cmd/go/main.go
index 31c554e715..d6934ce5e9 100644
--- a/src/cmd/go/main.go
+++ b/src/cmd/go/main.go
@@ -93,6 +93,15 @@ func main() {
*get.CmdGet = *modget.CmdGet
}
+ if args[0] == "get" || args[0] == "help" {
+ // Replace get with module-aware get if appropriate.
+ // Note that if MustUseModules is true, this happened already above,
+ // but no harm in doing it again.
+ if modload.Init(); modload.Enabled() {
+ *get.CmdGet = *modget.CmdGet
+ }
+ }
+
cfg.CmdName = args[0] // for error messages
if args[0] == "help" {
help.Help(os.Stdout, args[1:])
@@ -161,15 +170,6 @@ func main() {
os.Exit(2)
}
- if args[0] == "get" {
- // Replace get with module-aware get if appropriate.
- // Note that if MustUseModules is true, this happened already above,
- // but no harm in doing it again.
- if modload.Init(); modload.Enabled() {
- *get.CmdGet = *modget.CmdGet
- }
- }
-
// Set environment (GOOS, GOARCH, etc) explicitly.
// In theory all the commands we invoke should have
// the same default computation of these as we do,
diff --git a/src/cmd/go/proxy_test.go b/src/cmd/go/proxy_test.go
index 212e5aa08f..97fc4b0e80 100644
--- a/src/cmd/go/proxy_test.go
+++ b/src/cmd/go/proxy_test.go
@@ -78,7 +78,7 @@ func readModList() {
if i < 0 {
continue
}
- encPath := strings.Replace(name[:i], "_", "/", -1)
+ encPath := strings.ReplaceAll(name[:i], "_", "/")
path, err := module.DecodePath(encPath)
if err != nil {
fmt.Fprintf(os.Stderr, "go proxy_test: %v\n", err)
@@ -256,7 +256,7 @@ func readArchive(path, vers string) *txtar.Archive {
return nil
}
- prefix := strings.Replace(enc, "/", "_", -1)
+ prefix := strings.ReplaceAll(enc, "/", "_")
name := filepath.Join(cmdGoDir, "testdata/mod", prefix+"_"+encVers+".txt")
a := archiveCache.Do(name, func() interface{} {
a, err := txtar.ParseFile(name)
diff --git a/src/cmd/go/script_test.go b/src/cmd/go/script_test.go
index 7c083a87b9..31c6ede2a5 100644
--- a/src/cmd/go/script_test.go
+++ b/src/cmd/go/script_test.go
@@ -329,7 +329,7 @@ func (ts *testScript) cmdAddcrlf(neg bool, args []string) {
file = ts.mkabs(file)
data, err := ioutil.ReadFile(file)
ts.check(err)
- ts.check(ioutil.WriteFile(file, bytes.Replace(data, []byte("\n"), []byte("\r\n"), -1), 0666))
+ ts.check(ioutil.WriteFile(file, bytes.ReplaceAll(data, []byte("\n"), []byte("\r\n")), 0666))
}
}
@@ -630,7 +630,7 @@ func scriptMatch(ts *testScript, neg bool, args []string, text, name string) {
}
// Matching against workdir would be misleading.
- text = strings.Replace(text, ts.workdir, "$WORK", -1)
+ text = strings.ReplaceAll(text, ts.workdir, "$WORK")
if neg {
if re.MatchString(text) {
@@ -691,7 +691,7 @@ func (ts *testScript) cmdSymlink(neg bool, args []string) {
// abbrev abbreviates the actual work directory in the string s to the literal string "$WORK".
func (ts *testScript) abbrev(s string) string {
- s = strings.Replace(s, ts.workdir, "$WORK", -1)
+ s = strings.ReplaceAll(s, ts.workdir, "$WORK")
if *testWork {
// Expose actual $WORK value in environment dump on first line of work script,
// so that the user can find out what directory -testwork left behind.
@@ -885,17 +885,17 @@ var diffTests = []struct {
func TestDiff(t *testing.T) {
for _, tt := range diffTests {
// Turn spaces into \n.
- text1 := strings.Replace(tt.text1, " ", "\n", -1)
+ text1 := strings.ReplaceAll(tt.text1, " ", "\n")
if text1 != "" {
text1 += "\n"
}
- text2 := strings.Replace(tt.text2, " ", "\n", -1)
+ text2 := strings.ReplaceAll(tt.text2, " ", "\n")
if text2 != "" {
text2 += "\n"
}
out := diff(text1, text2)
// Cut final \n, cut spaces, turn remaining \n into spaces.
- out = strings.Replace(strings.Replace(strings.TrimSuffix(out, "\n"), " ", "", -1), "\n", " ", -1)
+ out = strings.ReplaceAll(strings.ReplaceAll(strings.TrimSuffix(out, "\n"), " ", ""), "\n", " ")
if out != tt.diff {
t.Errorf("diff(%q, %q) = %q, want %q", text1, text2, out, tt.diff)
}
diff --git a/src/cmd/go/testdata/addmod.go b/src/cmd/go/testdata/addmod.go
index 19850af0f3..8bb6056a54 100644
--- a/src/cmd/go/testdata/addmod.go
+++ b/src/cmd/go/testdata/addmod.go
@@ -142,7 +142,7 @@ func main() {
}
data := txtar.Format(a)
- target := filepath.Join("mod", strings.Replace(path, "/", "_", -1)+"_"+vers+".txt")
+ target := filepath.Join("mod", strings.ReplaceAll(path, "/", "_")+"_"+vers+".txt")
if err := ioutil.WriteFile(target, data, 0666); err != nil {
log.Printf("%s: %v", arg, err)
exitCode = 1
diff --git a/src/cmd/go/testdata/script/help.txt b/src/cmd/go/testdata/script/help.txt
index 939da30283..656e680100 100644
--- a/src/cmd/go/testdata/script/help.txt
+++ b/src/cmd/go/testdata/script/help.txt
@@ -34,3 +34,8 @@ stderr 'Run ''go help mod'' for usage.'
! go vet -h
stderr 'usage: go vet'
stderr 'Run ''go help vet'' for details'
+
+# go help get shows usage for get
+go help get
+stdout 'usage: go get'
+stdout 'get when using GOPATH' \ No newline at end of file
diff --git a/src/cmd/go/testdata/script/list_bad_import.txt b/src/cmd/go/testdata/script/list_bad_import.txt
index ba66b0937f..3d9cac0d5f 100644
--- a/src/cmd/go/testdata/script/list_bad_import.txt
+++ b/src/cmd/go/testdata/script/list_bad_import.txt
@@ -47,7 +47,7 @@ stdout error
stdout incomplete
-# The pattern "all" should match only packages that acutally exist,
+# The pattern "all" should match only packages that actually exist,
# ignoring those whose existence is merely implied by imports.
go list -e -f '{{.ImportPath}}' all
stdout example.com/direct
diff --git a/src/cmd/go/testdata/script/mod_clean_cache.txt b/src/cmd/go/testdata/script/mod_clean_cache.txt
new file mode 100644
index 0000000000..66a0e9ea7e
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_clean_cache.txt
@@ -0,0 +1,23 @@
+env GO111MODULE=on
+
+go mod download rsc.io/quote@v1.5.0
+exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info
+exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod
+exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.zip
+
+go clean -modcache -n
+stdout '^rm -rf .*pkg.mod$'
+exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info
+exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod
+exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.zip
+
+go clean -modcache
+! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info
+! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod
+! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.zip
+
+-- go.mod --
+module m
+
+-- m.go --
+package m \ No newline at end of file
diff --git a/src/cmd/go/testdata/script/mod_help.txt b/src/cmd/go/testdata/script/mod_help.txt
new file mode 100644
index 0000000000..b5cd30c521
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_help.txt
@@ -0,0 +1,6 @@
+env GO111MODULE=on
+
+# go help get shows usage for get
+go help get
+stdout 'usage: go get'
+stdout 'get using modules to manage source' \ No newline at end of file
diff --git a/src/cmd/go/testdata/script/mod_list_bad_import.txt b/src/cmd/go/testdata/script/mod_list_bad_import.txt
index 258eb6a567..8a66e0b72a 100644
--- a/src/cmd/go/testdata/script/mod_list_bad_import.txt
+++ b/src/cmd/go/testdata/script/mod_list_bad_import.txt
@@ -47,7 +47,7 @@ stdout error
stdout incomplete
-# The pattern "all" should match only packages that acutally exist,
+# The pattern "all" should match only packages that actually exist,
# ignoring those whose existence is merely implied by imports.
go list -e -f '{{.ImportPath}} {{.Error}}' all
stdout example.com/direct
diff --git a/src/cmd/go/testdata/script/mod_string_alias.txt b/src/cmd/go/testdata/script/mod_string_alias.txt
new file mode 100644
index 0000000000..5c3d4287cc
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_string_alias.txt
@@ -0,0 +1,14 @@
+[short] skip
+
+env GO111MODULE=on
+
+go mod init golang.org/issue/27584
+
+go build .
+
+-- main.go --
+package main
+
+type string = []int
+
+func main() {}
diff --git a/src/cmd/go/testdata/script/mod_test.txt b/src/cmd/go/testdata/script/mod_test.txt
index caeb25ada8..af4fd76d70 100644
--- a/src/cmd/go/testdata/script/mod_test.txt
+++ b/src/cmd/go/testdata/script/mod_test.txt
@@ -1,5 +1,8 @@
env GO111MODULE=on
+# TODO(bcmills): Convert the 'go test' calls below to 'go list -test' once 'go
+# list' is more sensitive to package loading errors.
+
# A test in the module's root package should work.
cd a/
cp go.mod.empty go.mod
@@ -48,6 +51,10 @@ cd ../d_test
go test
stdout PASS
+cd ../e
+go test
+stdout PASS
+
-- a/go.mod.empty --
module example.com/user/a
diff --git a/src/cmd/go/vendor_test.go b/src/cmd/go/vendor_test.go
index 22aa643b00..c302d7e9b5 100644
--- a/src/cmd/go/vendor_test.go
+++ b/src/cmd/go/vendor_test.go
@@ -37,7 +37,7 @@ func TestVendorImports(t *testing.T) {
vend/x/vendor/p/p [notfound]
vend/x/vendor/r []
`
- want = strings.Replace(want+"\t", "\n\t\t", "\n", -1)
+ want = strings.ReplaceAll(want+"\t", "\n\t\t", "\n")
want = strings.TrimPrefix(want, "\n")
have := tg.stdout.String()
diff --git a/src/cmd/gofmt/gofmt_test.go b/src/cmd/gofmt/gofmt_test.go
index 16b653b646..3008365cd2 100644
--- a/src/cmd/gofmt/gofmt_test.go
+++ b/src/cmd/gofmt/gofmt_test.go
@@ -200,7 +200,7 @@ func TestDiff(t *testing.T) {
}
if runtime.GOOS == "windows" {
- b = bytes.Replace(b, []byte{'\r', '\n'}, []byte{'\n'}, -1)
+ b = bytes.ReplaceAll(b, []byte{'\r', '\n'}, []byte{'\n'})
}
bs := bytes.SplitN(b, []byte{'\n'}, 3)
diff --git a/src/cmd/internal/dwarf/dwarf.go b/src/cmd/internal/dwarf/dwarf.go
index 96fb2b765b..355091feda 100644
--- a/src/cmd/internal/dwarf/dwarf.go
+++ b/src/cmd/internal/dwarf/dwarf.go
@@ -304,6 +304,7 @@ const (
const (
DW_ABRV_NULL = iota
DW_ABRV_COMPUNIT
+ DW_ABRV_COMPUNIT_TEXTLESS
DW_ABRV_FUNCTION
DW_ABRV_FUNCTION_ABSTRACT
DW_ABRV_FUNCTION_CONCRETE
@@ -368,6 +369,18 @@ var abbrevs = [DW_NABRV]dwAbbrev{
},
},
+ /* COMPUNIT_TEXTLESS */
+ {
+ DW_TAG_compile_unit,
+ DW_CHILDREN_yes,
+ []dwAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_language, DW_FORM_data1},
+ {DW_AT_comp_dir, DW_FORM_string},
+ {DW_AT_producer, DW_FORM_string},
+ },
+ },
+
/* FUNCTION */
{
DW_TAG_subprogram,
diff --git a/src/cmd/internal/goobj/read.go b/src/cmd/internal/goobj/read.go
index e39180cad6..2d618eefa5 100644
--- a/src/cmd/internal/goobj/read.go
+++ b/src/cmd/internal/goobj/read.go
@@ -293,7 +293,7 @@ func (r *objReader) readRef() {
// In a symbol name in an object file, "". denotes the
// prefix for the package in which the object file has been found.
// Expand it.
- name = strings.Replace(name, `"".`, r.pkgprefix, -1)
+ name = strings.ReplaceAll(name, `"".`, r.pkgprefix)
// An individual object file only records version 0 (extern) or 1 (static).
// To make static symbols unique across all files being read, we
diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go
index 3427ea9161..dd6d9265c4 100644
--- a/src/cmd/internal/obj/arm/asm5.go
+++ b/src/cmd/internal/obj/arm/asm5.go
@@ -2007,7 +2007,7 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) {
o2 = c.oprrr(p, p.As, int(p.Scond))
o2 |= REGTMP & 15
r := int(p.Reg)
- if p.As == AMOVW || p.As == AMVN {
+ if p.As == AMVN {
r = 0
} else if r == 0 {
r = int(p.To.Reg)
diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go
index 7507976257..46fdcdcf7d 100644
--- a/src/cmd/internal/obj/arm64/asm7.go
+++ b/src/cmd/internal/obj/arm64/asm7.go
@@ -1085,6 +1085,23 @@ func (c *ctxt7) regoff(a *obj.Addr) uint32 {
return uint32(c.instoffset)
}
+func isSTLXRop(op obj.As) bool {
+ switch op {
+ case ASTLXR, ASTLXRW, ASTLXRB, ASTLXRH,
+ ASTXR, ASTXRW, ASTXRB, ASTXRH:
+ return true
+ }
+ return false
+}
+
+func isSTXPop(op obj.As) bool {
+ switch op {
+ case ASTXP, ASTLXP, ASTXPW, ASTLXPW:
+ return true
+ }
+ return false
+}
+
func isRegShiftOrExt(a *obj.Addr) bool {
return (a.Index-obj.RBaseARM64)&REG_EXT != 0 || (a.Index-obj.RBaseARM64)&REG_LSL != 0
}
@@ -1409,6 +1426,10 @@ func (c *ctxt7) aclass(a *obj.Addr) int {
return C_LIST
case obj.TYPE_MEM:
+ // The base register should be an integer register.
+ if int16(REG_F0) <= a.Reg && a.Reg <= int16(REG_V31) {
+ break
+ }
switch a.Name {
case obj.NAME_EXTERN, obj.NAME_STATIC:
if a.Sym == nil {
@@ -2502,6 +2523,17 @@ func SYSARG4(op1 int, Cn int, Cm int, op2 int) int {
return SYSARG5(0, op1, Cn, Cm, op2)
}
+// checkUnpredictable checks if the source and transfer registers are the same register.
+// ARM64 manual says it is "constrained unpredictable" if the src and dst registers of STP/LDP are same.
+func (c *ctxt7) checkUnpredictable(p *obj.Prog, isload bool, wback bool, rn int16, rt1 int16, rt2 int16) {
+ if wback && rn != REGSP && (rn == rt1 || rn == rt2) {
+ c.ctxt.Diag("constrained unpredictable behavior: %v", p)
+ }
+ if isload && rt1 == rt2 {
+ c.ctxt.Diag("constrained unpredictable behavior: %v", p)
+ }
+}
+
/* checkindex checks if index >= 0 && index <= maxindex */
func (c *ctxt7) checkindex(p *obj.Prog, index, maxindex int) {
if index < 0 || index > maxindex {
@@ -2940,6 +2972,10 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
}
case 22: /* movT (R)O!,R; movT O(R)!, R -> ldrT */
+ if p.From.Reg != REGSP && p.From.Reg == p.To.Reg {
+ c.ctxt.Diag("constrained unpredictable behavior: %v", p)
+ }
+
v := int32(p.From.Offset)
if v < -256 || v > 255 {
@@ -2954,6 +2990,10 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 |= ((uint32(v) & 0x1FF) << 12) | (uint32(p.From.Reg&31) << 5) | uint32(p.To.Reg&31)
case 23: /* movT R,(R)O!; movT O(R)!, R -> strT */
+ if p.To.Reg != REGSP && p.From.Reg == p.To.Reg {
+ c.ctxt.Diag("constrained unpredictable behavior: %v", p)
+ }
+
v := int32(p.To.Offset)
if v < -256 || v > 255 {
@@ -3551,6 +3591,9 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 |= 0x1F << 16
o1 |= uint32(p.From.Reg&31) << 5
if p.As == ALDXP || p.As == ALDXPW || p.As == ALDAXP || p.As == ALDAXPW {
+ if int(p.To.Reg) == int(p.To.Offset) {
+ c.ctxt.Diag("constrained unpredictable behavior: %v", p)
+ }
o1 |= uint32(p.To.Offset&31) << 10
} else {
o1 |= 0x1F << 10
@@ -3558,6 +3601,19 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 |= uint32(p.To.Reg & 31)
case 59: /* stxr/stlxr/stxp/stlxp */
+ s := p.RegTo2
+ n := p.To.Reg
+ t := p.From.Reg
+ if isSTLXRop(p.As) {
+ if s == t || (s == n && n != REGSP) {
+ c.ctxt.Diag("constrained unpredictable behavior: %v", p)
+ }
+ } else if isSTXPop(p.As) {
+ t2 := int16(p.From.Offset)
+ if (s == t || s == t2) || (s == n && n != REGSP) {
+ c.ctxt.Diag("constrained unpredictable behavior: %v", p)
+ }
+ }
o1 = c.opstore(p, p.As)
if p.RegTo2 != obj.REG_NONE {
@@ -3565,7 +3621,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
} else {
o1 |= 0x1F << 16
}
- if p.As == ASTXP || p.As == ASTXPW || p.As == ASTLXP || p.As == ASTLXPW {
+ if isSTXPop(p.As) {
o1 |= uint32(p.From.Offset&31) << 10
}
o1 |= uint32(p.To.Reg&31)<<5 | uint32(p.From.Reg&31)
@@ -6177,6 +6233,20 @@ func (c *ctxt7) opextr(p *obj.Prog, a obj.As, v int32, rn int, rm int, rt int) u
/* genrate instruction encoding for LDP/LDPW/LDPSW/STP/STPW */
func (c *ctxt7) opldpstp(p *obj.Prog, o *Optab, vo int32, rbase, rl, rh, ldp uint32) uint32 {
+ wback := false
+ if o.scond == C_XPOST || o.scond == C_XPRE {
+ wback = true
+ }
+ switch p.As {
+ case ALDP, ALDPW, ALDPSW:
+ c.checkUnpredictable(p, true, wback, p.From.Reg, p.To.Reg, int16(p.To.Offset))
+ case ASTP, ASTPW:
+ if wback == true {
+ c.checkUnpredictable(p, false, true, p.To.Reg, p.From.Reg, int16(p.From.Offset))
+ }
+ case AFLDPD, AFLDPS:
+ c.checkUnpredictable(p, true, false, p.From.Reg, p.To.Reg, int16(p.To.Offset))
+ }
var ret uint32
// check offset
switch p.As {
diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go
index 132f7836ef..354bda5e48 100644
--- a/src/cmd/internal/obj/link.go
+++ b/src/cmd/internal/obj/link.go
@@ -402,9 +402,10 @@ type FuncInfo struct {
dwarfAbsFnSym *LSym
dwarfIsStmtSym *LSym
- GCArgs LSym
- GCLocals LSym
- GCRegs LSym
+ GCArgs LSym
+ GCLocals LSym
+ GCRegs LSym
+ StackObjects *LSym
}
// Attribute is a set of symbol attributes.
diff --git a/src/cmd/internal/objabi/funcdata.go b/src/cmd/internal/objabi/funcdata.go
index a7827125bf..231d11b185 100644
--- a/src/cmd/internal/objabi/funcdata.go
+++ b/src/cmd/internal/objabi/funcdata.go
@@ -18,6 +18,7 @@ const (
FUNCDATA_LocalsPointerMaps = 1
FUNCDATA_InlTree = 2
FUNCDATA_RegPointerMaps = 3
+ FUNCDATA_StackObjects = 4
// ArgsSizeUnknown is set in Func.argsize to mark all functions
// whose argument size is unknown (C vararg functions, and
diff --git a/src/cmd/internal/objabi/funcid.go b/src/cmd/internal/objabi/funcid.go
index 15a63ab8b3..92799107da 100644
--- a/src/cmd/internal/objabi/funcid.go
+++ b/src/cmd/internal/objabi/funcid.go
@@ -9,7 +9,7 @@ package objabi
// Note that in some situations involving plugins, there may be multiple
// copies of a particular special runtime function.
// Note: this list must match the list in runtime/symtab.go.
-type FuncID uint32
+type FuncID uint8
const (
FuncID_normal FuncID = iota // not a special function
diff --git a/src/cmd/internal/objabi/head.go b/src/cmd/internal/objabi/head.go
index 23c7b62daf..db2221d6b1 100644
--- a/src/cmd/internal/objabi/head.go
+++ b/src/cmd/internal/objabi/head.go
@@ -48,10 +48,13 @@ const (
Hplan9
Hsolaris
Hwindows
+ Haix
)
func (h *HeadType) Set(s string) error {
switch s {
+ case "aix":
+ *h = Haix
case "darwin":
*h = Hdarwin
case "dragonfly":
@@ -82,6 +85,8 @@ func (h *HeadType) Set(s string) error {
func (h *HeadType) String() string {
switch *h {
+ case Haix:
+ return "aix"
case Hdarwin:
return "darwin"
case Hdragonfly:
diff --git a/src/cmd/link/dwarf_test.go b/src/cmd/link/dwarf_test.go
index ff11689bbc..2c01456f6b 100644
--- a/src/cmd/link/dwarf_test.go
+++ b/src/cmd/link/dwarf_test.go
@@ -122,6 +122,9 @@ func testDWARF(t *testing.T, buildmode string, expectDWARF bool, env ...string)
r.SkipChildren()
continue
}
+ if cu.Val(dwarf.AttrStmtList) == nil {
+ continue
+ }
lr, err := d.LineReader(cu)
if err != nil {
t.Fatal(err)
diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go
index 959fc8290c..743f4cedd4 100644
--- a/src/cmd/link/internal/ld/dwarf.go
+++ b/src/cmd/link/internal/ld/dwarf.go
@@ -5,7 +5,7 @@
// TODO/NICETOHAVE:
// - eliminate DW_CLS_ if not used
// - package info in compilation units
-// - assign global variables and types to their packages
+// - assign types to their packages
// - gdb uses c syntax, meaning clumsy quoting is needed for go identifiers. eg
// ptype struct '[]uint8' and qualifiers need to be quoted away
// - file:line info for variables
@@ -106,15 +106,8 @@ func writeabbrev(ctxt *Link) *sym.Symbol {
return s
}
-/*
- * Root DIEs for compilation units, types and global variables.
- */
-var dwroot dwarf.DWDie
-
var dwtypes dwarf.DWDie
-var dwglobals dwarf.DWDie
-
func newattr(die *dwarf.DWDie, attr uint16, cls int, value int64, data interface{}) *dwarf.DWAttr {
a := new(dwarf.DWAttr)
a.Link = die.Attr
@@ -340,19 +333,19 @@ func lookupOrDiag(ctxt *Link, n string) *sym.Symbol {
return s
}
-func dotypedef(ctxt *Link, parent *dwarf.DWDie, name string, def *dwarf.DWDie) {
+func dotypedef(ctxt *Link, parent *dwarf.DWDie, name string, def *dwarf.DWDie) *dwarf.DWDie {
// Only emit typedefs for real names.
if strings.HasPrefix(name, "map[") {
- return
+ return nil
}
if strings.HasPrefix(name, "struct {") {
- return
+ return nil
}
if strings.HasPrefix(name, "chan ") {
- return
+ return nil
}
if name[0] == '[' || name[0] == '*' {
- return
+ return nil
}
if def == nil {
Errorf(nil, "dwarf: bad def in dotypedef")
@@ -370,6 +363,8 @@ func dotypedef(ctxt *Link, parent *dwarf.DWDie, name string, def *dwarf.DWDie) {
die := newdie(ctxt, parent, dwarf.DW_ABRV_TYPEDECL, name, 0)
newrefattr(die, dwarf.DW_AT_type, s)
+
+ return die
}
// Define gotype, for composite ones recurse into constituents.
@@ -399,7 +394,7 @@ func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie {
kind := decodetypeKind(ctxt.Arch, gotype)
bytesize := decodetypeSize(ctxt.Arch, gotype)
- var die *dwarf.DWDie
+ var die, typedefdie *dwarf.DWDie
switch kind {
case objabi.KindBool:
die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_BASETYPE, name, 0)
@@ -439,7 +434,7 @@ func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie {
case objabi.KindArray:
die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_ARRAYTYPE, name, 0)
- dotypedef(ctxt, &dwtypes, name, die)
+ typedefdie = dotypedef(ctxt, &dwtypes, name, die)
newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
s := decodetypeArrayElem(ctxt.Arch, gotype)
newrefattr(die, dwarf.DW_AT_type, defgotype(ctxt, s))
@@ -461,7 +456,7 @@ func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie {
case objabi.KindFunc:
die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_FUNCTYPE, name, 0)
newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
- dotypedef(ctxt, &dwtypes, name, die)
+ typedefdie = dotypedef(ctxt, &dwtypes, name, die)
nfields := decodetypeFuncInCount(ctxt.Arch, gotype)
for i := 0; i < nfields; i++ {
s := decodetypeFuncInType(ctxt.Arch, gotype, i)
@@ -481,7 +476,7 @@ func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie {
case objabi.KindInterface:
die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_IFACETYPE, name, 0)
- dotypedef(ctxt, &dwtypes, name, die)
+ typedefdie = dotypedef(ctxt, &dwtypes, name, die)
nfields := int(decodetypeIfaceMethodCount(ctxt.Arch, gotype))
var s *sym.Symbol
if nfields == 0 {
@@ -503,13 +498,13 @@ func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie {
case objabi.KindPtr:
die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_PTRTYPE, name, 0)
- dotypedef(ctxt, &dwtypes, name, die)
+ typedefdie = dotypedef(ctxt, &dwtypes, name, die)
s := decodetypePtrElem(ctxt.Arch, gotype)
newrefattr(die, dwarf.DW_AT_type, defgotype(ctxt, s))
case objabi.KindSlice:
die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_SLICETYPE, name, 0)
- dotypedef(ctxt, &dwtypes, name, die)
+ typedefdie = dotypedef(ctxt, &dwtypes, name, die)
newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
s := decodetypeArrayElem(ctxt.Arch, gotype)
elem := defgotype(ctxt, s)
@@ -521,7 +516,7 @@ func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie {
case objabi.KindStruct:
die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_STRUCTTYPE, name, 0)
- dotypedef(ctxt, &dwtypes, name, die)
+ typedefdie = dotypedef(ctxt, &dwtypes, name, die)
newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0)
nfields := decodetypeStructFieldCount(ctxt.Arch, gotype)
for i := 0; i < nfields; i++ {
@@ -557,6 +552,9 @@ func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie {
prototypedies[gotype.Name] = die
}
+ if typedefdie != nil {
+ return typedefdie
+ }
return die
}
@@ -830,7 +828,11 @@ func synthesizechantypes(ctxt *Link, die *dwarf.DWDie) {
}
func dwarfDefineGlobal(ctxt *Link, s *sym.Symbol, str string, v int64, gotype *sym.Symbol) {
- dv := newdie(ctxt, &dwglobals, dwarf.DW_ABRV_VARIABLE, str, int(s.Version))
+ lib := s.Lib
+ if lib == nil {
+ lib = ctxt.LibraryByPkg["runtime"]
+ }
+ dv := newdie(ctxt, ctxt.compUnitByPackage[lib].dwinfo, dwarf.DW_ABRV_VARIABLE, str, int(s.Version))
newabslocexprattr(dv, v, s)
if s.Version == 0 {
newattr(dv, dwarf.DW_AT_external, dwarf.DW_CLS_FLAG, 1, 0)
@@ -905,10 +907,11 @@ func calcCompUnitRanges(ctxt *Link) {
}
}
-func movetomodule(parent *dwarf.DWDie) {
- die := dwroot.Child.Child
+func movetomodule(ctxt *Link, parent *dwarf.DWDie) {
+ runtimelib := ctxt.LibraryByPkg["runtime"]
+ die := ctxt.compUnitByPackage[runtimelib].dwinfo.Child
if die == nil {
- dwroot.Child.Child = parent.Child
+ ctxt.compUnitByPackage[runtimelib].dwinfo.Child = parent.Child
return
}
for die.Link != nil {
@@ -1062,7 +1065,7 @@ func importInfoSymbol(ctxt *Link, dsym *sym.Symbol) {
}
}
-func writelines(ctxt *Link, unit *compilationUnit, ls *sym.Symbol) (dwinfo *dwarf.DWDie) {
+func writelines(ctxt *Link, unit *compilationUnit, ls *sym.Symbol) {
var dwarfctxt dwarf.Context = dwctxt{ctxt}
is_stmt := uint8(1) // initially = recommended default_is_stmt = 1, tracks is_stmt toggles.
@@ -1071,29 +1074,7 @@ func writelines(ctxt *Link, unit *compilationUnit, ls *sym.Symbol) (dwinfo *dwar
headerstart := int64(-1)
headerend := int64(-1)
- lang := dwarf.DW_LANG_Go
-
- dwinfo = newdie(ctxt, &dwroot, dwarf.DW_ABRV_COMPUNIT, unit.lib.Pkg, 0)
- newattr(dwinfo, dwarf.DW_AT_language, dwarf.DW_CLS_CONSTANT, int64(lang), 0)
- newattr(dwinfo, dwarf.DW_AT_stmt_list, dwarf.DW_CLS_PTR, ls.Size, ls)
- // OS X linker requires compilation dir or absolute path in comp unit name to output debug info.
- compDir := getCompilationDir()
- // TODO: Make this be the actual compilation directory, not
- // the linker directory. If we move CU construction into the
- // compiler, this should happen naturally.
- newattr(dwinfo, dwarf.DW_AT_comp_dir, dwarf.DW_CLS_STRING, int64(len(compDir)), compDir)
- producerExtra := ctxt.Syms.Lookup(dwarf.CUInfoPrefix+"producer."+unit.lib.Pkg, 0)
- producer := "Go cmd/compile " + objabi.Version
- if len(producerExtra.P) > 0 {
- // We put a semicolon before the flags to clearly
- // separate them from the version, which can be long
- // and have lots of weird things in it in development
- // versions. We promise not to put a semicolon in the
- // version, so it should be safe for readers to scan
- // forward to the semicolon.
- producer += "; " + string(producerExtra.P)
- }
- newattr(dwinfo, dwarf.DW_AT_producer, dwarf.DW_CLS_STRING, int64(len(producer)), producer)
+ newattr(unit.dwinfo, dwarf.DW_AT_stmt_list, dwarf.DW_CLS_PTR, ls.Size, ls)
// Write .debug_line Line Number Program Header (sec 6.2.4)
// Fields marked with (*) must be changed for 64-bit dwarf
@@ -1295,8 +1276,6 @@ func writelines(ctxt *Link, unit *compilationUnit, ls *sym.Symbol) (dwinfo *dwar
}
}
}
-
- return dwinfo
}
// writepcranges generates the DW_AT_ranges table for compilation unit cu.
@@ -1463,15 +1442,13 @@ func writeinfo(ctxt *Link, syms []*sym.Symbol, units []*compilationUnit, abbrevs
var dwarfctxt dwarf.Context = dwctxt{ctxt}
- // Re-index per-package information by its CU die.
- unitByDIE := make(map[*dwarf.DWDie]*compilationUnit)
for _, u := range units {
- unitByDIE[u.dwinfo] = u
- }
-
- for compunit := dwroot.Child; compunit != nil; compunit = compunit.Link {
+ compunit := u.dwinfo
s := dtolsym(compunit.Sym)
- u := unitByDIE[compunit]
+
+ if len(u.lib.Textp) == 0 && u.dwinfo.Child == nil {
+ continue
+ }
// Write .debug_info Compilation Unit Header (sec 7.5.1)
// Fields marked with (*) must be changed for 64-bit dwarf
@@ -1531,7 +1508,11 @@ func writepub(ctxt *Link, sname string, ispub func(*dwarf.DWDie) bool, syms []*s
s.Type = sym.SDWARFSECT
syms = append(syms, s)
- for compunit := dwroot.Child; compunit != nil; compunit = compunit.Link {
+ for _, u := range ctxt.compUnits {
+ if len(u.lib.Textp) == 0 && u.dwinfo.Child == nil {
+ continue
+ }
+ compunit := u.dwinfo
sectionstart := s.Size
culength := uint32(getattr(compunit, dwarf.DW_AT_byte_size).Value) + 4
@@ -1666,13 +1647,10 @@ func dwarfGenerateDebugInfo(ctxt *Link) {
defgotype(ctxt, lookupOrDiag(ctxt, typ))
}
- // Create DIEs for global variables and the types they use.
- genasmsym(ctxt, defdwsymb)
+ // fake root DIE for compile unit DIEs
+ var dwroot dwarf.DWDie
for _, lib := range ctxt.Library {
- if len(lib.Textp) == 0 {
- continue
- }
unit := &compilationUnit{lib: lib}
if s := ctxt.Syms.ROLookup(dwarf.ConstInfoPrefix+lib.Pkg, 0); s != nil {
importInfoSymbol(ctxt, s)
@@ -1681,6 +1659,31 @@ func dwarfGenerateDebugInfo(ctxt *Link) {
ctxt.compUnits = append(ctxt.compUnits, unit)
ctxt.compUnitByPackage[lib] = unit
+ unit.dwinfo = newdie(ctxt, &dwroot, dwarf.DW_ABRV_COMPUNIT, unit.lib.Pkg, 0)
+ newattr(unit.dwinfo, dwarf.DW_AT_language, dwarf.DW_CLS_CONSTANT, int64(dwarf.DW_LANG_Go), 0)
+ // OS X linker requires compilation dir or absolute path in comp unit name to output debug info.
+ compDir := getCompilationDir()
+ // TODO: Make this be the actual compilation directory, not
+ // the linker directory. If we move CU construction into the
+ // compiler, this should happen naturally.
+ newattr(unit.dwinfo, dwarf.DW_AT_comp_dir, dwarf.DW_CLS_STRING, int64(len(compDir)), compDir)
+ producerExtra := ctxt.Syms.Lookup(dwarf.CUInfoPrefix+"producer."+unit.lib.Pkg, 0)
+ producer := "Go cmd/compile " + objabi.Version
+ if len(producerExtra.P) > 0 {
+ // We put a semicolon before the flags to clearly
+ // separate them from the version, which can be long
+ // and have lots of weird things in it in development
+ // versions. We promise not to put a semicolon in the
+ // version, so it should be safe for readers to scan
+ // forward to the semicolon.
+ producer += "; " + string(producerExtra.P)
+ }
+ newattr(unit.dwinfo, dwarf.DW_AT_producer, dwarf.DW_CLS_STRING, int64(len(producer)), producer)
+
+ if len(lib.Textp) == 0 {
+ unit.dwinfo.Abbrev = dwarf.DW_ABRV_COMPUNIT_TEXTLESS
+ }
+
// Scan all functions in this compilation unit, create DIEs for all
// referenced types, create the file table for debug_line, find all
// referenced abstract functions.
@@ -1721,6 +1724,9 @@ func dwarfGenerateDebugInfo(ctxt *Link) {
}
}
+ // Create DIEs for global variables and the types they use.
+ genasmsym(ctxt, defdwsymb)
+
synthesizestringtypes(ctxt, dwtypes.Child)
synthesizeslicetypes(ctxt, dwtypes.Child)
synthesizemaptypes(ctxt, dwtypes.Child)
@@ -1753,19 +1759,19 @@ func dwarfGenerateDebugSyms(ctxt *Link) {
debugRanges.Attr |= sym.AttrReachable
syms = append(syms, debugLine)
for _, u := range ctxt.compUnits {
- u.dwinfo = writelines(ctxt, u, debugLine)
+ reversetree(&u.dwinfo.Child)
+ if u.dwinfo.Abbrev == dwarf.DW_ABRV_COMPUNIT_TEXTLESS {
+ continue
+ }
+ writelines(ctxt, u, debugLine)
writepcranges(ctxt, u.dwinfo, u.lib.Textp[0], u.pcs, debugRanges)
}
// newdie adds DIEs to the *beginning* of the parent's DIE list.
// Now that we're done creating DIEs, reverse the trees so DIEs
// appear in the order they were created.
- reversetree(&dwroot.Child)
reversetree(&dwtypes.Child)
- reversetree(&dwglobals.Child)
-
- movetomodule(&dwtypes)
- movetomodule(&dwglobals)
+ movetomodule(ctxt, &dwtypes)
// Need to reorder symbols so sym.SDWARFINFO is after all sym.SDWARFSECT
// (but we need to generate dies before writepub)
@@ -2000,5 +2006,14 @@ func (v compilationUnitByStartPC) Len() int { return len(v) }
func (v compilationUnitByStartPC) Swap(i, j int) { v[i], v[j] = v[j], v[i] }
func (v compilationUnitByStartPC) Less(i, j int) bool {
- return v[i].lib.Textp[0].Value < v[j].lib.Textp[0].Value
+ switch {
+ case len(v[i].lib.Textp) == 0 && len(v[j].lib.Textp) == 0:
+ return v[i].lib.Pkg < v[j].lib.Pkg
+ case len(v[i].lib.Textp) != 0 && len(v[j].lib.Textp) == 0:
+ return true
+ case len(v[i].lib.Textp) == 0 && len(v[j].lib.Textp) != 0:
+ return false
+ default:
+ return v[i].lib.Textp[0].Value < v[j].lib.Textp[0].Value
+ }
}
diff --git a/src/cmd/link/internal/ld/dwarf_test.go b/src/cmd/link/internal/ld/dwarf_test.go
index 157bebbb41..5d2aadf589 100644
--- a/src/cmd/link/internal/ld/dwarf_test.go
+++ b/src/cmd/link/internal/ld/dwarf_test.go
@@ -948,3 +948,117 @@ func main() {
t.Errorf("DWARF type offset was %#x+%#x, but test program said %#x", rtAttr.(uint64), types.Addr, addr)
}
}
+
+func TestIssue27614(t *testing.T) {
+ // Type references in debug_info should always use the DW_TAG_typedef_type
+ // for the type, when that's generated.
+
+ testenv.MustHaveGoBuild(t)
+
+ if runtime.GOOS == "plan9" {
+ t.Skip("skipping on plan9; no DWARF symbol table in executables")
+ }
+
+ dir, err := ioutil.TempDir("", "go-build")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ const prog = `package main
+
+import "fmt"
+
+type astruct struct {
+ X int
+}
+
+type bstruct struct {
+ X float32
+}
+
+var globalptr *astruct
+var globalvar astruct
+var bvar0, bvar1, bvar2 bstruct
+
+func main() {
+ fmt.Println(globalptr, globalvar, bvar0, bvar1, bvar2)
+}
+`
+
+ f := gobuild(t, dir, prog, NoOpt)
+
+ defer f.Close()
+
+ data, err := f.DWARF()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rdr := data.Reader()
+
+ var astructTypeDIE, bstructTypeDIE, ptrastructTypeDIE *dwarf.Entry
+ var globalptrDIE, globalvarDIE *dwarf.Entry
+ var bvarDIE [3]*dwarf.Entry
+
+ for {
+ e, err := rdr.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if e == nil {
+ break
+ }
+
+ name, _ := e.Val(dwarf.AttrName).(string)
+
+ switch e.Tag {
+ case dwarf.TagTypedef:
+ switch name {
+ case "main.astruct":
+ astructTypeDIE = e
+ case "main.bstruct":
+ bstructTypeDIE = e
+ }
+ case dwarf.TagPointerType:
+ if name == "*main.astruct" {
+ ptrastructTypeDIE = e
+ }
+ case dwarf.TagVariable:
+ switch name {
+ case "main.globalptr":
+ globalptrDIE = e
+ case "main.globalvar":
+ globalvarDIE = e
+ default:
+ const bvarprefix = "main.bvar"
+ if strings.HasPrefix(name, bvarprefix) {
+ i, _ := strconv.Atoi(name[len(bvarprefix):])
+ bvarDIE[i] = e
+ }
+ }
+ }
+ }
+
+ typedieof := func(e *dwarf.Entry) dwarf.Offset {
+ return e.Val(dwarf.AttrType).(dwarf.Offset)
+ }
+
+ if off := typedieof(ptrastructTypeDIE); off != astructTypeDIE.Offset {
+ t.Errorf("type attribute of *main.astruct references %#x, not main.astruct DIE at %#x\n", off, astructTypeDIE.Offset)
+ }
+
+ if off := typedieof(globalptrDIE); off != ptrastructTypeDIE.Offset {
+ t.Errorf("type attribute of main.globalptr references %#x, not *main.astruct DIE at %#x\n", off, ptrastructTypeDIE.Offset)
+ }
+
+ if off := typedieof(globalvarDIE); off != astructTypeDIE.Offset {
+ t.Errorf("type attribute of main.globalvar1 references %#x, not main.astruct DIE at %#x\n", off, astructTypeDIE.Offset)
+ }
+
+ for i := range bvarDIE {
+ if off := typedieof(bvarDIE[i]); off != bstructTypeDIE.Offset {
+ t.Errorf("type attribute of main.bvar%d references %#x, not main.bstruct DIE at %#x\n", i, off, bstructTypeDIE.Offset)
+ }
+ }
+}
diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go
index 919fa08f21..e911d7bf08 100644
--- a/src/cmd/link/internal/ld/lib.go
+++ b/src/cmd/link/internal/ld/lib.go
@@ -1380,9 +1380,58 @@ func linkerFlagSupported(linker, flag string) bool {
}
})
+ flagsWithNextArgSkip := []string{
+ "-F",
+ "-l",
+ "-L",
+ "-framework",
+ "-Wl,-framework",
+ "-Wl,-rpath",
+ "-Wl,-undefined",
+ }
+ flagsWithNextArgKeep := []string{
+ "-arch",
+ "-isysroot",
+ "--sysroot",
+ "-target",
+ }
+ prefixesToKeep := []string{
+ "-f",
+ "-m",
+ "-p",
+ "-Wl,",
+ "-arch",
+ "-isysroot",
+ "--sysroot",
+ "-target",
+ }
+
var flags []string
- flags = append(flags, ldflag...)
- flags = append(flags, strings.Fields(*flagExtldflags)...)
+ keep := false
+ skip := false
+ extldflags := strings.Fields(*flagExtldflags)
+ for _, f := range append(extldflags, ldflag...) {
+ if keep {
+ flags = append(flags, f)
+ keep = false
+ } else if skip {
+ skip = false
+ } else if f == "" || f[0] != '-' {
+ } else if contains(flagsWithNextArgSkip, f) {
+ skip = true
+ } else if contains(flagsWithNextArgKeep, f) {
+ flags = append(flags, f)
+ keep = true
+ } else {
+ for _, p := range prefixesToKeep {
+ if strings.HasPrefix(f, p) {
+ flags = append(flags, f)
+ break
+ }
+ }
+ }
+ }
+
flags = append(flags, flag, "trivial.c")
cmd := exec.Command(linker, flags...)
@@ -1758,26 +1807,6 @@ func addsection(arch *sys.Arch, seg *sym.Segment, name string, rwx int) *sym.Sec
return sect
}
-func Le16(b []byte) uint16 {
- return uint16(b[0]) | uint16(b[1])<<8
-}
-
-func Le32(b []byte) uint32 {
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func Le64(b []byte) uint64 {
- return uint64(Le32(b)) | uint64(Le32(b[4:]))<<32
-}
-
-func Be16(b []byte) uint16 {
- return uint16(b[0])<<8 | uint16(b[1])
-}
-
-func Be32(b []byte) uint32 {
- return uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
-}
-
type chain struct {
sym *sym.Symbol
up *chain
diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go
index 7b7f7068e7..24398fcc87 100644
--- a/src/cmd/link/internal/ld/pcln.go
+++ b/src/cmd/link/internal/ld/pcln.go
@@ -312,45 +312,19 @@ func (ctxt *Link) pclntab() {
}
off = int32(ftab.SetUint32(ctxt.Arch, int64(off), args))
- // funcID uint32
- funcID := objabi.FuncID_normal
- switch s.Name {
- case "runtime.main":
- funcID = objabi.FuncID_runtime_main
- case "runtime.goexit":
- funcID = objabi.FuncID_goexit
- case "runtime.jmpdefer":
- funcID = objabi.FuncID_jmpdefer
- case "runtime.mcall":
- funcID = objabi.FuncID_mcall
- case "runtime.morestack":
- funcID = objabi.FuncID_morestack
- case "runtime.mstart":
- funcID = objabi.FuncID_mstart
- case "runtime.rt0_go":
- funcID = objabi.FuncID_rt0_go
- case "runtime.asmcgocall":
- funcID = objabi.FuncID_asmcgocall
- case "runtime.sigpanic":
- funcID = objabi.FuncID_sigpanic
- case "runtime.runfinq":
- funcID = objabi.FuncID_runfinq
- case "runtime.gcBgMarkWorker":
- funcID = objabi.FuncID_gcBgMarkWorker
- case "runtime.systemstack_switch":
- funcID = objabi.FuncID_systemstack_switch
- case "runtime.systemstack":
- funcID = objabi.FuncID_systemstack
- case "runtime.cgocallback_gofunc":
- funcID = objabi.FuncID_cgocallback_gofunc
- case "runtime.gogo":
- funcID = objabi.FuncID_gogo
- case "runtime.externalthreadhandler":
- funcID = objabi.FuncID_externalthreadhandler
- case "runtime.debugCallV1":
- funcID = objabi.FuncID_debugCallV1
+ // deferreturn
+ deferreturn := uint32(0)
+ for _, r := range s.R {
+ if r.Sym != nil && r.Sym.Name == "runtime.deferreturn" && r.Add == 0 {
+ // Note: the relocation target is in the call instruction, but
+ // is not necessarily the whole instruction (for instance, on
+ // x86 the relocation applies to bytes [1:5] of the 5 byte call
+ // instruction).
+ deferreturn = uint32(r.Off)
+ break // only need one
+ }
}
- off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(funcID)))
+ off = int32(ftab.SetUint32(ctxt.Arch, int64(off), deferreturn))
if pcln != &pclntabZpcln {
renumberfiles(ctxt, pcln.File, &pcln.Pcfile)
@@ -396,7 +370,52 @@ func (ctxt *Link) pclntab() {
off = addpctab(ctxt, ftab, off, &pcln.Pcfile)
off = addpctab(ctxt, ftab, off, &pcln.Pcline)
off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(len(pcln.Pcdata))))
- off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(len(pcln.Funcdata))))
+
+ // funcID uint8
+ funcID := objabi.FuncID_normal
+ switch s.Name {
+ case "runtime.main":
+ funcID = objabi.FuncID_runtime_main
+ case "runtime.goexit":
+ funcID = objabi.FuncID_goexit
+ case "runtime.jmpdefer":
+ funcID = objabi.FuncID_jmpdefer
+ case "runtime.mcall":
+ funcID = objabi.FuncID_mcall
+ case "runtime.morestack":
+ funcID = objabi.FuncID_morestack
+ case "runtime.mstart":
+ funcID = objabi.FuncID_mstart
+ case "runtime.rt0_go":
+ funcID = objabi.FuncID_rt0_go
+ case "runtime.asmcgocall":
+ funcID = objabi.FuncID_asmcgocall
+ case "runtime.sigpanic":
+ funcID = objabi.FuncID_sigpanic
+ case "runtime.runfinq":
+ funcID = objabi.FuncID_runfinq
+ case "runtime.gcBgMarkWorker":
+ funcID = objabi.FuncID_gcBgMarkWorker
+ case "runtime.systemstack_switch":
+ funcID = objabi.FuncID_systemstack_switch
+ case "runtime.systemstack":
+ funcID = objabi.FuncID_systemstack
+ case "runtime.cgocallback_gofunc":
+ funcID = objabi.FuncID_cgocallback_gofunc
+ case "runtime.gogo":
+ funcID = objabi.FuncID_gogo
+ case "runtime.externalthreadhandler":
+ funcID = objabi.FuncID_externalthreadhandler
+ case "runtime.debugCallV1":
+ funcID = objabi.FuncID_debugCallV1
+ }
+ off = int32(ftab.SetUint8(ctxt.Arch, int64(off), uint8(funcID)))
+
+ // unused
+ off += 2
+
+ // nfuncdata must be the final entry.
+ off = int32(ftab.SetUint8(ctxt.Arch, int64(off), uint8(len(pcln.Funcdata))))
for i := range pcln.Pcdata {
off = addpctab(ctxt, ftab, off, &pcln.Pcdata[i])
}
diff --git a/src/cmd/link/internal/ld/util.go b/src/cmd/link/internal/ld/util.go
index b80e6106ba..b5b02296a1 100644
--- a/src/cmd/link/internal/ld/util.go
+++ b/src/cmd/link/internal/ld/util.go
@@ -89,3 +89,13 @@ var start = time.Now()
func elapsed() float64 {
return time.Since(start).Seconds()
}
+
+// contains reports whether v is in s.
+func contains(s []string, v string) bool {
+ for _, x := range s {
+ if x == v {
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/link/internal/objfile/objfile.go b/src/cmd/link/internal/objfile/objfile.go
index e3800de304..3a8923b073 100644
--- a/src/cmd/link/internal/objfile/objfile.go
+++ b/src/cmd/link/internal/objfile/objfile.go
@@ -203,6 +203,7 @@ func (r *objReader) readSym() {
overwrite:
s.File = pkg
+ s.Lib = r.lib
if dupok {
s.Attr |= sym.AttrDuplicateOK
}
@@ -320,7 +321,6 @@ overwrite:
s.FuncInfo.IsStmtSym = r.syms.Lookup(dwarf.IsStmtPrefix+s.Name, int(s.Version))
- s.Lib = r.lib
if !dupok {
if s.Attr.OnList() {
log.Fatalf("symbol %s listed multiple times", s.Name)
diff --git a/src/cmd/link/internal/ppc64/asm.go b/src/cmd/link/internal/ppc64/asm.go
index 9445fbebcb..3e833b686e 100644
--- a/src/cmd/link/internal/ppc64/asm.go
+++ b/src/cmd/link/internal/ppc64/asm.go
@@ -716,9 +716,9 @@ func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64
// overflow depends on the instruction
var o1 uint32
if ctxt.Arch.ByteOrder == binary.BigEndian {
- o1 = ld.Be32(s.P[r.Off-2:])
+ o1 = binary.BigEndian.Uint32(s.P[r.Off-2:])
} else {
- o1 = ld.Le32(s.P[r.Off:])
+ o1 = binary.LittleEndian.Uint32(s.P[r.Off:])
}
switch o1 >> 26 {
case 24, // ori
@@ -750,9 +750,9 @@ func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64
// overflow depends on the instruction
var o1 uint32
if ctxt.Arch.ByteOrder == binary.BigEndian {
- o1 = ld.Be32(s.P[r.Off-2:])
+ o1 = binary.BigEndian.Uint32(s.P[r.Off-2:])
} else {
- o1 = ld.Le32(s.P[r.Off:])
+ o1 = binary.LittleEndian.Uint32(s.P[r.Off:])
}
switch o1 >> 26 {
case 25, // oris
@@ -774,9 +774,9 @@ func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64
case sym.RV_POWER_DS:
var o1 uint32
if ctxt.Arch.ByteOrder == binary.BigEndian {
- o1 = uint32(ld.Be16(s.P[r.Off:]))
+ o1 = uint32(binary.BigEndian.Uint16(s.P[r.Off:]))
} else {
- o1 = uint32(ld.Le16(s.P[r.Off:]))
+ o1 = uint32(binary.LittleEndian.Uint16(s.P[r.Off:]))
}
if t&3 != 0 {
ld.Errorf(s, "relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t)
diff --git a/src/cmd/trace/trace.go b/src/cmd/trace/trace.go
index d986b71f79..07fc4333eb 100644
--- a/src/cmd/trace/trace.go
+++ b/src/cmd/trace/trace.go
@@ -38,7 +38,7 @@ func httpTrace(w http.ResponseWriter, r *http.Request) {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
- html := strings.Replace(templTrace, "{{PARAMS}}", r.Form.Encode(), -1)
+ html := strings.ReplaceAll(templTrace, "{{PARAMS}}", r.Form.Encode())
w.Write([]byte(html))
}
@@ -685,13 +685,14 @@ func generateTrace(params *traceParams, consumer traceConsumer) error {
}
ctx.emitSlice(&fakeMarkStart, text)
case trace.EvGCSweepStart:
- slice := ctx.emitSlice(ev, "SWEEP")
+ slice := ctx.makeSlice(ev, "SWEEP")
if done := ev.Link; done != nil && done.Args[0] != 0 {
slice.Arg = struct {
Swept uint64 `json:"Swept bytes"`
Reclaimed uint64 `json:"Reclaimed bytes"`
}{done.Args[0], done.Args[1]}
}
+ ctx.emit(slice)
case trace.EvGoStart, trace.EvGoStartLabel:
info := getGInfo(ev.G)
if ev.Type == trace.EvGoStartLabel {
@@ -846,7 +847,11 @@ func (ctx *traceContext) proc(ev *trace.Event) uint64 {
}
}
-func (ctx *traceContext) emitSlice(ev *trace.Event, name string) *ViewerEvent {
+func (ctx *traceContext) emitSlice(ev *trace.Event, name string) {
+ ctx.emit(ctx.makeSlice(ev, name))
+}
+
+func (ctx *traceContext) makeSlice(ev *trace.Event, name string) *ViewerEvent {
// If ViewerEvent.Dur is not a positive value,
// trace viewer handles it as a non-terminating time interval.
// Avoid it by setting the field with a small value.
@@ -885,7 +890,6 @@ func (ctx *traceContext) emitSlice(ev *trace.Event, name string) *ViewerEvent {
sl.Cname = colorLightGrey
}
}
- ctx.emit(sl)
return sl
}
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/aliases.go b/src/cmd/vendor/golang.org/x/sys/windows/aliases.go
new file mode 100644
index 0000000000..af3af60db9
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/sys/windows/aliases.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+// +build go1.9
+
+package windows
+
+import "syscall"
+
+type Errno = syscall.Errno
+type SysProcAttr = syscall.SysProcAttr
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/asm_windows_arm.s b/src/cmd/vendor/golang.org/x/sys/windows/asm_windows_arm.s
new file mode 100644
index 0000000000..55d8b91a28
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/sys/windows/asm_windows_arm.s
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·getprocaddress(SB),NOSPLIT,$0
+ B syscall·getprocaddress(SB)
+
+TEXT ·loadlibrary(SB),NOSPLIT,$0
+ B syscall·loadlibrary(SB)
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
index ceebdd7726..3778075da0 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
@@ -1,4 +1,4 @@
-// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
+// Code generated by 'go generate'; DO NOT EDIT.
package registry
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/security_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/security_windows.go
index f1ec5dc4ee..4f17a3331f 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/security_windows.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/security_windows.go
@@ -296,6 +296,7 @@ const (
TOKEN_ADJUST_PRIVILEGES
TOKEN_ADJUST_GROUPS
TOKEN_ADJUST_DEFAULT
+ TOKEN_ADJUST_SESSIONID
TOKEN_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED |
TOKEN_ASSIGN_PRIMARY |
@@ -305,7 +306,8 @@ const (
TOKEN_QUERY_SOURCE |
TOKEN_ADJUST_PRIVILEGES |
TOKEN_ADJUST_GROUPS |
- TOKEN_ADJUST_DEFAULT
+ TOKEN_ADJUST_DEFAULT |
+ TOKEN_ADJUST_SESSIONID
TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY
TOKEN_WRITE = STANDARD_RIGHTS_WRITE |
TOKEN_ADJUST_PRIVILEGES |
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/service.go b/src/cmd/vendor/golang.org/x/sys/windows/service.go
index 24aa90bbbe..62fc31b40b 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/service.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/service.go
@@ -43,6 +43,11 @@ const (
SC_STATUS_PROCESS_INFO = 0
+ SC_ACTION_NONE = 0
+ SC_ACTION_RESTART = 1
+ SC_ACTION_REBOOT = 2
+ SC_ACTION_RUN_COMMAND = 3
+
SERVICE_STOPPED = 1
SERVICE_START_PENDING = 2
SERVICE_STOP_PENDING = 3
@@ -148,6 +153,19 @@ type ENUM_SERVICE_STATUS_PROCESS struct {
ServiceStatusProcess SERVICE_STATUS_PROCESS
}
+type SERVICE_FAILURE_ACTIONS struct {
+ ResetPeriod uint32
+ RebootMsg *uint16
+ Command *uint16
+ ActionsCount uint32
+ Actions *SC_ACTION
+}
+
+type SC_ACTION struct {
+ Type uint32
+ Delay uint32
+}
+
//sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle
//sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW
//sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/svc/mgr/config.go b/src/cmd/vendor/golang.org/x/sys/windows/svc/mgr/config.go
index 03bf41f516..d804e31f1f 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/svc/mgr/config.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/svc/mgr/config.go
@@ -88,23 +88,11 @@ func (s *Service) Config() (Config, error) {
}
}
- var p2 *windows.SERVICE_DESCRIPTION
- n = uint32(1024)
- for {
- b := make([]byte, n)
- p2 = (*windows.SERVICE_DESCRIPTION)(unsafe.Pointer(&b[0]))
- err := windows.QueryServiceConfig2(s.Handle,
- windows.SERVICE_CONFIG_DESCRIPTION, &b[0], n, &n)
- if err == nil {
- break
- }
- if err.(syscall.Errno) != syscall.ERROR_INSUFFICIENT_BUFFER {
- return Config{}, err
- }
- if n <= uint32(len(b)) {
- return Config{}, err
- }
+ b, err := s.queryServiceConfig2(windows.SERVICE_CONFIG_DESCRIPTION)
+ if err != nil {
+ return Config{}, err
}
+ p2 := (*windows.SERVICE_DESCRIPTION)(unsafe.Pointer(&b[0]))
return Config{
ServiceType: p.ServiceType,
@@ -137,3 +125,21 @@ func (s *Service) UpdateConfig(c Config) error {
}
return updateDescription(s.Handle, c.Description)
}
+
+// queryServiceConfig2 calls Windows QueryServiceConfig2 with infoLevel parameter and returns retrieved service configuration information.
+func (s *Service) queryServiceConfig2(infoLevel uint32) ([]byte, error) {
+ n := uint32(1024)
+ for {
+ b := make([]byte, n)
+ err := windows.QueryServiceConfig2(s.Handle, infoLevel, &b[0], n, &n)
+ if err == nil {
+ return b, nil
+ }
+ if err.(syscall.Errno) != syscall.ERROR_INSUFFICIENT_BUFFER {
+ return nil, err
+ }
+ if n <= uint32(len(b)) {
+ return nil, err
+ }
+ }
+}
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/svc/mgr/mgr_test.go b/src/cmd/vendor/golang.org/x/sys/windows/svc/mgr/mgr_test.go
index 1569a22177..9171f5bcf1 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/svc/mgr/mgr_test.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/svc/mgr/mgr_test.go
@@ -95,6 +95,113 @@ func testConfig(t *testing.T, s *mgr.Service, should mgr.Config) mgr.Config {
return is
}
+func testRecoveryActions(t *testing.T, s *mgr.Service, should []mgr.RecoveryAction) {
+ is, err := s.RecoveryActions()
+ if err != nil {
+ t.Fatalf("RecoveryActions failed: %s", err)
+ }
+ if len(should) != len(is) {
+ t.Errorf("recovery action mismatch: contains %v actions, but should have %v", len(is), len(should))
+ }
+ for i, _ := range is {
+ if should[i].Type != is[i].Type {
+ t.Errorf("recovery action mismatch: Type is %v, but should have %v", is[i].Type, should[i].Type)
+ }
+ if should[i].Delay != is[i].Delay {
+ t.Errorf("recovery action mismatch: Delay is %v, but should have %v", is[i].Delay, should[i].Delay)
+ }
+ }
+}
+
+func testResetPeriod(t *testing.T, s *mgr.Service, should uint32) {
+ is, err := s.ResetPeriod()
+ if err != nil {
+ t.Fatalf("ResetPeriod failed: %s", err)
+ }
+ if should != is {
+ t.Errorf("reset period mismatch: reset period is %v, but should have %v", is, should)
+ }
+}
+
+func testSetRecoveryActions(t *testing.T, s *mgr.Service) {
+ r := []mgr.RecoveryAction{
+ mgr.RecoveryAction{
+ Type: mgr.NoAction,
+ Delay: 60000 * time.Millisecond,
+ },
+ mgr.RecoveryAction{
+ Type: mgr.ServiceRestart,
+ Delay: 4 * time.Minute,
+ },
+ mgr.RecoveryAction{
+ Type: mgr.ServiceRestart,
+ Delay: time.Minute,
+ },
+ mgr.RecoveryAction{
+ Type: mgr.RunCommand,
+ Delay: 4000 * time.Millisecond,
+ },
+ }
+
+ // 4 recovery actions with reset period
+ err := s.SetRecoveryActions(r, uint32(10000))
+ if err != nil {
+ t.Fatalf("SetRecoveryActions failed: %v", err)
+ }
+ testRecoveryActions(t, s, r)
+ testResetPeriod(t, s, uint32(10000))
+
+ // Infinite reset period
+ err = s.SetRecoveryActions(r, syscall.INFINITE)
+ if err != nil {
+ t.Fatalf("SetRecoveryActions failed: %v", err)
+ }
+ testRecoveryActions(t, s, r)
+ testResetPeriod(t, s, syscall.INFINITE)
+
+ // nil recovery actions
+ err = s.SetRecoveryActions(nil, 0)
+ if err.Error() != "recoveryActions cannot be nil" {
+ t.Fatalf("SetRecoveryActions failed with unexpected error message of %q", err)
+ }
+
+ // Delete all recovery actions and reset period
+ err = s.ResetRecoveryActions()
+ if err != nil {
+ t.Fatalf("ResetRecoveryActions failed: %v", err)
+ }
+ testRecoveryActions(t, s, nil)
+ testResetPeriod(t, s, 0)
+}
+
+func testRebootMessage(t *testing.T, s *mgr.Service, should string) {
+ err := s.SetRebootMessage(should)
+ if err != nil {
+ t.Fatalf("SetRebootMessage failed: %v", err)
+ }
+ is, err := s.RebootMessage()
+ if err != nil {
+ t.Fatalf("RebootMessage failed: %v", err)
+ }
+ if should != is {
+ t.Errorf("reboot message mismatch: message is %q, but should have %q", is, should)
+ }
+}
+
+func testRecoveryCommand(t *testing.T, s *mgr.Service, should string) {
+ err := s.SetRecoveryCommand(should)
+ if err != nil {
+ t.Fatalf("SetRecoveryCommand failed: %v", err)
+ }
+ is, err := s.RecoveryCommand()
+ if err != nil {
+ t.Fatalf("RecoveryCommand failed: %v", err)
+ }
+ if should != is {
+ t.Errorf("recovery command mismatch: command is %q, but should have %q", is, should)
+ }
+}
+
func remove(t *testing.T, s *mgr.Service) {
err := s.Delete()
if err != nil {
@@ -165,5 +272,11 @@ func TestMyService(t *testing.T) {
t.Errorf("ListServices failed to find %q service", name)
}
+ testSetRecoveryActions(t, s)
+ testRebootMessage(t, s, "myservice failed")
+ testRebootMessage(t, s, "") // delete reboot message
+ testRecoveryCommand(t, s, "sc query myservice")
+ testRecoveryCommand(t, s, "") // delete recovery command
+
remove(t, s)
}
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/svc/mgr/recovery.go b/src/cmd/vendor/golang.org/x/sys/windows/svc/mgr/recovery.go
new file mode 100644
index 0000000000..71ce2b8199
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/sys/windows/svc/mgr/recovery.go
@@ -0,0 +1,135 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package mgr
+
+import (
+ "errors"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+const (
+ // Possible recovery actions that the service control manager can perform.
+ NoAction = windows.SC_ACTION_NONE // no action
+ ComputerReboot = windows.SC_ACTION_REBOOT // reboot the computer
+ ServiceRestart = windows.SC_ACTION_RESTART // restart the service
+ RunCommand = windows.SC_ACTION_RUN_COMMAND // run a command
+)
+
+// RecoveryAction represents an action that the service control manager can perform when service fails.
+// A service is considered failed when it terminates without reporting a status of SERVICE_STOPPED to the service controller.
+type RecoveryAction struct {
+ Type int // one of NoAction, ComputerReboot, ServiceRestart or RunCommand
+ Delay time.Duration // the time to wait before performing the specified action
+}
+
+// SetRecoveryActions sets actions that service controller performs when service fails and
+// the time after which to reset the service failure count to zero if there are no failures, in seconds.
+// Specify INFINITE to indicate that service failure count should never be reset.
+func (s *Service) SetRecoveryActions(recoveryActions []RecoveryAction, resetPeriod uint32) error {
+ if recoveryActions == nil {
+ return errors.New("recoveryActions cannot be nil")
+ }
+ actions := []windows.SC_ACTION{}
+ for _, a := range recoveryActions {
+ action := windows.SC_ACTION{
+ Type: uint32(a.Type),
+ Delay: uint32(a.Delay.Nanoseconds() / 1000000),
+ }
+ actions = append(actions, action)
+ }
+ rActions := windows.SERVICE_FAILURE_ACTIONS{
+ ActionsCount: uint32(len(actions)),
+ Actions: &actions[0],
+ ResetPeriod: resetPeriod,
+ }
+ return windows.ChangeServiceConfig2(s.Handle, windows.SERVICE_CONFIG_FAILURE_ACTIONS, (*byte)(unsafe.Pointer(&rActions)))
+}
+
+// RecoveryActions returns actions that service controller performs when service fails.
+// The service control manager counts the number of times service s has failed since the system booted.
+// The count is reset to 0 if the service has not failed for ResetPeriod seconds.
+// When the service fails for the Nth time, the service controller performs the action specified in element [N-1] of returned slice.
+// If N is greater than slice length, the service controller repeats the last action in the slice.
+func (s *Service) RecoveryActions() ([]RecoveryAction, error) {
+ b, err := s.queryServiceConfig2(windows.SERVICE_CONFIG_FAILURE_ACTIONS)
+ if err != nil {
+ return nil, err
+ }
+ p := (*windows.SERVICE_FAILURE_ACTIONS)(unsafe.Pointer(&b[0]))
+ if p.Actions == nil {
+ return nil, err
+ }
+
+ var recoveryActions []RecoveryAction
+ actions := (*[1024]windows.SC_ACTION)(unsafe.Pointer(p.Actions))[:p.ActionsCount]
+ for _, action := range actions {
+ recoveryActions = append(recoveryActions, RecoveryAction{Type: int(action.Type), Delay: time.Duration(action.Delay) * time.Millisecond})
+ }
+ return recoveryActions, nil
+}
+
+// ResetRecoveryActions deletes both reset period and array of failure actions.
+func (s *Service) ResetRecoveryActions() error {
+ actions := make([]windows.SC_ACTION, 1)
+ rActions := windows.SERVICE_FAILURE_ACTIONS{
+ Actions: &actions[0],
+ }
+ return windows.ChangeServiceConfig2(s.Handle, windows.SERVICE_CONFIG_FAILURE_ACTIONS, (*byte)(unsafe.Pointer(&rActions)))
+}
+
+// ResetPeriod is the time after which to reset the service failure
+// count to zero if there are no failures, in seconds.
+func (s *Service) ResetPeriod() (uint32, error) {
+ b, err := s.queryServiceConfig2(windows.SERVICE_CONFIG_FAILURE_ACTIONS)
+ if err != nil {
+ return 0, err
+ }
+ p := (*windows.SERVICE_FAILURE_ACTIONS)(unsafe.Pointer(&b[0]))
+ return p.ResetPeriod, nil
+}
+
+// SetRebootMessage sets service s reboot message.
+// If msg is "", the reboot message is deleted and no message is broadcast.
+func (s *Service) SetRebootMessage(msg string) error {
+ rActions := windows.SERVICE_FAILURE_ACTIONS{
+ RebootMsg: syscall.StringToUTF16Ptr(msg),
+ }
+ return windows.ChangeServiceConfig2(s.Handle, windows.SERVICE_CONFIG_FAILURE_ACTIONS, (*byte)(unsafe.Pointer(&rActions)))
+}
+
+// RebootMessage is broadcast to server users before rebooting in response to the ComputerReboot service controller action.
+func (s *Service) RebootMessage() (string, error) {
+ b, err := s.queryServiceConfig2(windows.SERVICE_CONFIG_FAILURE_ACTIONS)
+ if err != nil {
+ return "", err
+ }
+ p := (*windows.SERVICE_FAILURE_ACTIONS)(unsafe.Pointer(&b[0]))
+ return toString(p.RebootMsg), nil
+}
+
+// SetRecoveryCommand sets the command line of the process to execute in response to the RunCommand service controller action.
+// If cmd is "", the command is deleted and no program is run when the service fails.
+func (s *Service) SetRecoveryCommand(cmd string) error {
+ rActions := windows.SERVICE_FAILURE_ACTIONS{
+ Command: syscall.StringToUTF16Ptr(cmd),
+ }
+ return windows.ChangeServiceConfig2(s.Handle, windows.SERVICE_CONFIG_FAILURE_ACTIONS, (*byte)(unsafe.Pointer(&rActions)))
+}
+
+// RecoveryCommand is the command line of the process to execute in response to the RunCommand service controller action. This process runs under the same account as the service.
+func (s *Service) RecoveryCommand() (string, error) {
+ b, err := s.queryServiceConfig2(windows.SERVICE_CONFIG_FAILURE_ACTIONS)
+ if err != nil {
+ return "", err
+ }
+ p := (*windows.SERVICE_FAILURE_ACTIONS)(unsafe.Pointer(&b[0]))
+ return toString(p.Command), nil
+}
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/svc/sys_arm.s b/src/cmd/vendor/golang.org/x/sys/windows/svc/sys_arm.s
new file mode 100644
index 0000000000..33c692a8de
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/sys/windows/svc/sys_arm.s
@@ -0,0 +1,38 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+#include "textflag.h"
+
+// func servicemain(argc uint32, argv **uint16)
+TEXT ·servicemain(SB),NOSPLIT|NOFRAME,$0
+ MOVM.DB.W [R4, R14], (R13) // push {r4, lr}
+ MOVW R13, R4
+ BIC $0x7, R13 // alignment for ABI
+
+ MOVW R0, ·sArgc(SB)
+ MOVW R1, ·sArgv(SB)
+
+ MOVW ·sName(SB), R0
+ MOVW ·ctlHandlerExProc(SB), R1
+ MOVW $0, R2
+ MOVW ·cRegisterServiceCtrlHandlerExW(SB), R3
+ BL (R3)
+ CMP $0, R0
+ BEQ exit
+ MOVW R0, ·ssHandle(SB)
+
+ MOVW ·goWaitsH(SB), R0
+ MOVW ·cSetEvent(SB), R1
+ BL (R1)
+
+ MOVW ·cWaitsH(SB), R0
+ MOVW $-1, R1
+ MOVW ·cWaitForSingleObject(SB), R2
+ BL (R2)
+
+exit:
+ MOVW R4, R13 // free extra stack space
+ MOVM.IA.W (R13), [R4, R15] // pop {r4, pc}
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go
index 1e9f4bb4a3..8a00b71f1d 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -112,12 +112,14 @@ func Getpagesize() int { return 4096 }
// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention.
// This is useful when interoperating with Windows code requiring callbacks.
+// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
func NewCallback(fn interface{}) uintptr {
return syscall.NewCallback(fn)
}
// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention.
// This is useful when interoperating with Windows code requiring callbacks.
+// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
func NewCallbackCDecl(fn interface{}) uintptr {
return syscall.NewCallbackCDecl(fn)
}
@@ -653,7 +655,7 @@ type RawSockaddr struct {
type RawSockaddrAny struct {
Addr RawSockaddr
- Pad [96]int8
+ Pad [100]int8
}
type Sockaddr interface {
@@ -702,19 +704,69 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) {
return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil
}
+type RawSockaddrUnix struct {
+ Family uint16
+ Path [UNIX_PATH_MAX]int8
+}
+
type SockaddrUnix struct {
Name string
+ raw RawSockaddrUnix
}
func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) {
- // TODO(brainman): implement SockaddrUnix.sockaddr()
- return nil, 0, syscall.EWINDOWS
+ name := sa.Name
+ n := len(name)
+ if n > len(sa.raw.Path) {
+ return nil, 0, syscall.EINVAL
+ }
+ if n == len(sa.raw.Path) && name[0] != '@' {
+ return nil, 0, syscall.EINVAL
+ }
+ sa.raw.Family = AF_UNIX
+ for i := 0; i < n; i++ {
+ sa.raw.Path[i] = int8(name[i])
+ }
+ // length is family (uint16), name, NUL.
+ sl := int32(2)
+ if n > 0 {
+ sl += int32(n) + 1
+ }
+ if sa.raw.Path[0] == '@' {
+ sa.raw.Path[0] = 0
+ // Don't count trailing NUL for abstract address.
+ sl--
+ }
+
+ return unsafe.Pointer(&sa.raw), sl, nil
}
func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) {
switch rsa.Addr.Family {
case AF_UNIX:
- return nil, syscall.EWINDOWS
+ pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
+ sa := new(SockaddrUnix)
+ if pp.Path[0] == 0 {
+ // "Abstract" Unix domain socket.
+ // Rewrite leading NUL as @ for textual display.
+ // (This is the standard convention.)
+ // Not friendly to overwrite in place,
+ // but the callers below don't care.
+ pp.Path[0] = '@'
+ }
+
+ // Assume path ends at NUL.
+ // This is not technically the Linux semantics for
+ // abstract Unix domain sockets--they are supposed
+ // to be uninterpreted fixed-size binary blobs--but
+ // everyone uses this convention.
+ n := 0
+ for n < len(pp.Path) && pp.Path[n] != 0 {
+ n++
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
+ sa.Name = string(bytes)
+ return sa, nil
case AF_INET:
pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows_test.go b/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows_test.go
index 9c7133cc41..0e27464e8c 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows_test.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows_test.go
@@ -105,3 +105,9 @@ func ExampleLoadLibrary() {
build := uint16(r >> 16)
print("windows version ", major, ".", minor, " (Build ", build, ")\n")
}
+
+func TestTOKEN_ALL_ACCESS(t *testing.T) {
+ if windows.TOKEN_ALL_ACCESS != 0xF01FF {
+ t.Errorf("TOKEN_ALL_ACCESS = %x, want 0xF01FF", windows.TOKEN_ALL_ACCESS)
+ }
+}
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go
index 52c2037b68..141ca81bd7 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go
@@ -94,16 +94,29 @@ const (
FILE_APPEND_DATA = 0x00000004
FILE_WRITE_ATTRIBUTES = 0x00000100
- FILE_SHARE_READ = 0x00000001
- FILE_SHARE_WRITE = 0x00000002
- FILE_SHARE_DELETE = 0x00000004
- FILE_ATTRIBUTE_READONLY = 0x00000001
- FILE_ATTRIBUTE_HIDDEN = 0x00000002
- FILE_ATTRIBUTE_SYSTEM = 0x00000004
- FILE_ATTRIBUTE_DIRECTORY = 0x00000010
- FILE_ATTRIBUTE_ARCHIVE = 0x00000020
- FILE_ATTRIBUTE_NORMAL = 0x00000080
- FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400
+ FILE_SHARE_READ = 0x00000001
+ FILE_SHARE_WRITE = 0x00000002
+ FILE_SHARE_DELETE = 0x00000004
+
+ FILE_ATTRIBUTE_READONLY = 0x00000001
+ FILE_ATTRIBUTE_HIDDEN = 0x00000002
+ FILE_ATTRIBUTE_SYSTEM = 0x00000004
+ FILE_ATTRIBUTE_DIRECTORY = 0x00000010
+ FILE_ATTRIBUTE_ARCHIVE = 0x00000020
+ FILE_ATTRIBUTE_DEVICE = 0x00000040
+ FILE_ATTRIBUTE_NORMAL = 0x00000080
+ FILE_ATTRIBUTE_TEMPORARY = 0x00000100
+ FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200
+ FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400
+ FILE_ATTRIBUTE_COMPRESSED = 0x00000800
+ FILE_ATTRIBUTE_OFFLINE = 0x00001000
+ FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000
+ FILE_ATTRIBUTE_ENCRYPTED = 0x00004000
+ FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000
+ FILE_ATTRIBUTE_VIRTUAL = 0x00010000
+ FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000
+ FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x00040000
+ FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000
INVALID_FILE_ATTRIBUTES = 0xffffffff
@@ -257,15 +270,87 @@ const (
USAGE_MATCH_TYPE_AND = 0
USAGE_MATCH_TYPE_OR = 1
+ /* msgAndCertEncodingType values for CertOpenStore function */
X509_ASN_ENCODING = 0x00000001
PKCS_7_ASN_ENCODING = 0x00010000
- CERT_STORE_PROV_MEMORY = 2
-
- CERT_STORE_ADD_ALWAYS = 4
+ /* storeProvider values for CertOpenStore function */
+ CERT_STORE_PROV_MSG = 1
+ CERT_STORE_PROV_MEMORY = 2
+ CERT_STORE_PROV_FILE = 3
+ CERT_STORE_PROV_REG = 4
+ CERT_STORE_PROV_PKCS7 = 5
+ CERT_STORE_PROV_SERIALIZED = 6
+ CERT_STORE_PROV_FILENAME_A = 7
+ CERT_STORE_PROV_FILENAME_W = 8
+ CERT_STORE_PROV_FILENAME = CERT_STORE_PROV_FILENAME_W
+ CERT_STORE_PROV_SYSTEM_A = 9
+ CERT_STORE_PROV_SYSTEM_W = 10
+ CERT_STORE_PROV_SYSTEM = CERT_STORE_PROV_SYSTEM_W
+ CERT_STORE_PROV_COLLECTION = 11
+ CERT_STORE_PROV_SYSTEM_REGISTRY_A = 12
+ CERT_STORE_PROV_SYSTEM_REGISTRY_W = 13
+ CERT_STORE_PROV_SYSTEM_REGISTRY = CERT_STORE_PROV_SYSTEM_REGISTRY_W
+ CERT_STORE_PROV_PHYSICAL_W = 14
+ CERT_STORE_PROV_PHYSICAL = CERT_STORE_PROV_PHYSICAL_W
+ CERT_STORE_PROV_SMART_CARD_W = 15
+ CERT_STORE_PROV_SMART_CARD = CERT_STORE_PROV_SMART_CARD_W
+ CERT_STORE_PROV_LDAP_W = 16
+ CERT_STORE_PROV_LDAP = CERT_STORE_PROV_LDAP_W
+ CERT_STORE_PROV_PKCS12 = 17
+ /* store characteristics (low WORD of flag) for CertOpenStore function */
+ CERT_STORE_NO_CRYPT_RELEASE_FLAG = 0x00000001
+ CERT_STORE_SET_LOCALIZED_NAME_FLAG = 0x00000002
CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004
+ CERT_STORE_DELETE_FLAG = 0x00000010
+ CERT_STORE_UNSAFE_PHYSICAL_FLAG = 0x00000020
+ CERT_STORE_SHARE_STORE_FLAG = 0x00000040
+ CERT_STORE_SHARE_CONTEXT_FLAG = 0x00000080
+ CERT_STORE_MANIFOLD_FLAG = 0x00000100
+ CERT_STORE_ENUM_ARCHIVED_FLAG = 0x00000200
+ CERT_STORE_UPDATE_KEYID_FLAG = 0x00000400
+ CERT_STORE_BACKUP_RESTORE_FLAG = 0x00000800
+ CERT_STORE_MAXIMUM_ALLOWED_FLAG = 0x00001000
+ CERT_STORE_CREATE_NEW_FLAG = 0x00002000
+ CERT_STORE_OPEN_EXISTING_FLAG = 0x00004000
+ CERT_STORE_READONLY_FLAG = 0x00008000
+
+ /* store locations (high WORD of flag) for CertOpenStore function */
+ CERT_SYSTEM_STORE_CURRENT_USER = 0x00010000
+ CERT_SYSTEM_STORE_LOCAL_MACHINE = 0x00020000
+ CERT_SYSTEM_STORE_CURRENT_SERVICE = 0x00040000
+ CERT_SYSTEM_STORE_SERVICES = 0x00050000
+ CERT_SYSTEM_STORE_USERS = 0x00060000
+ CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY = 0x00070000
+ CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY = 0x00080000
+ CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE = 0x00090000
+ CERT_SYSTEM_STORE_UNPROTECTED_FLAG = 0x40000000
+ CERT_SYSTEM_STORE_RELOCATE_FLAG = 0x80000000
+
+ /* Miscellaneous high-WORD flags for CertOpenStore function */
+ CERT_REGISTRY_STORE_REMOTE_FLAG = 0x00010000
+ CERT_REGISTRY_STORE_SERIALIZED_FLAG = 0x00020000
+ CERT_REGISTRY_STORE_ROAMING_FLAG = 0x00040000
+ CERT_REGISTRY_STORE_MY_IE_DIRTY_FLAG = 0x00080000
+ CERT_REGISTRY_STORE_LM_GPT_FLAG = 0x01000000
+ CERT_REGISTRY_STORE_CLIENT_GPT_FLAG = 0x80000000
+ CERT_FILE_STORE_COMMIT_ENABLE_FLAG = 0x00010000
+ CERT_LDAP_STORE_SIGN_FLAG = 0x00010000
+ CERT_LDAP_STORE_AREC_EXCLUSIVE_FLAG = 0x00020000
+ CERT_LDAP_STORE_OPENED_FLAG = 0x00040000
+ CERT_LDAP_STORE_UNBIND_FLAG = 0x00080000
+ /* addDisposition values for CertAddCertificateContextToStore function */
+ CERT_STORE_ADD_NEW = 1
+ CERT_STORE_ADD_USE_EXISTING = 2
+ CERT_STORE_ADD_REPLACE_EXISTING = 3
+ CERT_STORE_ADD_ALWAYS = 4
+ CERT_STORE_ADD_REPLACE_EXISTING_INHERIT_PROPERTIES = 5
+ CERT_STORE_ADD_NEWER = 6
+ CERT_STORE_ADD_NEWER_INHERIT_PROPERTIES = 7
+
+ /* ErrorStatus values for CertTrustStatus struct */
CERT_TRUST_NO_ERROR = 0x00000000
CERT_TRUST_IS_NOT_TIME_VALID = 0x00000001
CERT_TRUST_IS_REVOKED = 0x00000004
@@ -282,11 +367,31 @@ const (
CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT = 0x00002000
CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000
CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT = 0x00008000
+ CERT_TRUST_IS_PARTIAL_CHAIN = 0x00010000
+ CERT_TRUST_CTL_IS_NOT_TIME_VALID = 0x00020000
+ CERT_TRUST_CTL_IS_NOT_SIGNATURE_VALID = 0x00040000
+ CERT_TRUST_CTL_IS_NOT_VALID_FOR_USAGE = 0x00080000
+ CERT_TRUST_HAS_WEAK_SIGNATURE = 0x00100000
CERT_TRUST_IS_OFFLINE_REVOCATION = 0x01000000
CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY = 0x02000000
CERT_TRUST_IS_EXPLICIT_DISTRUST = 0x04000000
CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT = 0x08000000
+ /* InfoStatus values for CertTrustStatus struct */
+ CERT_TRUST_HAS_EXACT_MATCH_ISSUER = 0x00000001
+ CERT_TRUST_HAS_KEY_MATCH_ISSUER = 0x00000002
+ CERT_TRUST_HAS_NAME_MATCH_ISSUER = 0x00000004
+ CERT_TRUST_IS_SELF_SIGNED = 0x00000008
+ CERT_TRUST_HAS_PREFERRED_ISSUER = 0x00000100
+ CERT_TRUST_HAS_ISSUANCE_CHAIN_POLICY = 0x00000400
+ CERT_TRUST_HAS_VALID_NAME_CONSTRAINTS = 0x00000400
+ CERT_TRUST_IS_PEER_TRUSTED = 0x00000800
+ CERT_TRUST_HAS_CRL_VALIDITY_EXTENDED = 0x00001000
+ CERT_TRUST_IS_FROM_EXCLUSIVE_TRUST_STORE = 0x00002000
+ CERT_TRUST_IS_CA_TRUSTED = 0x00004000
+ CERT_TRUST_IS_COMPLEX_CHAIN = 0x00010000
+
+ /* policyOID values for CertVerifyCertificateChainPolicy function */
CERT_CHAIN_POLICY_BASE = 1
CERT_CHAIN_POLICY_AUTHENTICODE = 2
CERT_CHAIN_POLICY_AUTHENTICODE_TS = 3
@@ -295,6 +400,7 @@ const (
CERT_CHAIN_POLICY_NT_AUTH = 6
CERT_CHAIN_POLICY_MICROSOFT_ROOT = 7
CERT_CHAIN_POLICY_EV = 8
+ CERT_CHAIN_POLICY_SSL_F12 = 9
CERT_E_EXPIRED = 0x800B0101
CERT_E_ROLE = 0x800B0103
@@ -302,8 +408,16 @@ const (
CERT_E_UNTRUSTEDROOT = 0x800B0109
CERT_E_CN_NO_MATCH = 0x800B010F
+ /* AuthType values for SSLExtraCertChainPolicyPara struct */
AUTHTYPE_CLIENT = 1
AUTHTYPE_SERVER = 2
+
+ /* Checks values for SSLExtraCertChainPolicyPara struct */
+ SECURITY_FLAG_IGNORE_REVOCATION = 0x00000080
+ SECURITY_FLAG_IGNORE_UNKNOWN_CA = 0x00000100
+ SECURITY_FLAG_IGNORE_WRONG_USAGE = 0x00000200
+ SECURITY_FLAG_IGNORE_CERT_CN_INVALID = 0x00001000
+ SECURITY_FLAG_IGNORE_CERT_DATE_INVALID = 0x00002000
)
var (
@@ -312,6 +426,14 @@ var (
OID_SGC_NETSCAPE = []byte("2.16.840.1.113730.4.1\x00")
)
+// Pointer represents a pointer to an arbitrary Windows type.
+//
+// Pointer-typed fields may point to one of many different types. It's
+// up to the caller to provide a pointer to the appropriate type, cast
+// to Pointer. The caller must obey the unsafe.Pointer rules while
+// doing so.
+type Pointer *struct{}
+
// Invented values to support what package os expects.
type Timeval struct {
Sec int32
@@ -880,11 +1002,15 @@ type MibIfRow struct {
Descr [MAXLEN_IFDESCR]byte
}
+type CertInfo struct {
+ // Not implemented
+}
+
type CertContext struct {
EncodingType uint32
EncodedCert *byte
Length uint32
- CertInfo uintptr
+ CertInfo *CertInfo
Store Handle
}
@@ -899,12 +1025,16 @@ type CertChainContext struct {
RevocationFreshnessTime uint32
}
+type CertTrustListInfo struct {
+ // Not implemented
+}
+
type CertSimpleChain struct {
Size uint32
TrustStatus CertTrustStatus
NumElements uint32
Elements **CertChainElement
- TrustListInfo uintptr
+ TrustListInfo *CertTrustListInfo
HasRevocationFreshnessTime uint32
RevocationFreshnessTime uint32
}
@@ -919,14 +1049,18 @@ type CertChainElement struct {
ExtendedErrorInfo *uint16
}
+type CertRevocationCrlInfo struct {
+ // Not implemented
+}
+
type CertRevocationInfo struct {
Size uint32
RevocationResult uint32
RevocationOid *byte
- OidSpecificInfo uintptr
+ OidSpecificInfo Pointer
HasFreshnessTime uint32
FreshnessTime uint32
- CrlInfo uintptr // *CertRevocationCrlInfo
+ CrlInfo *CertRevocationCrlInfo
}
type CertTrustStatus struct {
@@ -957,7 +1091,7 @@ type CertChainPara struct {
type CertChainPolicyPara struct {
Size uint32
Flags uint32
- ExtraPolicyPara uintptr
+ ExtraPolicyPara Pointer
}
type SSLExtraCertChainPolicyPara struct {
@@ -972,7 +1106,7 @@ type CertChainPolicyStatus struct {
Error uint32
ChainIndex uint32
ElementIndex uint32
- ExtraPolicyStatus uintptr
+ ExtraPolicyStatus Pointer
}
const (
@@ -1319,7 +1453,7 @@ type SmallRect struct {
Bottom int16
}
-// Used with GetConsoleScreenBuffer to retreive information about a console
+// Used with GetConsoleScreenBuffer to retrieve information about a console
// screen buffer. See
// https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str
// for details.
@@ -1331,3 +1465,5 @@ type ConsoleScreenBufferInfo struct {
Window SmallRect
MaximumWindowSize Coord
}
+
+const UNIX_PATH_MAX = 108 // defined in afunix.h
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/types_windows_arm.go b/src/cmd/vendor/golang.org/x/sys/windows/types_windows_arm.go
new file mode 100644
index 0000000000..74571e3600
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/sys/windows/types_windows_arm.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+type WSAData struct {
+ Version uint16
+ HighVersion uint16
+ Description [WSADESCRIPTION_LEN + 1]byte
+ SystemStatus [WSASYS_STATUS_LEN + 1]byte
+ MaxSockets uint16
+ MaxUdpDg uint16
+ VendorInfo *byte
+}
+
+type Servent struct {
+ Name *byte
+ Aliases **byte
+ Port uint16
+ Proto *byte
+}
diff --git a/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 318c61634e..fc56aec035 100644
--- a/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -1,4 +1,4 @@
-// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
+// Code generated by 'go generate'; DO NOT EDIT.
package windows
diff --git a/src/cmd/vendor/vendor.json b/src/cmd/vendor/vendor.json
index 8009661879..6e077e4ae1 100644
--- a/src/cmd/vendor/vendor.json
+++ b/src/cmd/vendor/vendor.json
@@ -131,40 +131,40 @@
"revisionTime": "2018-06-27T13:57:12Z"
},
{
- "checksumSHA1": "y0x0I9zDxnxn9nCxwP/MdPyq1E8=",
+ "checksumSHA1": "s+lofQ+SCdhmy0cQp9FpdQncuuI=",
"path": "golang.org/x/sys/windows",
- "revision": "c11f84a56e43e20a78cee75a7c034031ecf57d1f",
- "revisionTime": "2018-05-25T13:55:20Z"
+ "revision": "90868a75fefd03942536221d7c0e2f84ec62a668",
+ "revisionTime": "2018-08-01T20:46:00Z"
},
{
- "checksumSHA1": "BnZkq/3Ejb7961bDhybRraW6jzI=",
+ "checksumSHA1": "yEg3f1MGwuyDh5NrNEGkWKlTyqY=",
"path": "golang.org/x/sys/windows/registry",
- "revision": "c11f84a56e43e20a78cee75a7c034031ecf57d1f",
- "revisionTime": "2018-05-25T13:55:20Z"
+ "revision": "90868a75fefd03942536221d7c0e2f84ec62a668",
+ "revisionTime": "2018-08-01T20:46:00Z"
},
{
- "checksumSHA1": "dQbFeoiAxfB3WFFVcAdeSwSgeDk=",
+ "checksumSHA1": "ZDwqsuoZqQq/XMQ0R0dJ4oK41lU=",
"path": "golang.org/x/sys/windows/svc",
- "revision": "c11f84a56e43e20a78cee75a7c034031ecf57d1f",
- "revisionTime": "2018-05-25T13:55:20Z"
+ "revision": "90868a75fefd03942536221d7c0e2f84ec62a668",
+ "revisionTime": "2018-08-01T20:46:00Z"
},
{
"checksumSHA1": "e9KJPWrdqg5PMkbE2w60Io8rY4M=",
"path": "golang.org/x/sys/windows/svc/debug",
- "revision": "c11f84a56e43e20a78cee75a7c034031ecf57d1f",
- "revisionTime": "2018-05-25T13:55:20Z"
+ "revision": "90868a75fefd03942536221d7c0e2f84ec62a668",
+ "revisionTime": "2018-08-01T20:46:00Z"
},
{
"checksumSHA1": "dz53pQfqAnXG8HdJj+nazXN9YRw=",
"path": "golang.org/x/sys/windows/svc/eventlog",
- "revision": "c11f84a56e43e20a78cee75a7c034031ecf57d1f",
- "revisionTime": "2018-05-25T13:55:20Z"
+ "revision": "90868a75fefd03942536221d7c0e2f84ec62a668",
+ "revisionTime": "2018-08-01T20:46:00Z"
},
{
- "checksumSHA1": "wz+0tf0Z7cVBaz/35P1m1cAiI7k=",
+ "checksumSHA1": "vV6Mr/b+1GaHiHLnq2zEejQJVec=",
"path": "golang.org/x/sys/windows/svc/mgr",
- "revision": "c11f84a56e43e20a78cee75a7c034031ecf57d1f",
- "revisionTime": "2018-05-25T13:55:20Z"
+ "revision": "90868a75fefd03942536221d7c0e2f84ec62a668",
+ "revisionTime": "2018-08-01T20:46:00Z"
}
],
"rootPath": "/cmd"
diff --git a/src/cmd/vet/all/main.go b/src/cmd/vet/all/main.go
index e7fe4edc2a..7e4a68101f 100644
--- a/src/cmd/vet/all/main.go
+++ b/src/cmd/vet/all/main.go
@@ -192,9 +192,9 @@ func vetPlatforms(pp []platform) {
}
func (p platform) vet() {
- if p.os == "linux" && p.arch == "riscv64" {
- // TODO(tklauser): enable as soon as the riscv64 port has fully landed
- fmt.Println("skipping linux/riscv64")
+ if p.os == "linux" && (p.arch == "riscv64" || p.arch == "sparc64") {
+ // TODO(tklauser): enable as soon as these ports have fully landed
+ fmt.Printf("skipping %s/%s\n", p.os, p.arch)
return
}
@@ -204,6 +204,12 @@ func (p platform) vet() {
return
}
+ if p.os == "aix" && p.arch == "ppc64" {
+ // TODO(aix): enable as soon as the aix/ppc64 port has fully landed
+ fmt.Println("skipping aix/ppc64")
+ return
+ }
+
var buf bytes.Buffer
fmt.Fprintf(&buf, "go run main.go -p %s\n", p)
diff --git a/src/cmd/vet/all/whitelist/all.txt b/src/cmd/vet/all/whitelist/all.txt
index b974d21c6a..5425f84fc6 100644
--- a/src/cmd/vet/all/whitelist/all.txt
+++ b/src/cmd/vet/all/whitelist/all.txt
@@ -24,6 +24,7 @@ runtime/asm_ARCHSUFF.s: [GOARCH] gcWriteBarrier: function gcWriteBarrier missing
// in bad situations that vet can also detect statically.
encoding/json/decode_test.go: struct field m has json tag but is not exported
encoding/json/decode_test.go: struct field m2 has json tag but is not exported
+encoding/json/decode_test.go: struct field s has json tag but is not exported
encoding/json/tagkey_test.go: struct field tag `:"BadFormat"` not compatible with reflect.StructTag.Get: bad syntax for struct tag key
runtime/testdata/testprog/deadlock.go: unreachable code
runtime/testdata/testprog/deadlock.go: unreachable code
diff --git a/src/cmd/vet/all/whitelist/windows_386.txt b/src/cmd/vet/all/whitelist/windows_386.txt
index 788684a49d..d910022ef6 100644
--- a/src/cmd/vet/all/whitelist/windows_386.txt
+++ b/src/cmd/vet/all/whitelist/windows_386.txt
@@ -3,7 +3,6 @@
runtime/sys_windows_386.s: [386] profileloop: use of 4(SP) points beyond argument frame
runtime/sys_windows_386.s: [386] ctrlhandler: 4(SP) should be _type+0(FP)
runtime/sys_windows_386.s: [386] setldt: function setldt missing Go declaration
-runtime/zcallback_windows.s: [386] callbackasm: function callbackasm missing Go declaration
runtime/sys_windows_386.s: [386] callbackasm1+0: function callbackasm1+0 missing Go declaration
runtime/sys_windows_386.s: [386] tstart: function tstart missing Go declaration
runtime/sys_windows_386.s: [386] tstart_stdcall: RET without writing to 4-byte ret+4(FP)
diff --git a/src/cmd/vet/all/whitelist/windows_amd64.txt b/src/cmd/vet/all/whitelist/windows_amd64.txt
index 3be4602579..676e6baf71 100644
--- a/src/cmd/vet/all/whitelist/windows_amd64.txt
+++ b/src/cmd/vet/all/whitelist/windows_amd64.txt
@@ -6,4 +6,3 @@ runtime/sys_windows_amd64.s: [amd64] callbackasm1: function callbackasm1 missing
runtime/sys_windows_amd64.s: [amd64] tstart_stdcall: RET without writing to 4-byte ret+8(FP)
runtime/sys_windows_amd64.s: [amd64] settls: function settls missing Go declaration
runtime/sys_windows_amd64.s: [amd64] cannot check cross-package assembly function: now is in package time
-runtime/zcallback_windows.s: [amd64] callbackasm: function callbackasm missing Go declaration
diff --git a/src/cmd/vet/main.go b/src/cmd/vet/main.go
index c50d4885a0..6e885121c8 100644
--- a/src/cmd/vet/main.go
+++ b/src/cmd/vet/main.go
@@ -273,7 +273,7 @@ func main() {
// Accept space-separated tags because that matches
// the go command's other subcommands.
// Accept commas because go tool vet traditionally has.
- tagList = strings.Fields(strings.Replace(*tags, ",", " ", -1))
+ tagList = strings.Fields(strings.ReplaceAll(*tags, ",", " "))
initPrintFlags()
initUnusedFlags()
@@ -467,6 +467,7 @@ type Package struct {
path string
defs map[*ast.Ident]types.Object
uses map[*ast.Ident]types.Object
+ implicits map[ast.Node]types.Object
selectors map[*ast.SelectorExpr]*types.Selection
types map[ast.Expr]types.TypeAndValue
spans map[types.Object]Span
diff --git a/src/cmd/vet/shadow.go b/src/cmd/vet/shadow.go
index 29c952fd88..47a48834bf 100644
--- a/src/cmd/vet/shadow.go
+++ b/src/cmd/vet/shadow.go
@@ -86,14 +86,11 @@ func (s Span) contains(pos token.Pos) bool {
return s.min <= pos && pos < s.max
}
-// growSpan expands the span for the object to contain the instance represented
-// by the identifier.
-func (pkg *Package) growSpan(ident *ast.Ident, obj types.Object) {
+// growSpan expands the span for the object to contain the source range [pos, end).
+func (pkg *Package) growSpan(obj types.Object, pos, end token.Pos) {
if *strictShadowing {
return // No need
}
- pos := ident.Pos()
- end := ident.End()
span, ok := pkg.spans[obj]
if ok {
if span.min > pos {
@@ -232,7 +229,7 @@ func checkShadowing(f *File, ident *ast.Ident) {
// the shadowing identifier.
span, ok := f.pkg.spans[shadowed]
if !ok {
- f.Badf(ident.Pos(), "internal error: no range for %q", ident.Name)
+ f.Badf(shadowed.Pos(), "internal error: no range for %q", shadowed.Name())
return
}
if !span.contains(ident.Pos()) {
diff --git a/src/cmd/vet/testdata/shadow.go b/src/cmd/vet/testdata/shadow.go
index c55cb2772a..d10fde2b81 100644
--- a/src/cmd/vet/testdata/shadow.go
+++ b/src/cmd/vet/testdata/shadow.go
@@ -57,3 +57,35 @@ func ShadowRead(f *os.File, buf []byte) (err error) {
func one() int {
return 1
}
+
+// Must not complain with an internal error for the
+// implicitly declared type switch variable v.
+func issue26725(x interface{}) int {
+ switch v := x.(type) {
+ case int, int32:
+ if v, ok := x.(int); ok {
+ return v
+ }
+ case int64:
+ return int(v)
+ }
+ return 0
+}
+
+// Verify that implicitly declared variables from
+// type switches are considered in shadowing analysis.
+func shadowTypeSwitch(a interface{}) {
+ switch t := a.(type) {
+ case int:
+ {
+ t := 0 // ERROR "declaration of .t. shadows declaration at shadow.go:78"
+ _ = t
+ }
+ _ = t
+ case uint:
+ {
+ t := uint(0) // OK because t is not mentioned later in this function
+ _ = t
+ }
+ }
+}
diff --git a/src/cmd/vet/types.go b/src/cmd/vet/types.go
index 5f8e481e01..3ff4b5966d 100644
--- a/src/cmd/vet/types.go
+++ b/src/cmd/vet/types.go
@@ -73,6 +73,7 @@ func (pkg *Package) check(fs *token.FileSet, astFiles []*ast.File) []error {
}
pkg.defs = make(map[*ast.Ident]types.Object)
pkg.uses = make(map[*ast.Ident]types.Object)
+ pkg.implicits = make(map[ast.Node]types.Object)
pkg.selectors = make(map[*ast.SelectorExpr]*types.Selection)
pkg.spans = make(map[types.Object]Span)
pkg.types = make(map[ast.Expr]types.TypeAndValue)
@@ -95,6 +96,7 @@ func (pkg *Package) check(fs *token.FileSet, astFiles []*ast.File) []error {
Types: pkg.types,
Defs: pkg.defs,
Uses: pkg.uses,
+ Implicits: pkg.implicits,
}
typesPkg, err := config.Check(pkg.path, fs, astFiles, info)
if len(allErrors) == 0 && err != nil {
@@ -103,10 +105,28 @@ func (pkg *Package) check(fs *token.FileSet, astFiles []*ast.File) []error {
pkg.typesPkg = typesPkg
// update spans
for id, obj := range pkg.defs {
- pkg.growSpan(id, obj)
+ // Ignore identifiers that don't denote objects
+ // (package names, symbolic variables such as t
+ // in t := x.(type) of type switch headers).
+ if obj != nil {
+ pkg.growSpan(obj, id.Pos(), id.End())
+ }
}
for id, obj := range pkg.uses {
- pkg.growSpan(id, obj)
+ pkg.growSpan(obj, id.Pos(), id.End())
+ }
+ for node, obj := range pkg.implicits {
+ // A type switch with a short variable declaration
+ // such as t := x.(type) doesn't declare the symbolic
+ // variable (t in the example) at the switch header;
+ // instead a new variable t (with specific type) is
+ // declared implicitly for each case. Such variables
+ // are found in the types.Info.Implicits (not Defs)
+ // map. Add them here, assuming they are declared at
+ // the type cases' colon ":".
+ if cc, ok := node.(*ast.CaseClause); ok {
+ pkg.growSpan(obj, cc.Colon, cc.Colon)
+ }
}
return allErrors
}
diff --git a/src/cmd/vet/vet_test.go b/src/cmd/vet/vet_test.go
index 90665d77bc..df84d6cc98 100644
--- a/src/cmd/vet/vet_test.go
+++ b/src/cmd/vet/vet_test.go
@@ -243,7 +243,7 @@ func errorCheck(outStr string, wantAuto bool, fullshort ...string) (err error) {
for i := range out {
for j := 0; j < len(fullshort); j += 2 {
full, short := fullshort[j], fullshort[j+1]
- out[i] = strings.Replace(out[i], full, short, -1)
+ out[i] = strings.ReplaceAll(out[i], full, short)
}
}