diff options
| author | Youlin Feng <fengyoulin@live.com> | 2026-02-14 18:55:34 +0800 |
|---|---|---|
| committer | Gopher Robot <gobot@golang.org> | 2026-02-14 12:18:00 -0800 |
| commit | 3c8b5e673816c733f13a38b1ed1f53d7d49ea084 (patch) | |
| tree | 578e297570770c623ee6311a1ca0bcbaf4428ad6 /src/cmd | |
| parent | 6837583eec31bf197a8f16bcb431e3beb73b2aa5 (diff) | |
| download | go-3c8b5e673816c733f13a38b1ed1f53d7d49ea084.tar.xz | |
cmd/compile: avoid folding 64-bit integers into 32-bit constants
Folding a 64-bit integer into a 32-bit constant may result in a negative
integer if the value exceeds math.MaxInt32 (the maximum value of a 32-bit
signed integer). This negative value will be sign-extended to 64 bits at
runtime, leading to unexpected results when used in bitwise AND/OR
operations.
Fixes #77613
Change-Id: Idb081a3c20c28bddddcc8eff1225d62123b37a2d
Reviewed-on: https://go-review.googlesource.com/c/go/+/745581
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
Diffstat (limited to 'src/cmd')
| -rw-r--r-- | src/cmd/compile/internal/ssa/_gen/AMD64.rules | 3 | ||||
| -rw-r--r-- | src/cmd/compile/internal/ssa/rewriteAMD64.go | 21 |
2 files changed, 20 insertions, 4 deletions
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index ac3a4b0a07..6e56f71471 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -1578,7 +1578,8 @@ (MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))]) (MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) -(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVLload <t> [off] {sym} (SB) _) && symIsRO(sym) && is32BitInt(t) => (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVLload <t> [off] {sym} (SB) _) && symIsRO(sym) && is64BitInt(t) => (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) (MOVQload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))]) (MOVBQSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(int8(read8(sym, int64(off))))]) (MOVWQSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))]) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 459c33017a..4b8d6e1cdd 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -16399,19 +16399,34 @@ func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { v.AddArg(val) return true } - // match: (MOVLload [off] {sym} (SB) _) - // cond: symIsRO(sym) + // match: (MOVLload <t> [off] {sym} (SB) _) + // cond: symIsRO(sym) && is32BitInt(t) // result: (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) for { + t := v.Type off := auxIntToInt32(v.AuxInt) sym := auxToSym(v.Aux) - if v_0.Op != OpSB || !(symIsRO(sym)) { + if v_0.Op != OpSB || !(symIsRO(sym) && is32BitInt(t)) { break } v.reset(OpAMD64MOVLconst) v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))) return true } + // match: (MOVLload <t> [off] {sym} (SB) _) + // cond: symIsRO(sym) && is64BitInt(t) + // result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + for { + t := v.Type + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpSB || !(symIsRO(sym) && is64BitInt(t)) { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { |
