diff options
| author | Russ Cox <rsc@golang.org> | 2025-10-29 07:27:38 -0400 |
|---|---|---|
| committer | Gopher Robot <gobot@golang.org> | 2025-10-30 09:17:59 -0700 |
| commit | 235b4e729d22bbca25c372bcccbc2613035d37aa (patch) | |
| tree | e2fdc2118ffeef7c577eb3e97546787a1810fd5c /src/cmd/compile | |
| parent | d44db293f9efabac3abf718a02e7787fb961b63f (diff) | |
| download | go-235b4e729d22bbca25c372bcccbc2613035d37aa.tar.xz | |
cmd/compile/internal/ssa: model right shift more precisely
Prove currently checks for 0 sign bit extraction (x>>63) at the
end of the pass, but it is more general and more useful
(and not really more work) to model right shift during
value range tracking. This handles sign bit extraction (both 0 and -1)
but also makes the value ranges available for proving bounds checks.
'go build -a -gcflags=-d=ssa/prove/debug=1 std'
finds 105 new things to prove.
https://gist.github.com/rsc/8ac41176e53ed9c2f1a664fc668e8336
For example, the compiler now recognizes that this code in
strconv does not need to check the second shift for being ≥ 64.
msb := xHi >> 63
retMantissa := xHi >> (msb + 38)
nor does this code in regexp:
return b < utf8.RuneSelf && specialBytes[b%16]&(1<<(b/16)) != 0
This code in math no longer has a bounds check on the first index:
if 0 <= n && n <= 308 {
return pow10postab32[uint(n)/32] * pow10tab[uint(n)%32]
}
The diff shows one "lost" proof in ycbcr.go but it's not really lost:
the expression was folded to a constant instead, and that only shows
up with debug=2. A diff of that output is at
https://gist.github.com/rsc/9139ed46c6019ae007f5a1ba4bb3250f
Change-Id: I84087311e0a303f00e2820d957a6f8b29ee22519
Reviewed-on: https://go-review.googlesource.com/c/go/+/716140
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Russ Cox <rsc@golang.org>
Reviewed-by: David Chase <drchase@google.com>
Diffstat (limited to 'src/cmd/compile')
| -rw-r--r-- | src/cmd/compile/internal/ssa/prove.go | 55 |
1 file changed, 39 insertions, 16 deletions
diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index 086e5b3a8f..4919d6ad37 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -12,6 +12,7 @@ import ( "math" "math/bits" "slices" + "strings" ) type branch int @@ -132,7 +133,7 @@ type limit struct { } func (l limit) String() string { - return fmt.Sprintf("sm,SM,um,UM=%d,%d,%d,%d", l.min, l.max, l.umin, l.umax) + return fmt.Sprintf("sm,SM=%d,%d um,UM=%d,%d", l.min, l.max, l.umin, l.umax) } func (l limit) intersect(l2 limit) limit { @@ -1965,6 +1966,30 @@ func (ft *factsTable) flowLimit(v *Value) bool { b := ft.limits[v.Args[1].ID] bitsize := uint(v.Type.Size()) * 8 return ft.newLimit(v, a.mul(b.exp2(bitsize), bitsize)) + case OpRsh64x64, OpRsh64x32, OpRsh64x16, OpRsh64x8, + OpRsh32x64, OpRsh32x32, OpRsh32x16, OpRsh32x8, + OpRsh16x64, OpRsh16x32, OpRsh16x16, OpRsh16x8, + OpRsh8x64, OpRsh8x32, OpRsh8x16, OpRsh8x8: + a := ft.limits[v.Args[0].ID] + b := ft.limits[v.Args[1].ID] + if b.min >= 0 { + // Shift of negative makes a value closer to 0 (greater), + // so if a.min is negative, v.min is a.min>>b.min instead of a.min>>b.max, + // and similarly if a.max is negative, v.max is a.max>>b.max. + // Easier to compute min and max of both than to write sign logic. 
+ vmin := min(a.min>>b.min, a.min>>b.max) + vmax := max(a.max>>b.min, a.max>>b.max) + return ft.signedMinMax(v, vmin, vmax) + } + case OpRsh64Ux64, OpRsh64Ux32, OpRsh64Ux16, OpRsh64Ux8, + OpRsh32Ux64, OpRsh32Ux32, OpRsh32Ux16, OpRsh32Ux8, + OpRsh16Ux64, OpRsh16Ux32, OpRsh16Ux16, OpRsh16Ux8, + OpRsh8Ux64, OpRsh8Ux32, OpRsh8Ux16, OpRsh8Ux8: + a := ft.limits[v.Args[0].ID] + b := ft.limits[v.Args[1].ID] + if b.min >= 0 { + return ft.unsignedMinMax(v, a.umin>>b.max, a.umax>>b.min) + } case OpDiv64, OpDiv32, OpDiv16, OpDiv8: a := ft.limits[v.Args[0].ID] b := ft.limits[v.Args[1].ID] @@ -2621,6 +2646,17 @@ var bytesizeToAnd = [...]Op{ func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) { for iv, v := range b.Values { switch v.Op { + case OpStaticLECall: + if b.Func.pass.debug > 0 && len(v.Args) == 2 { + fn := auxToCall(v.Aux).Fn + if fn != nil && strings.Contains(fn.String(), "prove") { + // Print bounds of any argument to single-arg function with "prove" in name, + // for debugging and especially for test/prove.go. + // (v.Args[1] is mem). + x := v.Args[0] + b.Func.Warnl(v.Pos, "Proved %v (%v)", ft.limits[x.ID], x) + } + } case OpSlicemask: // Replace OpSlicemask operations in b with constants where possible. cap := v.Args[0] @@ -2670,21 +2706,8 @@ func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) { case OpRsh8x8, OpRsh8x16, OpRsh8x32, OpRsh8x64, OpRsh16x8, OpRsh16x16, OpRsh16x32, OpRsh16x64, OpRsh32x8, OpRsh32x16, OpRsh32x32, OpRsh32x64, - OpRsh64x8, OpRsh64x16, OpRsh64x32, OpRsh64x64: - // Check whether, for a >> b, we know that a is non-negative - // and b is all of a's bits except the MSB. If so, a is shifted to zero. 
- bits := 8 * v.Args[0].Type.Size() - if v.Args[1].isGenericIntConst() && v.Args[1].AuxInt >= bits-1 && ft.isNonNegative(v.Args[0]) { - if b.Func.pass.debug > 0 { - b.Func.Warnl(v.Pos, "Proved %v shifts to zero", v.Op) - } - v.reset(bytesizeToConst[bits/8]) - v.AuxInt = 0 - break // Be sure not to fallthrough - this is no longer OpRsh. - } - // If the Rsh hasn't been replaced with 0, still check if it is bounded. - fallthrough - case OpLsh8x8, OpLsh8x16, OpLsh8x32, OpLsh8x64, + OpRsh64x8, OpRsh64x16, OpRsh64x32, OpRsh64x64, + OpLsh8x8, OpLsh8x16, OpLsh8x32, OpLsh8x64, OpLsh16x8, OpLsh16x16, OpLsh16x32, OpLsh16x64, OpLsh32x8, OpLsh32x16, OpLsh32x32, OpLsh32x64, OpLsh64x8, OpLsh64x16, OpLsh64x32, OpLsh64x64, |
