aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/rand.go
diff options
context:
space:
mode:
authorGavin Lam <gavin.oss@tutamail.com>2026-01-23 03:44:50 +0000
committerGopher Robot <gobot@golang.org>2026-01-23 14:41:52 -0800
commit27fcec4d8f3041e845762303d85d868cc3e351e8 (patch)
treed1e01667e89ba83ddf89296a22921b2e39f76b04 /src/runtime/rand.go
parent478d86446e88dc9e0b46e08914cb564d7c705d1e (diff)
downloadgo-27fcec4d8f3041e845762303d85d868cc3e351e8.tar.xz
runtime: speed up cheaprand and cheaprand64
The current cheaprand performs 128-bit multiplication on 64-bit numbers and truncates the result to 32 bits, which is inefficient. A 32-bit specific implementation is more performant because it performs 64-bit multiplication on 32-bit numbers instead.

The current cheaprand64 involves two cheaprand calls. Implementing it as 64-bit wyrand is significantly faster. Since cheaprand64 discards one bit, I have preserved this behavior. The underlying uint64 function is made available as cheaprandu64.

                │     old     │          new           │
                │   sec/op    │   sec/op     vs base   │
Cheaprand-8       1.358n ± 0%   1.218n ± 0%  -10.31% (n=100)
Cheaprand64-8     2.424n ± 0%   1.391n ± 0%  -42.62% (n=100)
Blocksampled-8    8.347n ± 0%   2.022n ± 0%  -75.78% (n=100)

Fixes #77149

Change-Id: Ib0b5da4a642cd34d0401b03c1d343041f8230d11
GitHub-Last-Rev: 549d8d407e2bbcaecdee0b52cbf3a513dda637fb
GitHub-Pull-Request: golang/go#77150
Reviewed-on: https://go-review.googlesource.com/c/go/+/735480
Auto-Submit: Keith Randall <khr@golang.org>
Reviewed-by: Carlos Amedee <carlos@golang.org>
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Diffstat (limited to 'src/runtime/rand.go')
-rw-r--r--src/runtime/rand.go47
1 file changed, 36 insertions, 11 deletions
diff --git a/src/runtime/rand.go b/src/runtime/rand.go
index a30845b585..353e0e0f42 100644
--- a/src/runtime/rand.go
+++ b/src/runtime/rand.go
@@ -192,7 +192,8 @@ func mrandinit(mp *m) {
}
bootstrapRandReseed() // erase key we just extracted
mp.chacha8.Init64(seed)
- mp.cheaprand = rand()
+ mp.cheaprand = uint32(rand())
+ mp.cheaprand64 = rand()
}
// randn is like rand() % n but faster.
@@ -227,14 +228,12 @@ func randn(n uint32) uint32 {
func cheaprand() uint32 {
mp := getg().m
// Implement wyrand: https://github.com/wangyi-fudan/wyhash
- // Only the platform that bits.Mul64 can be lowered
- // by the compiler should be in this list.
- if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64|
- goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le|
- goarch.IsS390x|goarch.IsRiscv64|goarch.IsLoong64 == 1 {
- mp.cheaprand += 0xa0761d6478bd642f
- hi, lo := bits.Mul64(mp.cheaprand, mp.cheaprand^0xe7037ed1a0b428db)
- return uint32(hi ^ lo)
+ // Only the platform that supports 64-bit multiplication
+ // natively should be allowed.
+ if bits.UintSize == 64 {
+ mp.cheaprand += 0x53c5ca59
+ hi, lo := bits.Mul32(mp.cheaprand, mp.cheaprand^0x74743c1b)
+ return hi ^ lo
}
// Implement xorshift64+: 2 32-bit xorshift sequences added together.
@@ -242,7 +241,7 @@ func cheaprand() uint32 {
// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
// This generator passes the SmallCrush suite, part of TestU01 framework:
// http://simul.iro.umontreal.ca/testu01/tu01.html
- t := (*[2]uint32)(unsafe.Pointer(&mp.cheaprand))
+ t := (*[2]uint32)(unsafe.Pointer(&mp.cheaprand64))
s1, s0 := t[0], t[1]
s1 ^= s1 << 17
s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
@@ -269,7 +268,33 @@ func cheaprand() uint32 {
//go:linkname cheaprand64
//go:nosplit
func cheaprand64() int64 {
- return int64(cheaprand())<<31 ^ int64(cheaprand())
+ return int64(cheaprandu64() & ^(uint64(1) << 63))
+}
+
+// cheaprandu64 is a non-cryptographic-quality 64-bit random generator
+// suitable for calling at very high frequency (such as during sampling decisions).
+// it is "cheap" in the sense of both expense and quality.
+//
+// cheaprandu64 must not be exported to other packages:
+// the rule is that other packages using runtime-provided
+// randomness must always use rand.
+//
+//go:nosplit
+func cheaprandu64() uint64 {
+ // Implement wyrand: https://github.com/wangyi-fudan/wyhash
+ // Only the platform that bits.Mul64 can be lowered
+ // by the compiler should be in this list.
+ if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64|
+ goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le|
+ goarch.IsS390x|goarch.IsRiscv64|goarch.IsLoong64 == 1 {
+ mp := getg().m
+ // Implement wyrand: https://github.com/wangyi-fudan/wyhash
+ mp.cheaprand64 += 0xa0761d6478bd642f
+ hi, lo := bits.Mul64(mp.cheaprand64, mp.cheaprand64^0xe7037ed1a0b428db)
+ return hi ^ lo
+ }
+
+ return uint64(cheaprand())<<32 | uint64(cheaprand())
}
// cheaprandn is like cheaprand() % n but faster.