about summary refs log tree commit diff
path: root/src/runtime
diff options
context:
space:
mode:
author    Russ Cox <rsc@golang.org>    2017-11-20 09:21:00 -0500
committer Russ Cox <rsc@golang.org>    2017-11-20 09:21:00 -0500
commit   cda3c6f91d7c3cdd370cbc4c34f34fd597d028bd (patch)
tree     e70ceeaa27261783f88e5c1ca935b3ff60078f90 /src/runtime
parent   adc1f587ac20d76434aa140413afc537a8aaabc7 (diff)
parent   2ea7d3461bb41d0ae12b56ee52d43314bcdb97f9 (diff)
download go-cda3c6f91d7c3cdd370cbc4c34f34fd597d028bd.tar.xz
[dev.boringcrypto] all: merge go1.9.2 into dev.boringcrypto
Change-Id: I695e804ad8bbb6d90a28108bcf8623fc2bfab659
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/chan_test.go | 57
-rw-r--r--  src/runtime/cpuprof.go   |  1
-rw-r--r--  src/runtime/gc_test.go   | 18
-rw-r--r--  src/runtime/mgc.go       |  5
-rw-r--r--  src/runtime/proc.go      | 16
-rw-r--r--  src/runtime/stubs.go     |  8
-rw-r--r--  src/runtime/time.go      |  8
7 files changed, 104 insertions, 9 deletions
diff --git a/src/runtime/chan_test.go b/src/runtime/chan_test.go
index a75fa1b992..0c94cf1a63 100644
--- a/src/runtime/chan_test.go
+++ b/src/runtime/chan_test.go
@@ -5,6 +5,7 @@
package runtime_test
import (
+ "math"
"runtime"
"sync"
"sync/atomic"
@@ -430,6 +431,62 @@ func TestSelectStress(t *testing.T) {
wg.Wait()
}
+func TestSelectFairness(t *testing.T) {
+ const trials = 10000
+ c1 := make(chan byte, trials+1)
+ c2 := make(chan byte, trials+1)
+ for i := 0; i < trials+1; i++ {
+ c1 <- 1
+ c2 <- 2
+ }
+ c3 := make(chan byte)
+ c4 := make(chan byte)
+ out := make(chan byte)
+ done := make(chan byte)
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ var b byte
+ select {
+ case b = <-c3:
+ case b = <-c4:
+ case b = <-c1:
+ case b = <-c2:
+ }
+ select {
+ case out <- b:
+ case <-done:
+ return
+ }
+ }
+ }()
+ cnt1, cnt2 := 0, 0
+ for i := 0; i < trials; i++ {
+ switch b := <-out; b {
+ case 1:
+ cnt1++
+ case 2:
+ cnt2++
+ default:
+ t.Fatalf("unexpected value %d on channel", b)
+ }
+ }
+ // If the select in the goroutine is fair,
+ // cnt1 and cnt2 should be about the same value.
+ // With 10,000 trials, the expected margin of error at
+ // a confidence level of five nines is 4.4172 / (2 * Sqrt(10000)).
+ r := float64(cnt1) / trials
+ e := math.Abs(r - 0.5)
+ t.Log(cnt1, cnt2, r, e)
+ if e > 4.4172/(2*math.Sqrt(trials)) {
+ t.Errorf("unfair select: in %d trials, results were %d, %d", trials, cnt1, cnt2)
+ }
+ close(done)
+ wg.Wait()
+}
+
func TestChanSendInterface(t *testing.T) {
type mt struct{}
m := &mt{}
diff --git a/src/runtime/cpuprof.go b/src/runtime/cpuprof.go
index fb841a9f3d..e00dcb1bbd 100644
--- a/src/runtime/cpuprof.go
+++ b/src/runtime/cpuprof.go
@@ -160,6 +160,7 @@ func (p *cpuProfile) addExtra() {
funcPC(_ExternalCode) + sys.PCQuantum,
}
cpuprof.log.write(nil, 0, hdr[:], lostStk[:])
+ p.lostExtra = 0
}
}
diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go
index 03acc8aaa6..0620f2d61e 100644
--- a/src/runtime/gc_test.go
+++ b/src/runtime/gc_test.go
@@ -170,7 +170,7 @@ func TestPeriodicGC(t *testing.T) {
// slack if things are slow.
var numGCs uint32
const want = 2
- for i := 0; i < 20 && numGCs < want; i++ {
+ for i := 0; i < 200 && numGCs < want; i++ {
time.Sleep(5 * time.Millisecond)
// Test that periodic GC actually happened.
@@ -499,3 +499,19 @@ func BenchmarkReadMemStats(b *testing.B) {
hugeSink = nil
}
+
+func TestUserForcedGC(t *testing.T) {
+ // Test that runtime.GC() triggers a GC even if GOGC=off.
+ defer debug.SetGCPercent(debug.SetGCPercent(-1))
+
+ var ms1, ms2 runtime.MemStats
+ runtime.ReadMemStats(&ms1)
+ runtime.GC()
+ runtime.ReadMemStats(&ms2)
+ if ms1.NumGC == ms2.NumGC {
+ t.Fatalf("runtime.GC() did not trigger GC")
+ }
+ if ms1.NumForcedGC == ms2.NumForcedGC {
+ t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
+ }
+}
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 111fa781e1..b708720322 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -1158,7 +1158,7 @@ func (t gcTrigger) test() bool {
if t.kind == gcTriggerAlways {
return true
}
- if gcphase != _GCoff || gcpercent < 0 {
+ if gcphase != _GCoff {
return false
}
switch t.kind {
@@ -1169,6 +1169,9 @@ func (t gcTrigger) test() bool {
// own write.
return memstats.heap_live >= memstats.gc_trigger
case gcTriggerTime:
+ if gcpercent < 0 {
+ return false
+ }
lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime))
return lastgc != 0 && t.now-lastgc > forcegcperiod
case gcTriggerCycle:
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index ed333bb92e..5787991f07 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -96,6 +96,9 @@ var main_init_done chan bool
//go:linkname main_main main.main
func main_main()
+// mainStarted indicates that the main M has started.
+var mainStarted bool
+
// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64
@@ -119,8 +122,8 @@ func main() {
maxstacksize = 250000000
}
- // Record when the world started.
- runtimeInitTime = nanotime()
+ // Allow newproc to start new Ms.
+ mainStarted = true
systemstack(func() {
newm(sysmon, nil)
@@ -139,6 +142,9 @@ func main() {
}
runtime_init() // must be before defer
+ if nanotime() == 0 {
+ throw("nanotime returning zero")
+ }
// Defer unlock so that runtime.Goexit during init does the unlock too.
needUnlock := true
@@ -148,6 +154,10 @@ func main() {
}
}()
+ // Record when the world started. Must be after runtime_init
+ // because nanotime on some platforms depends on startNano.
+ runtimeInitTime = nanotime()
+
gcenable()
main_init_done = make(chan bool)
@@ -3024,7 +3034,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
}
runqput(_p_, newg, true)
- if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && runtimeInitTime != 0 {
+ if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted {
wakep()
}
_g_.m.locks--
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index c4f32a8482..72d21187ec 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -105,9 +105,11 @@ func fastrand() uint32 {
//go:nosplit
func fastrandn(n uint32) uint32 {
- // This is similar to fastrand() % n, but faster.
- // See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
- return uint32(uint64(fastrand()) * uint64(n) >> 32)
+ // Don't be clever.
+ // fastrand is not good enough for cleverness.
+ // Just use mod.
+ // See golang.org/issue/21806.
+ return fastrand() % n
}
//go:linkname sync_fastrand sync.fastrand
diff --git a/src/runtime/time.go b/src/runtime/time.go
index abf200d7d3..23f61d62d0 100644
--- a/src/runtime/time.go
+++ b/src/runtime/time.go
@@ -309,4 +309,10 @@ func time_runtimeNano() int64 {
return nanotime()
}
-var startNano int64 = nanotime()
+// Monotonic times are reported as offsets from startNano.
+// We initialize startNano to nanotime() - 1 so that on systems where
+// monotonic time resolution is fairly low (e.g. Windows 2008
+// which appears to have a default resolution of 15ms),
+// we avoid ever reporting a nanotime of 0.
+// (Callers may want to use 0 as "time not set".)
+var startNano int64 = nanotime() - 1