about summary refs log tree commit diff
path: root/src/runtime
diff options
context:
space:
mode:
author    Russ Cox <rsc@golang.org> 2014-11-20 11:48:08 -0500
committer Russ Cox <rsc@golang.org> 2014-11-20 11:48:08 -0500
commit    50e0749f8730e88e22e552449049b93ce2a418ac (patch)
tree      fa3e2e93d26fe9de978bbaeeefbc2a427780b329 /src/runtime
parent    754de8d40331ecef4fde116ab5f10f3a8c8904ef (diff)
parent    2b3f37908060837f8715c61af110b01b8a590c7c (diff)
download  go-50e0749f8730e88e22e552449049b93ce2a418ac.tar.xz
[dev.cc] all: merge default (e4ab8f908aac) into dev.cc
TBR=austin CC=golang-codereviews https://golang.org/cl/179040044
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/chan.go                |  1
-rw-r--r--  src/runtime/heapdump.go            |  2
-rw-r--r--  src/runtime/malloc.go              | 11
-rw-r--r--  src/runtime/mgc0.go                | 16
-rw-r--r--  src/runtime/proc.go                | 10
-rw-r--r--  src/runtime/race/race_unix_test.go | 30
-rw-r--r--  src/runtime/race1.go               | 37
-rw-r--r--  src/runtime/race_amd64.s           | 43
-rw-r--r--  src/runtime/select.go              |  2
-rw-r--r--  src/runtime/sema.go                |  2
-rw-r--r--  src/runtime/stubs.go               |  2
11 files changed, 143 insertions, 13 deletions
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index bb0110f94c..330422ad09 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -630,6 +630,7 @@ func (q *waitq) dequeue() *sudog {
return nil
}
q.first = sgp.next
+ sgp.next = nil
if q.last == sgp {
q.last = nil
}
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index c942e01639..0c1a60c8bb 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -7,7 +7,7 @@
// finalizers, etc.) to a file.
// The format of the dumped file is described at
-// http://code.google.com/p/go-wiki/wiki/heapdump14
+// http://golang.org/s/go14heapdump.
package runtime
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 20cb6818d2..d73d1ba6a6 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -477,6 +477,8 @@ func GC() {
// linker-provided
var noptrdata struct{}
+var enoptrdata struct{}
+var noptrbss struct{}
var enoptrbss struct{}
// SetFinalizer sets the finalizer associated with x to f.
@@ -553,8 +555,13 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
// func main() {
// runtime.SetFinalizer(Foo, nil)
// }
- // The segments are, in order: text, rodata, noptrdata, data, bss, noptrbss.
- if uintptr(unsafe.Pointer(&noptrdata)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&enoptrbss)) {
+ // The relevant segments are: noptrdata, data, bss, noptrbss.
+ // We cannot assume they are in any order or even contiguous,
+ // due to external linking.
+ if uintptr(unsafe.Pointer(&noptrdata)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&enoptrdata)) ||
+ uintptr(unsafe.Pointer(&data)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&edata)) ||
+ uintptr(unsafe.Pointer(&bss)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&ebss)) ||
+ uintptr(unsafe.Pointer(&noptrbss)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&enoptrbss)) {
return
}
gothrow("runtime.SetFinalizer: pointer not in allocated block")
diff --git a/src/runtime/mgc0.go b/src/runtime/mgc0.go
index 6d4ae61c11..38406f33a8 100644
--- a/src/runtime/mgc0.go
+++ b/src/runtime/mgc0.go
@@ -51,10 +51,26 @@ func clearpools() {
if c := p.mcache; c != nil {
c.tiny = nil
c.tinysize = 0
+
+ // disconnect cached list before dropping it on the floor,
+ // so that a dangling ref to one entry does not pin all of them.
+ var sg, sgnext *sudog
+ for sg = c.sudogcache; sg != nil; sg = sgnext {
+ sgnext = sg.next
+ sg.next = nil
+ }
c.sudogcache = nil
}
+
// clear defer pools
for i := range p.deferpool {
+ // disconnect cached list before dropping it on the floor,
+ // so that a dangling ref to one entry does not pin all of them.
+ var d, dlink *_defer
+ for d = p.deferpool[i]; d != nil; d = dlink {
+ dlink = d.link
+ d.link = nil
+ }
p.deferpool[i] = nil
}
}
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 05ecb3d9eb..50920afe8b 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -165,6 +165,7 @@ func acquireSudog() *sudog {
gothrow("acquireSudog: found s.elem != nil in cache")
}
c.sudogcache = s.next
+ s.next = nil
return s
}
@@ -190,6 +191,15 @@ func releaseSudog(s *sudog) {
if s.selectdone != nil {
gothrow("runtime: sudog with non-nil selectdone")
}
+ if s.next != nil {
+ gothrow("runtime: sudog with non-nil next")
+ }
+ if s.prev != nil {
+ gothrow("runtime: sudog with non-nil prev")
+ }
+ if s.waitlink != nil {
+ gothrow("runtime: sudog with non-nil waitlink")
+ }
gp := getg()
if gp.param != nil {
gothrow("runtime: releaseSudog with non-nil gp.param")
diff --git a/src/runtime/race/race_unix_test.go b/src/runtime/race/race_unix_test.go
new file mode 100644
index 0000000000..84f0acece6
--- /dev/null
+++ b/src/runtime/race/race_unix_test.go
@@ -0,0 +1,30 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build race
+// +build darwin freebsd linux
+
+package race_test
+
+import (
+ "sync/atomic"
+ "syscall"
+ "testing"
+ "unsafe"
+)
+
+// Test that race detector does not crash when accessing non-Go allocated memory (issue 9136).
+func TestNonGoMemory(t *testing.T) {
+ data, err := syscall.Mmap(-1, 0, 4096, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE)
+ if err != nil {
+ t.Fatalf("failed to mmap memory: %v", err)
+ }
+ p := (*uint32)(unsafe.Pointer(&data[0]))
+ atomic.AddUint32(p, 1)
+ (*p)++
+ if *p != 2 {
+ t.Fatalf("data[0] = %v, expect 2", *p)
+ }
+ syscall.Munmap(data)
+}
diff --git a/src/runtime/race1.go b/src/runtime/race1.go
index 4c580429c8..2ec2bee65b 100644
--- a/src/runtime/race1.go
+++ b/src/runtime/race1.go
@@ -81,6 +81,10 @@ var __tsan_go_ignore_sync_end byte
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange
+// start/end of global data (data+bss).
+var racedatastart uintptr
+var racedataend uintptr
+
// start/end of heap for race_amd64.s
var racearenastart uintptr
var racearenaend uintptr
@@ -99,7 +103,7 @@ func racecall(*byte, uintptr, uintptr, uintptr, uintptr)
//go:nosplit
func isvalidaddr(addr unsafe.Pointer) bool {
return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
- uintptr(unsafe.Pointer(&noptrdata)) <= uintptr(addr) && uintptr(addr) < uintptr(unsafe.Pointer(&enoptrbss))
+ racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
}
//go:nosplit
@@ -113,9 +117,36 @@ func raceinit() uintptr {
racecall(&__tsan_init, uintptr(unsafe.Pointer(&racectx)), funcPC(racesymbolizethunk), 0, 0)
// Round data segment to page boundaries, because it's used in mmap().
- start := uintptr(unsafe.Pointer(&noptrdata)) &^ (_PageSize - 1)
- size := round(uintptr(unsafe.Pointer(&enoptrbss))-start, _PageSize)
+ start := ^uintptr(0)
+ end := uintptr(0)
+ if start > uintptr(unsafe.Pointer(&noptrdata)) {
+ start = uintptr(unsafe.Pointer(&noptrdata))
+ }
+ if start > uintptr(unsafe.Pointer(&data)) {
+ start = uintptr(unsafe.Pointer(&data))
+ }
+ if start > uintptr(unsafe.Pointer(&noptrbss)) {
+ start = uintptr(unsafe.Pointer(&noptrbss))
+ }
+ if start > uintptr(unsafe.Pointer(&bss)) {
+ start = uintptr(unsafe.Pointer(&bss))
+ }
+ if end < uintptr(unsafe.Pointer(&enoptrdata)) {
+ end = uintptr(unsafe.Pointer(&enoptrdata))
+ }
+ if end < uintptr(unsafe.Pointer(&edata)) {
+ end = uintptr(unsafe.Pointer(&edata))
+ }
+ if end < uintptr(unsafe.Pointer(&enoptrbss)) {
+ end = uintptr(unsafe.Pointer(&enoptrbss))
+ }
+ if end < uintptr(unsafe.Pointer(&ebss)) {
+ end = uintptr(unsafe.Pointer(&ebss))
+ }
+ size := round(end-start, _PageSize)
racecall(&__tsan_map_shadow, start, size, 0, 0)
+ racedatastart = start
+ racedataend = start + size
return racectx
}
diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s
index a7f44870a8..d54d9798f0 100644
--- a/src/runtime/race_amd64.s
+++ b/src/runtime/race_amd64.s
@@ -139,17 +139,15 @@ TEXT racecalladdr<>(SB), NOSPLIT, $0-0
get_tls(R12)
MOVQ g(R12), R14
MOVQ g_racectx(R14), RARG0 // goroutine context
- // Check that addr is within [arenastart, arenaend) or within [noptrdata, enoptrbss).
+ // Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
CMPQ RARG1, runtime·racearenastart(SB)
JB data
CMPQ RARG1, runtime·racearenaend(SB)
JB call
data:
- MOVQ $runtime·noptrdata(SB), R13
- CMPQ RARG1, R13
+ CMPQ RARG1, runtime·racedatastart(SB)
JB ret
- MOVQ $runtime·enoptrbss(SB), R13
- CMPQ RARG1, R13
+ CMPQ RARG1, runtime·racedataend(SB)
JAE ret
call:
MOVQ AX, AX // w/o this 6a miscompiles this function
@@ -167,6 +165,7 @@ TEXT runtime·racefuncenter(SB), NOSPLIT, $0-8
MOVQ callpc+0(FP), RARG1
// void __tsan_func_enter(ThreadState *thr, void *pc);
MOVQ $__tsan_func_enter(SB), AX
+ // racecall<> preserves R15
CALL racecall<>(SB)
MOVQ R15, DX // restore function entry context
RET
@@ -307,13 +306,45 @@ TEXT sync∕atomic·CompareAndSwapPointer(SB), NOSPLIT, $0-0
TEXT racecallatomic<>(SB), NOSPLIT, $0-0
// Trigger SIGSEGV early.
MOVQ 16(SP), R12
- MOVL (R12), R12
+ MOVL (R12), R13
+ // Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
+ CMPQ R12, runtime·racearenastart(SB)
+ JB racecallatomic_data
+ CMPQ R12, runtime·racearenaend(SB)
+ JB racecallatomic_ok
+racecallatomic_data:
+ CMPQ R12, runtime·racedatastart(SB)
+ JB racecallatomic_ignore
+ CMPQ R12, runtime·racedataend(SB)
+ JAE racecallatomic_ignore
+racecallatomic_ok:
+ // Addr is within the good range, call the atomic function.
get_tls(R12)
MOVQ g(R12), R14
MOVQ g_racectx(R14), RARG0 // goroutine context
MOVQ 8(SP), RARG1 // caller pc
MOVQ (SP), RARG2 // pc
LEAQ 16(SP), RARG3 // arguments
+ JMP racecall<>(SB) // does not return
+racecallatomic_ignore:
+ // Addr is outside the good range.
+ // Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op.
+ // An attempt to synchronize on the address would cause crash.
+ MOVQ AX, R15 // remember the original function
+ MOVQ $__tsan_go_ignore_sync_begin(SB), AX
+ MOVQ g(R12), R14
+ MOVQ g_racectx(R14), RARG0 // goroutine context
+ CALL racecall<>(SB)
+ MOVQ R15, AX // restore the original function
+ // Call the atomic function.
+ MOVQ g_racectx(R14), RARG0 // goroutine context
+ MOVQ 8(SP), RARG1 // caller pc
+ MOVQ (SP), RARG2 // pc
+ LEAQ 16(SP), RARG3 // arguments
+ CALL racecall<>(SB)
+ // Call __tsan_go_ignore_sync_end.
+ MOVQ $__tsan_go_ignore_sync_end(SB), AX
+ MOVQ g_racectx(R14), RARG0 // goroutine context
JMP racecall<>(SB)
// void runtime·racecall(void(*f)(...), ...)
diff --git a/src/runtime/select.go b/src/runtime/select.go
index fe9178763e..6717b93956 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -404,6 +404,7 @@ loop:
}
}
sgnext = sglist.waitlink
+ sglist.waitlink = nil
releaseSudog(sglist)
sglist = sgnext
}
@@ -641,6 +642,7 @@ func (q *waitq) dequeueSudoG(s *sudog) {
if q.last == sgp {
q.last = prevsgp
}
+ s.next = nil
return
}
l = &sgp.next
diff --git a/src/runtime/sema.go b/src/runtime/sema.go
index d2a028c01b..26dbd30ea3 100644
--- a/src/runtime/sema.go
+++ b/src/runtime/sema.go
@@ -201,6 +201,7 @@ func syncsemacquire(s *syncSema) {
}
unlock(&s.lock)
if wake != nil {
+ wake.next = nil
goready(wake.g)
}
} else {
@@ -242,6 +243,7 @@ func syncsemrelease(s *syncSema, n uint32) {
if wake.releasetime != 0 {
wake.releasetime = cputicks()
}
+ wake.next = nil
goready(wake.g)
n--
}
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index 217307a1ed..aa7577cf94 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -154,7 +154,7 @@ func setcallerpc(argp unsafe.Pointer, pc uintptr)
//
// func f(arg1, arg2, arg3 int) {
// pc := getcallerpc(unsafe.Pointer(&arg1))
-// sp := getcallerpc(unsafe.Pointer(&arg2))
+// sp := getcallersp(unsafe.Pointer(&arg1))
// }
//
// These two lines find the PC and SP immediately following