aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/mbitmap.go
diff options
context:
space:
mode:
author: Austin Clements <austin@google.com> 2016-10-23 11:03:56 -0400
committer: Austin Clements <austin@google.com> 2016-10-28 21:24:02 +0000
commit: 5380b22991dfb5f3bad25cd2e29f59fd07716581 (patch)
tree: cb0d104a0c849385c994f50195b36f41cd774d2f /src/runtime/mbitmap.go
parent: ee3d20129a89047ccb4a4e157688d2f24db8f343 (diff)
download: go-5380b22991dfb5f3bad25cd2e29f59fd07716581.tar.xz
runtime: implement unconditional hybrid barrier
This implements the unconditional version of the hybrid deletion write barrier, which always shades both the old and new pointer. It's unconditional for now because barriers on channel operations require checking both the source and destination stacks and we don't have a way to funnel this information into the write barrier at the moment. As part of this change, we modify the typed memclr operations introduced earlier to invoke the write barrier. This has basically no overall effect on benchmark performance. This is good, since it indicates that neither the extra shade nor the new bulk clear barriers have much effect. It also has little effect on latency. This is expected, since we haven't yet modified mark termination to take advantage of the hybrid barrier. Updates #17503. Change-Id: Iebedf84af2f0e857bd5d3a2d525f760b5cf7224b Reviewed-on: https://go-review.googlesource.com/31765 Reviewed-by: Rick Hudson <rlh@golang.org>
Diffstat (limited to 'src/runtime/mbitmap.go')
-rw-r--r-- src/runtime/mbitmap.go | 34
1 file changed, 26 insertions, 8 deletions
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index ddbe3efc96..b6d31055b5 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -553,6 +553,10 @@ func (h heapBits) setCheckmarked(size uintptr) {
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
//
+// As a special case, src == 0 indicates that this is being used for a
+// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
+// barrier.
+//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
@@ -618,13 +622,23 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
}
h := heapBitsForAddr(dst)
- for i := uintptr(0); i < size; i += sys.PtrSize {
- if h.isPointer() {
- dstx := (*uintptr)(unsafe.Pointer(dst + i))
- srcx := (*uintptr)(unsafe.Pointer(src + i))
- writebarrierptr_prewrite1(dstx, *srcx)
+ if src == 0 {
+ for i := uintptr(0); i < size; i += sys.PtrSize {
+ if h.isPointer() {
+ dstx := (*uintptr)(unsafe.Pointer(dst + i))
+ writebarrierptr_prewrite1(dstx, 0)
+ }
+ h = h.next()
+ }
+ } else {
+ for i := uintptr(0); i < size; i += sys.PtrSize {
+ if h.isPointer() {
+ dstx := (*uintptr)(unsafe.Pointer(dst + i))
+ srcx := (*uintptr)(unsafe.Pointer(src + i))
+ writebarrierptr_prewrite1(dstx, *srcx)
+ }
+ h = h.next()
}
- h = h.next()
}
}
@@ -653,8 +667,12 @@ func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
}
if *bits&mask != 0 {
dstx := (*uintptr)(unsafe.Pointer(dst + i))
- srcx := (*uintptr)(unsafe.Pointer(src + i))
- writebarrierptr_prewrite1(dstx, *srcx)
+ if src == 0 {
+ writebarrierptr_prewrite1(dstx, 0)
+ } else {
+ srcx := (*uintptr)(unsafe.Pointer(src + i))
+ writebarrierptr_prewrite1(dstx, *srcx)
+ }
}
mask <<= 1
}