about | summary | refs | log | tree | commit | diff
path: root/src/runtime/malloc.go
diff options
context:
space:
mode:
authorMatthew Dempsky <mdempsky@google.com>2015-10-15 14:33:50 -0700
committerMatthew Dempsky <mdempsky@google.com>2015-10-15 21:48:37 +0000
commit4c2465d47d8c706832bbc57668680a3ffc4d800f (patch)
tree8351222baa9c0f53cc0786bea2e26268a5b6fc0f /src/runtime/malloc.go
parent67722fea5015d43d8fc9a533533c9b580fa99fc8 (diff)
downloadgo-4c2465d47d8c706832bbc57668680a3ffc4d800f.tar.xz
runtime: use unsafe.Pointer(x) instead of (unsafe.Pointer)(x)
This isn't C anymore. No binary change to pkg/linux_amd64/runtime.a.

Change-Id: I24d66b0f5ac888f432b874aac684b1395e7c8345
Reviewed-on: https://go-review.googlesource.com/15903
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Diffstat (limited to 'src/runtime/malloc.go')
-rw-r--r--  src/runtime/malloc.go  16
1 file changed, 8 insertions, 8 deletions
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 6c7db0ffff..4ce159c267 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -397,7 +397,7 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
// TODO: It would be bad if part of the arena
// is reserved and part is not.
var reserved bool
- p := uintptr(sysReserve((unsafe.Pointer)(h.arena_end), p_size, &reserved))
+ p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
if p == 0 {
return nil
}
@@ -415,7 +415,7 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
h.arena_reserved = reserved
} else {
var stat uint64
- sysFree((unsafe.Pointer)(p), p_size, &stat)
+ sysFree(unsafe.Pointer(p), p_size, &stat)
}
}
}
@@ -423,18 +423,18 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
if n <= uintptr(h.arena_end)-uintptr(h.arena_used) {
// Keep taking from our reservation.
p := h.arena_used
- sysMap((unsafe.Pointer)(p), n, h.arena_reserved, &memstats.heap_sys)
+ sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
mHeap_MapBits(h, p+n)
mHeap_MapSpans(h, p+n)
h.arena_used = p + n
if raceenabled {
- racemapshadow((unsafe.Pointer)(p), n)
+ racemapshadow(unsafe.Pointer(p), n)
}
if uintptr(p)&(_PageSize-1) != 0 {
throw("misrounded allocation in MHeap_SysAlloc")
}
- return (unsafe.Pointer)(p)
+ return unsafe.Pointer(p)
}
// If using 64-bit, our reservation is all we have.
@@ -453,7 +453,7 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
if p < h.arena_start || uintptr(p)+p_size-uintptr(h.arena_start) >= _MaxArena32 {
print("runtime: memory allocated by OS (", p, ") not in usable range [", hex(h.arena_start), ",", hex(h.arena_start+_MaxArena32), ")\n")
- sysFree((unsafe.Pointer)(p), p_size, &memstats.heap_sys)
+ sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
return nil
}
@@ -467,14 +467,14 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
h.arena_end = p_end
}
if raceenabled {
- racemapshadow((unsafe.Pointer)(p), n)
+ racemapshadow(unsafe.Pointer(p), n)
}
}
if uintptr(p)&(_PageSize-1) != 0 {
throw("misrounded allocation in MHeap_SysAlloc")
}
- return (unsafe.Pointer)(p)
+ return unsafe.Pointer(p)
}
// base address for all 0-byte allocations