about summary refs log tree commit diff
path: root/src/runtime
diff options
context:
space:
mode:
author Daniel Martí <mvdan@mvdan.cc> 2017-08-17 15:51:35 +0100
committer Daniel Martí <mvdan@mvdan.cc> 2017-08-18 06:59:48 +0000
commit 59413d34c92cf5ce9b0e70e7105ed73a24849b3e (patch)
tree 858c93ecabecd2f768046e33ea11b3530b74f78d /src/runtime
parent b73d46de36e937819f34a37a46af73eb435246aa (diff)
download go-59413d34c92cf5ce9b0e70e7105ed73a24849b3e.tar.xz
all: unindent some big chunks of code
Found with mvdan.cc/unindent. Prioritized the ones with the biggest wins for now.

Change-Id: I2b032e45cdd559fc9ed5b1ee4c4de42c4c92e07b
Reviewed-on: https://go-review.googlesource.com/56470
Run-TryBot: Daniel Martí <mvdan@mvdan.cc>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Diffstat (limited to 'src/runtime')
-rw-r--r-- src/runtime/mheap.go    | 51
-rw-r--r-- src/runtime/os_linux.go | 71
-rw-r--r-- src/runtime/panic.go    | 57
-rw-r--r-- src/runtime/stack.go    | 43
4 files changed, 113 insertions(+), 109 deletions(-)
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index bf41125764..68f32aa01b 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1129,34 +1129,35 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr {
var sumreleased uintptr
for s := list.first; s != nil; s = s.next {
- if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
- start := s.base()
- end := start + s.npages<<_PageShift
- if physPageSize > _PageSize {
- // We can only release pages in
- // physPageSize blocks, so round start
- // and end in. (Otherwise, madvise
- // will round them *out* and release
- // more memory than we want.)
- start = (start + physPageSize - 1) &^ (physPageSize - 1)
- end &^= physPageSize - 1
- if end <= start {
- // start and end don't span a
- // whole physical page.
- continue
- }
- }
- len := end - start
-
- released := len - (s.npreleased << _PageShift)
- if physPageSize > _PageSize && released == 0 {
+ if (now-uint64(s.unusedsince)) <= limit || s.npreleased == s.npages {
+ continue
+ }
+ start := s.base()
+ end := start + s.npages<<_PageShift
+ if physPageSize > _PageSize {
+ // We can only release pages in
+ // physPageSize blocks, so round start
+ // and end in. (Otherwise, madvise
+ // will round them *out* and release
+ // more memory than we want.)
+ start = (start + physPageSize - 1) &^ (physPageSize - 1)
+ end &^= physPageSize - 1
+ if end <= start {
+ // start and end don't span a
+ // whole physical page.
continue
}
- memstats.heap_released += uint64(released)
- sumreleased += released
- s.npreleased = len >> _PageShift
- sysUnused(unsafe.Pointer(start), len)
}
+ len := end - start
+
+ released := len - (s.npreleased << _PageShift)
+ if physPageSize > _PageSize && released == 0 {
+ continue
+ }
+ memstats.heap_released += uint64(released)
+ sumreleased += released
+ s.npreleased = len >> _PageShift
+ sysUnused(unsafe.Pointer(start), len)
}
return sumreleased
}
diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go
index dac4de4985..83e35f4e27 100644
--- a/src/runtime/os_linux.go
+++ b/src/runtime/os_linux.go
@@ -208,45 +208,46 @@ func sysargs(argc int32, argv **byte) {
// now argv+n is auxv
auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
- if sysauxv(auxv[:]) == 0 {
- // In some situations we don't get a loader-provided
- // auxv, such as when loaded as a library on Android.
- // Fall back to /proc/self/auxv.
- fd := open(&procAuxv[0], 0 /* O_RDONLY */, 0)
- if fd < 0 {
- // On Android, /proc/self/auxv might be unreadable (issue 9229), so we fallback to
- // try using mincore to detect the physical page size.
- // mincore should return EINVAL when address is not a multiple of system page size.
- const size = 256 << 10 // size of memory region to allocate
- p := mmap(nil, size, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
- if uintptr(p) < 4096 {
- return
- }
- var n uintptr
- for n = 4 << 10; n < size; n <<= 1 {
- err := mincore(unsafe.Pointer(uintptr(p)+n), 1, &addrspace_vec[0])
- if err == 0 {
- physPageSize = n
- break
- }
- }
- if physPageSize == 0 {
- physPageSize = size
- }
- munmap(p, size)
+ if sysauxv(auxv[:]) != 0 {
+ return
+ }
+ // In some situations we don't get a loader-provided
+ // auxv, such as when loaded as a library on Android.
+ // Fall back to /proc/self/auxv.
+ fd := open(&procAuxv[0], 0 /* O_RDONLY */, 0)
+ if fd < 0 {
+ // On Android, /proc/self/auxv might be unreadable (issue 9229), so we fallback to
+ // try using mincore to detect the physical page size.
+ // mincore should return EINVAL when address is not a multiple of system page size.
+ const size = 256 << 10 // size of memory region to allocate
+ p := mmap(nil, size, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
+ if uintptr(p) < 4096 {
return
}
- var buf [128]uintptr
- n := read(fd, noescape(unsafe.Pointer(&buf[0])), int32(unsafe.Sizeof(buf)))
- closefd(fd)
- if n < 0 {
- return
+ var n uintptr
+ for n = 4 << 10; n < size; n <<= 1 {
+ err := mincore(unsafe.Pointer(uintptr(p)+n), 1, &addrspace_vec[0])
+ if err == 0 {
+ physPageSize = n
+ break
+ }
+ }
+ if physPageSize == 0 {
+ physPageSize = size
}
- // Make sure buf is terminated, even if we didn't read
- // the whole file.
- buf[len(buf)-2] = _AT_NULL
- sysauxv(buf[:])
+ munmap(p, size)
+ return
+ }
+ var buf [128]uintptr
+ n = read(fd, noescape(unsafe.Pointer(&buf[0])), int32(unsafe.Sizeof(buf)))
+ closefd(fd)
+ if n < 0 {
+ return
}
+ // Make sure buf is terminated, even if we didn't read
+ // the whole file.
+ buf[len(buf)-2] = _AT_NULL
+ sysauxv(buf[:])
}
func sysauxv(auxv []uintptr) int {
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index 43bfdd7a1e..2a7acb7797 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -244,36 +244,37 @@ func freedefer(d *_defer) {
freedeferfn()
}
sc := deferclass(uintptr(d.siz))
- if sc < uintptr(len(p{}.deferpool)) {
- pp := getg().m.p.ptr()
- if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
- // Transfer half of local cache to the central cache.
- //
- // Take this slow path on the system stack so
- // we don't grow freedefer's stack.
- systemstack(func() {
- var first, last *_defer
- for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
- n := len(pp.deferpool[sc])
- d := pp.deferpool[sc][n-1]
- pp.deferpool[sc][n-1] = nil
- pp.deferpool[sc] = pp.deferpool[sc][:n-1]
- if first == nil {
- first = d
- } else {
- last.link = d
- }
- last = d
+ if sc >= uintptr(len(p{}.deferpool)) {
+ return
+ }
+ pp := getg().m.p.ptr()
+ if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
+ // Transfer half of local cache to the central cache.
+ //
+ // Take this slow path on the system stack so
+ // we don't grow freedefer's stack.
+ systemstack(func() {
+ var first, last *_defer
+ for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
+ n := len(pp.deferpool[sc])
+ d := pp.deferpool[sc][n-1]
+ pp.deferpool[sc][n-1] = nil
+ pp.deferpool[sc] = pp.deferpool[sc][:n-1]
+ if first == nil {
+ first = d
+ } else {
+ last.link = d
}
- lock(&sched.deferlock)
- last.link = sched.deferpool[sc]
- sched.deferpool[sc] = first
- unlock(&sched.deferlock)
- })
- }
- *d = _defer{}
- pp.deferpool[sc] = append(pp.deferpool[sc], d)
+ last = d
+ }
+ lock(&sched.deferlock)
+ last.link = sched.deferpool[sc]
+ sched.deferpool[sc] = first
+ unlock(&sched.deferlock)
+ })
}
+ *d = _defer{}
+ pp.deferpool[sc] = append(pp.deferpool[sc], d)
}
// Separate function so that it can split stack.
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index d353329a39..4e60e80863 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -578,29 +578,30 @@ func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f
if stackDebug >= 4 {
print(" ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
}
- if ptrbit(&bv, i) == 1 {
- pp := (*uintptr)(add(scanp, i*sys.PtrSize))
- retry:
- p := *pp
- if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
- // Looks like a junk value in a pointer slot.
- // Live analysis wrong?
- getg().m.traceback = 2
- print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
- throw("invalid pointer found on stack")
+ if ptrbit(&bv, i) != 1 {
+ continue
+ }
+ pp := (*uintptr)(add(scanp, i*sys.PtrSize))
+ retry:
+ p := *pp
+ if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
+ // Looks like a junk value in a pointer slot.
+ // Live analysis wrong?
+ getg().m.traceback = 2
+ print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
+ throw("invalid pointer found on stack")
+ }
+ if minp <= p && p < maxp {
+ if stackDebug >= 3 {
+ print("adjust ptr ", hex(p), " ", funcname(f), "\n")
}
- if minp <= p && p < maxp {
- if stackDebug >= 3 {
- print("adjust ptr ", hex(p), " ", funcname(f), "\n")
- }
- if useCAS {
- ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
- if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
- goto retry
- }
- } else {
- *pp = p + delta
+ if useCAS {
+ ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
+ if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
+ goto retry
}
+ } else {
+ *pp = p + delta
}
}
}