From 0c81248bf46f611b56e3ab38b4d83e449b3c8636 Mon Sep 17 00:00:00 2001 From: Dave Cheney Date: Wed, 6 Apr 2016 18:43:23 +1000 Subject: runtime: remove unused return value from lfstackUnpack None of the two places that call lfstackUnpack use the second argument. This simplifies a followup CL that merges the lfstack{Pack,Unpack} implementations. Change-Id: I3c93f6259da99e113d94f8c8027584da79c1ac2c Reviewed-on: https://go-review.googlesource.com/21595 Run-TryBot: Dave Cheney Reviewed-by: Brad Fitzpatrick --- src/runtime/lfstack.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src/runtime/lfstack.go') diff --git a/src/runtime/lfstack.go b/src/runtime/lfstack.go index ea640eb12f..1261f54d97 100644 --- a/src/runtime/lfstack.go +++ b/src/runtime/lfstack.go @@ -15,7 +15,7 @@ import ( func lfstackpush(head *uint64, node *lfnode) { node.pushcnt++ new := lfstackPack(node, node.pushcnt) - if node1, _ := lfstackUnpack(new); node1 != node { + if node1 := lfstackUnpack(new); node1 != node { print("runtime: lfstackpush invalid packing: node=", node, " cnt=", hex(node.pushcnt), " packed=", hex(new), " -> node=", node1, "\n") throw("lfstackpush") } @@ -34,7 +34,7 @@ func lfstackpop(head *uint64) unsafe.Pointer { if old == 0 { return nil } - node, _ := lfstackUnpack(old) + node := lfstackUnpack(old) next := atomic.Load64(&node.next) if atomic.Cas64(head, old, next) { return unsafe.Pointer(node) -- cgit v1.3 From 3b02c5b1b66df9cdb23d5a3243bb37b2c312ea1b Mon Sep 17 00:00:00 2001 From: Dave Cheney Date: Thu, 7 Apr 2016 07:29:22 +1000 Subject: runtime: merge lfstack{Pack,Unpack} into one file Merge the remaining lfstack{Pack,Unpack} implementations into one file. unsafe.Sizeof(uintptr(0)) == 4 is a constant comparison so this branch folds away at compile time. 
Dmitry confirmed that the upper 17 bits of an address will be zero for a user mode pointer, so there is no need to sign extend on amd64 during unpack, so we can reuse the same implementation as all other 64 bit archs. Change-Id: I99f589416d8b181ccde5364c9c2e78e4a5efc7f1 Reviewed-on: https://go-review.googlesource.com/21597 Run-TryBot: Dave Cheney TryBot-Result: Gobot Gobot Reviewed-by: Minux Ma --- src/runtime/lfstack.go | 35 +++++++++++++++++++++++++++++++++++ src/runtime/lfstack_32bit.go | 19 ------------------- src/runtime/lfstack_64bit.go | 33 --------------------------------- src/runtime/lfstack_amd64.go | 22 ---------------------- 4 files changed, 35 insertions(+), 74 deletions(-) delete mode 100644 src/runtime/lfstack_32bit.go delete mode 100644 src/runtime/lfstack_64bit.go delete mode 100644 src/runtime/lfstack_amd64.go (limited to 'src/runtime/lfstack.go') diff --git a/src/runtime/lfstack.go b/src/runtime/lfstack.go index 1261f54d97..8a2d519402 100644 --- a/src/runtime/lfstack.go +++ b/src/runtime/lfstack.go @@ -41,3 +41,38 @@ func lfstackpop(head *uint64) unsafe.Pointer { } } } + +const ( + addrBits = 48 + cntBits = 64 - addrBits + 3 +) + +func lfstackPack(node *lfnode, cnt uintptr) uint64 { + if unsafe.Sizeof(uintptr(0)) == 4 { + // On 32-bit systems, the stored uint64 has a 32-bit pointer and 32-bit count. + return uint64(uintptr(unsafe.Pointer(node)))<<32 | uint64(cnt) + } + // On ppc64, Linux limits the user address space to 46 bits (see + // TASK_SIZE_USER64 in the Linux kernel). This has grown over time, + // so here we allow 48 bit addresses. + // + // On mips64, Linux limits the user address space to 40 bits (see + // TASK_SIZE64 in the Linux kernel). This has grown over time, + // so here we allow 48 bit addresses. + // + // On AMD64, virtual addresses are 48-bit numbers sign extended to 64. + // We shift the address left 16 to eliminate the sign extended part and make + // room in the bottom for the count. 
+ // + // In addition to the 16 bits taken from the top, we can take 3 from the + // bottom, because node must be pointer-aligned, giving a total of 19 bits + // of count. + return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1)) +} + +func lfstackUnpack(val uint64) *lfnode { + if unsafe.Sizeof(uintptr(0)) == 4 { + return (*lfnode)(unsafe.Pointer(uintptr(val >> 32))) + } + return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3))) +} diff --git a/src/runtime/lfstack_32bit.go b/src/runtime/lfstack_32bit.go deleted file mode 100644 index 2f59e0212e..0000000000 --- a/src/runtime/lfstack_32bit.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build 386 arm nacl - -package runtime - -import "unsafe" - -// On 32-bit systems, the stored uint64 has a 32-bit pointer and 32-bit count. - -func lfstackPack(node *lfnode, cnt uintptr) uint64 { - return uint64(uintptr(unsafe.Pointer(node)))<<32 | uint64(cnt) -} - -func lfstackUnpack(val uint64) *lfnode { - return (*lfnode)(unsafe.Pointer(uintptr(val >> 32))) -} diff --git a/src/runtime/lfstack_64bit.go b/src/runtime/lfstack_64bit.go deleted file mode 100644 index 07c2a141f0..0000000000 --- a/src/runtime/lfstack_64bit.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build arm64 mips64 mips64le ppc64 ppc64le - -package runtime - -import "unsafe" - -// On ppc64, Linux limits the user address space to 46 bits (see -// TASK_SIZE_USER64 in the Linux kernel). This has grown over time, -// so here we allow 48 bit addresses. -// -// On mips64, Linux limits the user address space to 40 bits (see -// TASK_SIZE64 in the Linux kernel). This has grown over time, -// so here we allow 48 bit addresses. 
-// -// In addition to the 16 bits taken from the top, we can take 3 from the -// bottom, because node must be pointer-aligned, giving a total of 19 bits -// of count. -const ( - addrBits = 48 - cntBits = 64 - addrBits + 3 -) - -func lfstackPack(node *lfnode, cnt uintptr) uint64 { - return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1)) -} - -func lfstackUnpack(val uint64) *lfnode { - return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3))) -} diff --git a/src/runtime/lfstack_amd64.go b/src/runtime/lfstack_amd64.go deleted file mode 100644 index 6397e1d47f..0000000000 --- a/src/runtime/lfstack_amd64.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package runtime - -import "unsafe" - -// On AMD64, virtual addresses are 48-bit numbers sign extended to 64. -// We shift the address left 16 to eliminate the sign extended part and make -// room in the bottom for the count. -// In addition to the 16 bits taken from the top, we can take 3 from the -// bottom, because node must be pointer-aligned, giving a total of 19 bits -// of count. - -func lfstackPack(node *lfnode, cnt uintptr) uint64 { - return uint64(uintptr(unsafe.Pointer(node)))<<16 | uint64(cnt&(1<<19-1)) -} - -func lfstackUnpack(val uint64) *lfnode { - return (*lfnode)(unsafe.Pointer(uintptr(int64(val) >> 19 << 3))) -} -- cgit v1.3 From 9cc9e95b288648d796d92f0b92cb713b35f20062 Mon Sep 17 00:00:00 2001 From: Dave Cheney Date: Thu, 7 Apr 2016 14:05:06 +0000 Subject: Revert "runtime: merge lfstack{Pack,Unpack} into one file" This broke solaris, which apparently does use the upper 17 bits of the address space. This reverts commit 3b02c5b1b66df9cdb23d5a3243bb37b2c312ea1b. 
Change-Id: Iedfe54abd0384960845468205f20191a97751c0b Reviewed-on: https://go-review.googlesource.com/21652 Reviewed-by: Dave Cheney --- src/runtime/lfstack.go | 35 ----------------------------------- src/runtime/lfstack_32bit.go | 19 +++++++++++++++++++ src/runtime/lfstack_64bit.go | 33 +++++++++++++++++++++++++++++++++ src/runtime/lfstack_amd64.go | 22 ++++++++++++++++++++++ 4 files changed, 74 insertions(+), 35 deletions(-) create mode 100644 src/runtime/lfstack_32bit.go create mode 100644 src/runtime/lfstack_64bit.go create mode 100644 src/runtime/lfstack_amd64.go (limited to 'src/runtime/lfstack.go') diff --git a/src/runtime/lfstack.go b/src/runtime/lfstack.go index 8a2d519402..1261f54d97 100644 --- a/src/runtime/lfstack.go +++ b/src/runtime/lfstack.go @@ -41,38 +41,3 @@ func lfstackpop(head *uint64) unsafe.Pointer { } } } - -const ( - addrBits = 48 - cntBits = 64 - addrBits + 3 -) - -func lfstackPack(node *lfnode, cnt uintptr) uint64 { - if unsafe.Sizeof(uintptr(0)) == 4 { - // On 32-bit systems, the stored uint64 has a 32-bit pointer and 32-bit count. - return uint64(uintptr(unsafe.Pointer(node)))<<32 | uint64(cnt) - } - // On ppc64, Linux limits the user address space to 46 bits (see - // TASK_SIZE_USER64 in the Linux kernel). This has grown over time, - // so here we allow 48 bit addresses. - // - // On mips64, Linux limits the user address space to 40 bits (see - // TASK_SIZE64 in the Linux kernel). This has grown over time, - // so here we allow 48 bit addresses. - // - // On AMD64, virtual addresses are 48-bit numbers sign extended to 64. - // We shift the address left 16 to eliminate the sign extended part and make - // room in the bottom for the count. - // - // In addition to the 16 bits taken from the top, we can take 3 from the - // bottom, because node must be pointer-aligned, giving a total of 19 bits - // of count. 
- return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1)) -} - -func lfstackUnpack(val uint64) *lfnode { - if unsafe.Sizeof(uintptr(0)) == 4 { - return (*lfnode)(unsafe.Pointer(uintptr(val >> 32))) - } - return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3))) -} diff --git a/src/runtime/lfstack_32bit.go b/src/runtime/lfstack_32bit.go new file mode 100644 index 0000000000..2f59e0212e --- /dev/null +++ b/src/runtime/lfstack_32bit.go @@ -0,0 +1,19 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 arm nacl + +package runtime + +import "unsafe" + +// On 32-bit systems, the stored uint64 has a 32-bit pointer and 32-bit count. + +func lfstackPack(node *lfnode, cnt uintptr) uint64 { + return uint64(uintptr(unsafe.Pointer(node)))<<32 | uint64(cnt) +} + +func lfstackUnpack(val uint64) *lfnode { + return (*lfnode)(unsafe.Pointer(uintptr(val >> 32))) +} diff --git a/src/runtime/lfstack_64bit.go b/src/runtime/lfstack_64bit.go new file mode 100644 index 0000000000..07c2a141f0 --- /dev/null +++ b/src/runtime/lfstack_64bit.go @@ -0,0 +1,33 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 mips64 mips64le ppc64 ppc64le + +package runtime + +import "unsafe" + +// On ppc64, Linux limits the user address space to 46 bits (see +// TASK_SIZE_USER64 in the Linux kernel). This has grown over time, +// so here we allow 48 bit addresses. +// +// On mips64, Linux limits the user address space to 40 bits (see +// TASK_SIZE64 in the Linux kernel). This has grown over time, +// so here we allow 48 bit addresses. +// +// In addition to the 16 bits taken from the top, we can take 3 from the +// bottom, because node must be pointer-aligned, giving a total of 19 bits +// of count. 
+const ( + addrBits = 48 + cntBits = 64 - addrBits + 3 +) + +func lfstackPack(node *lfnode, cnt uintptr) uint64 { + return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1)) +} + +func lfstackUnpack(val uint64) *lfnode { + return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3))) +} diff --git a/src/runtime/lfstack_amd64.go b/src/runtime/lfstack_amd64.go new file mode 100644 index 0000000000..6397e1d47f --- /dev/null +++ b/src/runtime/lfstack_amd64.go @@ -0,0 +1,22 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +// On AMD64, virtual addresses are 48-bit numbers sign extended to 64. +// We shift the address left 16 to eliminate the sign extended part and make +// room in the bottom for the count. +// In addition to the 16 bits taken from the top, we can take 3 from the +// bottom, because node must be pointer-aligned, giving a total of 19 bits +// of count. + +func lfstackPack(node *lfnode, cnt uintptr) uint64 { + return uint64(uintptr(unsafe.Pointer(node)))<<16 | uint64(cnt&(1<<19-1)) +} + +func lfstackUnpack(val uint64) *lfnode { + return (*lfnode)(unsafe.Pointer(uintptr(int64(val) >> 19 << 3))) +} -- cgit v1.3 From 02b8e6978a86c2f4f3a604e8b05014d127f4020a Mon Sep 17 00:00:00 2001 From: Jeremy Jackins Date: Wed, 13 Apr 2016 18:16:21 +0900 Subject: runtime: find a home for orphaned comments These comments were left behind after runtime.h was converted from C to Go. I examined the original code and tried to move these to the places that made the most sense. 
Change-Id: I8769d60234c0113d682f9de3bd8d6c34c450c188 Reviewed-on: https://go-review.googlesource.com/21969 Reviewed-by: Matthew Dempsky Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot --- src/runtime/lfstack.go | 3 ++ src/runtime/lock_futex.go | 6 +-- src/runtime/mmap.go | 3 ++ src/runtime/runtime2.go | 107 +++++++++++++++------------------------------- 4 files changed, 44 insertions(+), 75 deletions(-) (limited to 'src/runtime/lfstack.go') diff --git a/src/runtime/lfstack.go b/src/runtime/lfstack.go index 1261f54d97..db54ecb05e 100644 --- a/src/runtime/lfstack.go +++ b/src/runtime/lfstack.go @@ -3,6 +3,9 @@ // license that can be found in the LICENSE file. // Lock-free stack. +// Initialize head to 0, compare with 0 to test for emptiness. +// The stack does not keep pointers to nodes, +// so they can be garbage collected if there are no other pointers to nodes. // The following code runs only on g0 stack. package runtime diff --git a/src/runtime/lock_futex.go b/src/runtime/lock_futex.go index d28fd92720..073136abd0 100644 --- a/src/runtime/lock_futex.go +++ b/src/runtime/lock_futex.go @@ -13,13 +13,13 @@ import ( // This implementation depends on OS-specific implementations of // -// runtime·futexsleep(uint32 *addr, uint32 val, int64 ns) +// futexsleep(addr *uint32, val uint32, ns int64) // Atomically, -// if(*addr == val) sleep +// if *addr == val { sleep } // Might be woken up spuriously; that's allowed. // Don't sleep longer than ns; ns < 0 means forever. // -// runtime·futexwakeup(uint32 *addr, uint32 cnt) +// futexwakeup(addr *uint32, cnt uint32) // If any procs are sleeping on addr, wake up at most cnt. const ( diff --git a/src/runtime/mmap.go b/src/runtime/mmap.go index 6363a90242..53617e41e4 100644 --- a/src/runtime/mmap.go +++ b/src/runtime/mmap.go @@ -13,4 +13,7 @@ package runtime import "unsafe" // mmap calls the mmap system call. It is implemented in assembly. 
+// We only pass the lower 32 bits of file offset to the +// assembly routine; the higher bits (if required), should be provided +// by the assembly routine as 0. func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index e0137f7e97..0fdea400de 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -10,9 +10,7 @@ import ( "unsafe" ) -/* - * defined constants - */ +// defined constants const ( // G status // @@ -99,6 +97,10 @@ const ( _Pdead ) +// Mutual exclusion locks. In the uncontended case, +// as fast as spin locks (just a few user-level instructions), +// but on the contention path they sleep in the kernel. +// A zeroed Mutex is unlocked (no need to initialize each lock). type mutex struct { // Futex-based impl treats it as uint32 key, // while sema-based impl as M* waitm. @@ -106,6 +108,26 @@ type mutex struct { key uintptr } +// sleep and wakeup on one-time events. +// before any calls to notesleep or notewakeup, +// must call noteclear to initialize the Note. +// then, exactly one thread can call notesleep +// and exactly one thread can call notewakeup (once). +// once notewakeup has been called, the notesleep +// will return. future notesleep will return immediately. +// subsequent noteclear must be called only after +// previous notesleep has returned, e.g. it's disallowed +// to call noteclear straight after notewakeup. +// +// notetsleep is like notesleep but wakes up after +// a given number of nanoseconds even if the event +// has not yet happened. if a goroutine uses notetsleep to +// wake up early, it must wait to call noteclear until it +// can be sure that no other goroutine is calling +// notewakeup. +// +// notesleep/notetsleep are generally called on g0, +// notetsleepg is similar to notetsleep but is called on user g. type note struct { // Futex-based impl treats it as uint32 key, // while sema-based impl as M* waitm. 
@@ -397,8 +419,8 @@ type m struct { waittraceskip int startingtrace bool syscalltick uint32 - //#ifdef GOOS_windows - thread uintptr // thread handle + thread uintptr // thread handle + // these are here because they are too large to be on the stack // of low-level NOSPLIT functions. libcall libcall @@ -406,7 +428,7 @@ type m struct { libcallsp uintptr libcallg guintptr syscall libcall // stores syscall parameters on windows - //#endif + mOS } @@ -530,10 +552,10 @@ type schedt struct { totaltime int64 // ∫gomaxprocs dt up to procresizetime } -// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread. +// The m.locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread. // The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active. // External locks are not recursive; a second lock is silently ignored. -// The upper bits of m->locked record the nesting depth of calls to lockOSThread +// The upper bits of m.locked record the nesting depth of calls to lockOSThread // (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal). // Internal locks can be recursive. For instance, a lock for cgo can occur while the main // goroutine is holding the lock during the initialization phase. @@ -603,13 +625,6 @@ type forcegcstate struct { idle uint32 } -/* - * known to compiler - */ -const ( - _Structrnd = sys.RegSize -) - // startup_random_data holds random bytes initialized at startup. These come from // the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go). 
var startupRandomData []byte @@ -635,9 +650,7 @@ func extendRandom(r []byte, n int) { } } -/* - * deferred subroutine calls - */ +// deferred subroutine calls type _defer struct { siz int32 started bool @@ -648,9 +661,7 @@ type _defer struct { link *_defer } -/* - * panics - */ +// panics type _panic struct { argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink arg interface{} // argument to panic @@ -659,10 +670,7 @@ type _panic struct { aborted bool // the panic was aborted } -/* - * stack traces - */ - +// stack traces type stkframe struct { fn *_func // function being run pc uintptr // program counter within fn @@ -682,10 +690,8 @@ const ( _TraceJumpStack // if traceback is on a systemstack, resume trace at g that called into it ) -const ( - // The maximum number of frames we print for a traceback - _TracebackMaxFrames = 100 -) +// The maximum number of frames we print for a traceback +const _TracebackMaxFrames = 100 var ( emptystring string @@ -716,46 +722,3 @@ var ( islibrary bool // -buildmode=c-shared isarchive bool // -buildmode=c-archive ) - -/* - * mutual exclusion locks. in the uncontended case, - * as fast as spin locks (just a few user-level instructions), - * but on the contention path they sleep in the kernel. - * a zeroed Mutex is unlocked (no need to initialize each lock). - */ - -/* - * sleep and wakeup on one-time events. - * before any calls to notesleep or notewakeup, - * must call noteclear to initialize the Note. - * then, exactly one thread can call notesleep - * and exactly one thread can call notewakeup (once). - * once notewakeup has been called, the notesleep - * will return. future notesleep will return immediately. - * subsequent noteclear must be called only after - * previous notesleep has returned, e.g. it's disallowed - * to call noteclear straight after notewakeup. 
- * - * notetsleep is like notesleep but wakes up after - * a given number of nanoseconds even if the event - * has not yet happened. if a goroutine uses notetsleep to - * wake up early, it must wait to call noteclear until it - * can be sure that no other goroutine is calling - * notewakeup. - * - * notesleep/notetsleep are generally called on g0, - * notetsleepg is similar to notetsleep but is called on user g. - */ -// bool runtime·notetsleep(Note*, int64); // false - timeout -// bool runtime·notetsleepg(Note*, int64); // false - timeout - -/* - * Lock-free stack. - * Initialize uint64 head to 0, compare with 0 to test for emptiness. - * The stack does not keep pointers to nodes, - * so they can be garbage collected if there are no other pointers to nodes. - */ - -// for mmap, we only pass the lower 32 bits of file offset to the -// assembly routine; the higher bits (if required), should be provided -// by the assembly routine as 0. -- cgit v1.3