diff options
| author | Russ Cox <rsc@golang.org> | 2014-09-09 13:39:57 -0400 |
|---|---|---|
| committer | Russ Cox <rsc@golang.org> | 2014-09-09 13:39:57 -0400 |
| commit | 15b76ad94b1054ec7fd6853530bff782790b5727 (patch) | |
| tree | 0721c80e6464caf693a9f31187a7635a3a1dcacb /src/runtime/stack_test.go | |
| parent | d72029e3a3e6726248d84fdd1e468a3683ccb577 (diff) | |
| download | go-15b76ad94b1054ec7fd6853530bff782790b5727.tar.xz | |
runtime: assume precisestack, copystack, StackCopyAlways, ScanStackByFrames
Commit to stack copying for stack growth.
We're carrying around a surprising amount of cruft from older schemes.
I am confident that precise stack scans and stack copying are here to stay.
Delete fallback code for when precise stack info is disabled.
Delete fallback code for when copying stacks is disabled.
Delete fallback code for when StackCopyAlways is disabled.
Delete Stktop chain - there is only one stack segment now.
Delete M.moreargp, M.moreargsize, M.moreframesize, M.cret.
Delete G.writenbuf (unrelated, just dead).
Delete runtime.lessstack, runtime.oldstack.
Delete many amd64 morestack variants.
Delete initialization of morestack frame/arg sizes (shortens split prologue!).
Replace G's stackguard/stackbase/stack0/stacksize/
syscallstack/syscallguard/forkstackguard with simple stack
bounds (lo, hi).
Update liblink, runtime/cgo for adjustments to G.
LGTM=khr
R=khr, bradfitz
CC=golang-codereviews, iant, r
https://golang.org/cl/137410043
Diffstat (limited to 'src/runtime/stack_test.go')
| -rw-r--r-- | src/runtime/stack_test.go | 54 |
1 file changed, 0 insertions, 54 deletions
diff --git a/src/runtime/stack_test.go b/src/runtime/stack_test.go index 7b9412af42..cd525a3fc5 100644 --- a/src/runtime/stack_test.go +++ b/src/runtime/stack_test.go @@ -10,62 +10,8 @@ import ( "sync" "testing" "time" - "unsafe" ) -// See stack.h. -const ( - StackGuard = 256 - StackSmall = 64 - StackLimit = StackGuard - StackSmall -) - -// Test stack split logic by calling functions of every frame size -// from near 0 up to and beyond the default segment size (4k). -// Each of those functions reports its SP + stack limit, and then -// the test (the caller) checks that those make sense. By not -// doing the actual checking and reporting from the suspect functions, -// we minimize the possibility of crashes during the test itself. -// -// Exhaustive test for http://golang.org/issue/3310. -// The linker used to get a few sizes near the segment size wrong: -// -// --- FAIL: TestStackSplit (0.01 seconds) -// stack_test.go:22: after runtime_test.stack3812: sp=0x7f7818d5d078 < limit=0x7f7818d5d080 -// stack_test.go:22: after runtime_test.stack3816: sp=0x7f7818d5d078 < limit=0x7f7818d5d080 -// stack_test.go:22: after runtime_test.stack3820: sp=0x7f7818d5d070 < limit=0x7f7818d5d080 -// stack_test.go:22: after runtime_test.stack3824: sp=0x7f7818d5d070 < limit=0x7f7818d5d080 -// stack_test.go:22: after runtime_test.stack3828: sp=0x7f7818d5d068 < limit=0x7f7818d5d080 -// stack_test.go:22: after runtime_test.stack3832: sp=0x7f7818d5d068 < limit=0x7f7818d5d080 -// stack_test.go:22: after runtime_test.stack3836: sp=0x7f7818d5d060 < limit=0x7f7818d5d080 -// stack_test.go:22: after runtime_test.stack3840: sp=0x7f7818d5d060 < limit=0x7f7818d5d080 -// stack_test.go:22: after runtime_test.stack3844: sp=0x7f7818d5d058 < limit=0x7f7818d5d080 -// stack_test.go:22: after runtime_test.stack3848: sp=0x7f7818d5d058 < limit=0x7f7818d5d080 -// stack_test.go:22: after runtime_test.stack3852: sp=0x7f7818d5d050 < limit=0x7f7818d5d080 -// stack_test.go:22: after runtime_test.stack3856: sp=0x7f7818d5d050 < limit=0x7f7818d5d080 -// stack_test.go:22: after runtime_test.stack3860: sp=0x7f7818d5d048 < limit=0x7f7818d5d080 -// stack_test.go:22: after runtime_test.stack3864: sp=0x7f7818d5d048 < limit=0x7f7818d5d080 -// FAIL -func TestStackSplit(t *testing.T) { - for _, f := range splitTests { - sp, guard := f() - bottom := guard - StackGuard - if sp < bottom+StackLimit { - fun := FuncForPC(**(**uintptr)(unsafe.Pointer(&f))) - t.Errorf("after %s: sp=%#x < limit=%#x (guard=%#x, bottom=%#x)", - fun.Name(), sp, bottom+StackLimit, guard, bottom) - } - } -} - -var Used byte - -func use(buf []byte) { - for _, c := range buf { - Used += c - } -} - // TestStackMem measures per-thread stack segment cache behavior. // The test consumed up to 500MB in the past. func TestStackMem(t *testing.T) { |
