aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorAustin Clements <austin@google.com>2023-04-20 16:10:15 +0000
committerAustin Clements <austin@google.com>2023-04-20 16:19:49 +0000
commit466e6dae9570ac88ef15c5f1bda9a59d7253cfee (patch)
tree916fcafde3a860e6c399a6342d4ac2c426113ef2 /src
parentd11ff3f08155b7614485d9b555e97f7a9555ede5 (diff)
downloadgo-466e6dae9570ac88ef15c5f1bda9a59d7253cfee.tar.xz
Revert "internal/abi, runtime, cmd: merge StackSmall, StackBig consts into internal/abi"
This reverts commit CL 486379.

Submitted out of order and breaks bootstrap.

Change-Id: Ie20a61cc56efc79a365841293ca4e7352b02d86b
Reviewed-on: https://go-review.googlesource.com/c/go/+/486917
TryBot-Bypass: Austin Clements <austin@google.com>
Reviewed-by: David Chase <drchase@google.com>
Diffstat (limited to 'src')
-rw-r--r--src/cmd/internal/obj/arm/obj5.go8
-rw-r--r--src/cmd/internal/obj/arm64/obj7.go10
-rw-r--r--src/cmd/internal/obj/loong64/obj.go7
-rw-r--r--src/cmd/internal/obj/mips/obj0.go7
-rw-r--r--src/cmd/internal/obj/ppc64/obj9.go8
-rw-r--r--src/cmd/internal/obj/riscv/obj.go6
-rw-r--r--src/cmd/internal/obj/s390x/objz.go8
-rw-r--r--src/cmd/internal/obj/wasm/wasmobj.go5
-rw-r--r--src/cmd/internal/obj/x86/obj6.go12
-rw-r--r--src/cmd/internal/objabi/stack.go9
-rw-r--r--src/internal/abi/stack.go25
-rw-r--r--src/runtime/stack.go14
12 files changed, 53 insertions, 66 deletions
diff --git a/src/cmd/internal/obj/arm/obj5.go b/src/cmd/internal/obj/arm/obj5.go
index 3a53628fe1..38aa11cde9 100644
--- a/src/cmd/internal/obj/arm/obj5.go
+++ b/src/cmd/internal/obj/arm/obj5.go
@@ -708,7 +708,7 @@ func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
// unnecessarily. See issue #35470.
p = c.ctxt.StartUnsafePoint(p, c.newprog)
- if framesize <= abi.StackSmall {
+ if framesize <= objabi.StackSmall {
// small stack: SP < stackguard
// CMP stackguard, SP
p = obj.Appendp(p, c.newprog)
@@ -717,7 +717,7 @@ func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_R1
p.Reg = REGSP
- } else if framesize <= abi.StackBig {
+ } else if framesize <= objabi.StackBig {
// large stack: SP-framesize < stackguard-StackSmall
// MOVW $-(framesize-StackSmall)(SP), R2
// CMP stackguard, R2
@@ -726,7 +726,7 @@ func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.As = AMOVW
p.From.Type = obj.TYPE_ADDR
p.From.Reg = REGSP
- p.From.Offset = -(int64(framesize) - abi.StackSmall)
+ p.From.Offset = -(int64(framesize) - objabi.StackSmall)
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
@@ -753,7 +753,7 @@ func (c *ctxt5) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.As = ASUB
p.Scond = C_SBIT
p.From.Type = obj.TYPE_CONST
- p.From.Offset = int64(framesize) - abi.StackSmall
+ p.From.Offset = int64(framesize) - objabi.StackSmall
p.Reg = REGSP
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go
index a259956361..6c2cb63e9b 100644
--- a/src/cmd/internal/obj/arm64/obj7.go
+++ b/src/cmd/internal/obj/arm64/obj7.go
@@ -169,7 +169,7 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p = c.ctxt.StartUnsafePoint(p, c.newprog)
q := (*obj.Prog)(nil)
- if framesize <= abi.StackSmall {
+ if framesize <= objabi.StackSmall {
// small stack: SP < stackguard
// CMP stackguard, SP
@@ -178,7 +178,7 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.From.Type = obj.TYPE_REG
p.From.Reg = REGRT1
p.Reg = REGSP
- } else if framesize <= abi.StackBig {
+ } else if framesize <= objabi.StackBig {
// large stack: SP-framesize < stackguard-StackSmall
// SUB $(framesize-StackSmall), SP, RT2
// CMP stackguard, RT2
@@ -186,7 +186,7 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.As = ASUB
p.From.Type = obj.TYPE_CONST
- p.From.Offset = int64(framesize) - abi.StackSmall
+ p.From.Offset = int64(framesize) - objabi.StackSmall
p.Reg = REGSP
p.To.Type = obj.TYPE_REG
p.To.Reg = REGRT2
@@ -212,7 +212,7 @@ func (c *ctxt7) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p = obj.Appendp(p, c.newprog)
p.As = ASUBS
p.From.Type = obj.TYPE_CONST
- p.From.Offset = int64(framesize) - abi.StackSmall
+ p.From.Offset = int64(framesize) - objabi.StackSmall
p.Reg = REGSP
p.To.Type = obj.TYPE_REG
p.To.Reg = REGRT2
@@ -582,7 +582,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
}
- if p.Mark&LEAF != 0 && c.autosize < abi.StackSmall {
+ if p.Mark&LEAF != 0 && c.autosize < objabi.StackSmall {
// A leaf function with a small stack can be marked
// NOSPLIT, avoiding a stack check.
p.From.Sym.Set(obj.AttrNoSplit, true)
diff --git a/src/cmd/internal/obj/loong64/obj.go b/src/cmd/internal/obj/loong64/obj.go
index 8447a8dab3..0c1f5c029d 100644
--- a/src/cmd/internal/obj/loong64/obj.go
+++ b/src/cmd/internal/obj/loong64/obj.go
@@ -6,6 +6,7 @@ package loong64
import (
"cmd/internal/obj"
+ "cmd/internal/objabi"
"cmd/internal/sys"
"log"
"math"
@@ -592,7 +593,7 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p = c.ctxt.StartUnsafePoint(p, c.newprog)
var q *obj.Prog
- if framesize <= abi.StackSmall {
+ if framesize <= objabi.StackSmall {
// small stack: SP < stackguard
// AGTU SP, stackguard, R19
p = obj.Appendp(p, c.newprog)
@@ -605,8 +606,8 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.To.Reg = REG_R19
} else {
// large stack: SP-framesize < stackguard-StackSmall
- offset := int64(framesize) - abi.StackSmall
- if framesize > abi.StackBig {
+ offset := int64(framesize) - objabi.StackSmall
+ if framesize > objabi.StackBig {
// Such a large stack we need to protect against underflow.
// The runtime guarantees SP > objabi.StackBig, but
// framesize is large enough that SP-framesize may
diff --git a/src/cmd/internal/obj/mips/obj0.go b/src/cmd/internal/obj/mips/obj0.go
index 469e649c3a..9241dfd631 100644
--- a/src/cmd/internal/obj/mips/obj0.go
+++ b/src/cmd/internal/obj/mips/obj0.go
@@ -31,6 +31,7 @@ package mips
import (
"cmd/internal/obj"
+ "cmd/internal/objabi"
"cmd/internal/sys"
"encoding/binary"
"fmt"
@@ -773,7 +774,7 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p = c.ctxt.StartUnsafePoint(p, c.newprog)
var q *obj.Prog
- if framesize <= abi.StackSmall {
+ if framesize <= objabi.StackSmall {
// small stack: SP < stackguard
// AGTU SP, stackguard, R1
p = obj.Appendp(p, c.newprog)
@@ -786,8 +787,8 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.To.Reg = REG_R1
} else {
// large stack: SP-framesize < stackguard-StackSmall
- offset := int64(framesize) - abi.StackSmall
- if framesize > abi.StackBig {
+ offset := int64(framesize) - objabi.StackSmall
+ if framesize > objabi.StackBig {
// Such a large stack we need to protect against underflow.
// The runtime guarantees SP > objabi.StackBig, but
// framesize is large enough that SP-framesize may
diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go
index 1d9d7584d7..e9d47b2880 100644
--- a/src/cmd/internal/obj/ppc64/obj9.go
+++ b/src/cmd/internal/obj/ppc64/obj9.go
@@ -632,7 +632,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
autosize += int32(c.ctxt.Arch.FixedFrameSize)
}
- if p.Mark&LEAF != 0 && autosize < abi.StackSmall {
+ if p.Mark&LEAF != 0 && autosize < objabi.StackSmall {
// A leaf function with a small stack can be marked
// NOSPLIT, avoiding a stack check.
p.From.Sym.Set(obj.AttrNoSplit, true)
@@ -1177,7 +1177,7 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p = c.ctxt.StartUnsafePoint(p, c.newprog)
var q *obj.Prog
- if framesize <= abi.StackSmall {
+ if framesize <= objabi.StackSmall {
// small stack: SP < stackguard
// CMP stackguard, SP
p = obj.Appendp(p, c.newprog)
@@ -1189,8 +1189,8 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.To.Reg = REGSP
} else {
// large stack: SP-framesize < stackguard-StackSmall
- offset := int64(framesize) - abi.StackSmall
- if framesize > abi.StackBig {
+ offset := int64(framesize) - objabi.StackSmall
+ if framesize > objabi.StackBig {
// Such a large stack we need to protect against underflow.
// The runtime guarantees SP > objabi.StackBig, but
// framesize is large enough that SP-framesize may
diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go
index f81e7cc2db..0d97121e2c 100644
--- a/src/cmd/internal/obj/riscv/obj.go
+++ b/src/cmd/internal/obj/riscv/obj.go
@@ -828,7 +828,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgA
var to_done, to_more *obj.Prog
- if framesize <= abi.StackSmall {
+ if framesize <= objabi.StackSmall {
// small stack
// // if SP > stackguard { goto done }
// BLTU stackguard, SP, done
@@ -841,8 +841,8 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgA
to_done = p
} else {
// large stack: SP-framesize < stackguard-StackSmall
- offset := int64(framesize) - abi.StackSmall
- if framesize > abi.StackBig {
+ offset := int64(framesize) - objabi.StackSmall
+ if framesize > objabi.StackBig {
// Such a large stack we need to protect against underflow.
// The runtime guarantees SP > objabi.StackBig, but
// framesize is large enough that SP-framesize may
diff --git a/src/cmd/internal/obj/s390x/objz.go b/src/cmd/internal/obj/s390x/objz.go
index 1af5128670..4e8475624d 100644
--- a/src/cmd/internal/obj/s390x/objz.go
+++ b/src/cmd/internal/obj/s390x/objz.go
@@ -313,7 +313,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
autosize += int32(c.ctxt.Arch.FixedFrameSize)
}
- if p.Mark&LEAF != 0 && autosize < abi.StackSmall {
+ if p.Mark&LEAF != 0 && autosize < objabi.StackSmall {
// A leaf function with a small stack can be marked
// NOSPLIT, avoiding a stack check.
p.From.Sym.Set(obj.AttrNoSplit, true)
@@ -662,7 +662,7 @@ func (c *ctxtz) stacksplitPre(p *obj.Prog, framesize int32) (pPre, pPreempt, pCh
// unnecessarily. See issue #35470.
p = c.ctxt.StartUnsafePoint(p, c.newprog)
- if framesize <= abi.StackSmall {
+ if framesize <= objabi.StackSmall {
// small stack: SP < stackguard
// CMPUBGE stackguard, SP, label-of-call-to-morestack
@@ -678,8 +678,8 @@ func (c *ctxtz) stacksplitPre(p *obj.Prog, framesize int32) (pPre, pPreempt, pCh
// large stack: SP-framesize < stackguard-StackSmall
- offset := int64(framesize) - abi.StackSmall
- if framesize > abi.StackBig {
+ offset := int64(framesize) - objabi.StackSmall
+ if framesize > objabi.StackBig {
// Such a large stack we need to protect against underflow.
// The runtime guarantees SP > objabi.StackBig, but
// framesize is large enough that SP-framesize may
diff --git a/src/cmd/internal/obj/wasm/wasmobj.go b/src/cmd/internal/obj/wasm/wasmobj.go
index 83b9329f12..6bf49c602d 100644
--- a/src/cmd/internal/obj/wasm/wasmobj.go
+++ b/src/cmd/internal/obj/wasm/wasmobj.go
@@ -11,7 +11,6 @@ import (
"cmd/internal/sys"
"encoding/binary"
"fmt"
- "internal/abi"
"io"
"math"
)
@@ -473,7 +472,7 @@ func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
if needMoreStack {
p := pMorestack
- if framesize <= abi.StackSmall {
+ if framesize <= objabi.StackSmall {
// small stack: SP <= stackguard
// Get SP
// Get g
@@ -501,7 +500,7 @@ func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
p = appendp(p, AGet, regAddr(REGG))
p = appendp(p, AI32WrapI64)
p = appendp(p, AI32Load, constAddr(2*int64(ctxt.Arch.PtrSize))) // G.stackguard0
- p = appendp(p, AI32Const, constAddr(framesize-abi.StackSmall))
+ p = appendp(p, AI32Const, constAddr(framesize-objabi.StackSmall))
p = appendp(p, AI32Add)
p = appendp(p, AI32LeU)
}
diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go
index 185ac78230..8c9ea4f2a9 100644
--- a/src/cmd/internal/obj/x86/obj6.go
+++ b/src/cmd/internal/obj/x86/obj6.go
@@ -641,7 +641,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
// TODO(rsc): Remove 'ctxt.Arch.Family == sys.AMD64 &&'.
- if ctxt.Arch.Family == sys.AMD64 && autoffset < abi.StackSmall && !p.From.Sym.NoSplit() {
+ if ctxt.Arch.Family == sys.AMD64 && autoffset < objabi.StackSmall && !p.From.Sym.NoSplit() {
leaf := true
LeafSearch:
for q := p; q != nil; q = q.Link {
@@ -655,7 +655,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
fallthrough
case obj.ADUFFCOPY, obj.ADUFFZERO:
- if autoffset >= abi.StackSmall-8 {
+ if autoffset >= objabi.StackSmall-8 {
leaf = false
break LeafSearch
}
@@ -1087,7 +1087,7 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA
p, rg = loadG(ctxt, cursym, p, newprog)
var q1 *obj.Prog
- if framesize <= abi.StackSmall {
+ if framesize <= objabi.StackSmall {
// small stack: SP <= stackguard
// CMPQ SP, stackguard
p = obj.Appendp(p, newprog)
@@ -1107,7 +1107,7 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA
// cleared, but we'll still call morestack, which will double the stack
// unnecessarily. See issue #35470.
p = ctxt.StartUnsafePoint(p, newprog)
- } else if framesize <= abi.StackBig {
+ } else if framesize <= objabi.StackBig {
// large stack: SP-framesize <= stackguard-StackSmall
// LEAQ -xxx(SP), tmp
// CMPQ tmp, stackguard
@@ -1116,7 +1116,7 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA
p.As = lea
p.From.Type = obj.TYPE_MEM
p.From.Reg = REG_SP
- p.From.Offset = -(int64(framesize) - abi.StackSmall)
+ p.From.Offset = -(int64(framesize) - objabi.StackSmall)
p.To.Type = obj.TYPE_REG
p.To.Reg = tmp
@@ -1159,7 +1159,7 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA
p = obj.Appendp(p, newprog)
p.As = sub
p.From.Type = obj.TYPE_CONST
- p.From.Offset = int64(framesize) - abi.StackSmall
+ p.From.Offset = int64(framesize) - objabi.StackSmall
p.To.Type = obj.TYPE_REG
p.To.Reg = tmp
diff --git a/src/cmd/internal/objabi/stack.go b/src/cmd/internal/objabi/stack.go
index 5a2f641a75..88b4990d5e 100644
--- a/src/cmd/internal/objabi/stack.go
+++ b/src/cmd/internal/objabi/stack.go
@@ -4,22 +4,21 @@
package objabi
-import (
- "internal/abi"
- "internal/buildcfg"
-)
+import "internal/buildcfg"
// For the linkers. Must match Go definitions.
const (
STACKSYSTEM = 0
StackSystem = STACKSYSTEM
+ StackBig = 4096
+ StackSmall = 128
)
func StackLimit(race bool) int {
// This arithmetic must match that in runtime/stack.go:{_StackGuard,_StackLimit}.
stackGuard := 928*stackGuardMultiplier(race) + StackSystem
- stackLimit := stackGuard - StackSystem - abi.StackSmall
+ stackLimit := stackGuard - StackSystem - StackSmall
return stackLimit
}
diff --git a/src/internal/abi/stack.go b/src/internal/abi/stack.go
deleted file mode 100644
index 9efd21b167..0000000000
--- a/src/internal/abi/stack.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package abi
-
-const (
- // We have three different sequences for stack bounds checks, depending on
- // whether the stack frame of a function is small, big, or huge.
-
- // After a stack split check the SP is allowed to be StackSmall bytes below
- // the stack guard.
- //
- // Functions that need frames <= StackSmall can perform the stack check
- // using a single comparison directly between the stack guard and the SP
- // because we ensure that StackSmall bytes of stack space are available
- // beyond the stack guard.
- StackSmall = 128
-
- // Functions that need frames <= StackBig can assume that neither
- // SP-framesize nor stackGuard-StackSmall will underflow, and thus use a
- // more efficient check. In order to ensure this, StackBig must be <= the
- // size of the unmapped space at zero.
- StackBig = 4096
-)
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 39dbed5114..e1e6c7e82a 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -85,6 +85,13 @@ const (
_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
_FixedStack = _FixedStack6 + 1
+ // Functions that need frames bigger than this use an extra
+ // instruction to do the stack split check, to avoid overflow
+ // in case SP - framesize wraps below zero.
+ // This value can be no bigger than the size of the unmapped
+ // space at zero.
+ _StackBig = 4096
+
// The stack guard is a pointer this many bytes above the
// bottom of the stack.
//
@@ -94,10 +101,15 @@ const (
// This arithmetic must match that in cmd/internal/objabi/stack.go:StackLimit.
_StackGuard = 928*sys.StackGuardMultiplier + _StackSystem
+ // After a stack split check the SP is allowed to be this
+ // many bytes below the stack guard. This saves an instruction
+ // in the checking sequence for tiny frames.
+ _StackSmall = 128
+
// The maximum number of bytes that a chain of NOSPLIT
// functions can use.
// This arithmetic must match that in cmd/internal/objabi/stack.go:StackLimit.
- _StackLimit = _StackGuard - _StackSystem - abi.StackSmall
+ _StackLimit = _StackGuard - _StackSystem - _StackSmall
)
const (