Diffstat (limited to 'src/runtime/mkpreempt.go')
-rw-r--r--  src/runtime/mkpreempt.go | 92
1 file changed, 81 insertions(+), 11 deletions(-)
diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go
index ec900a23d2..e3dd5046f3 100644
--- a/src/runtime/mkpreempt.go
+++ b/src/runtime/mkpreempt.go
@@ -9,8 +9,10 @@
package main
import (
+ "bytes"
"flag"
"fmt"
+ "go/format"
"io"
"log"
"os"
@@ -122,14 +124,19 @@ type gen struct {
goarch string
}
-func (g *gen) asmHeader() {
+func (g *gen) commonHeader() {
fmt.Fprintf(g.w, "// Code generated by mkpreempt.go; DO NOT EDIT.\n\n")
if beLe[g.goarch] {
base := g.goarch[:len(g.goarch)-1]
fmt.Fprintf(g.w, "//go:build %s || %sle\n\n", base, base)
}
+}
+
+func (g *gen) asmHeader() {
+ g.commonHeader()
fmt.Fprintf(g.w, "#include \"go_asm.h\"\n")
if g.goarch == "amd64" {
+ fmt.Fprintf(g.w, "#include \"go_tls.h\"\n")
fmt.Fprintf(g.w, "#include \"asm_amd64.h\"\n")
}
fmt.Fprintf(g.w, "#include \"textflag.h\"\n\n")
@@ -145,6 +152,43 @@ func (g *gen) label(l string) {
fmt.Fprintf(g.w, "%s\n", l)
}
+// writeXRegs writes an architecture xregs file.
+func writeXRegs(arch string, l *layout) {
+ var code bytes.Buffer
+ g := gen{&code, arch}
+ g.commonHeader()
+ fmt.Fprintf(g.w, `
+package runtime
+
+type xRegState struct {
+`)
+ pos := 0
+ for _, reg := range l.regs {
+ if reg.pos != pos {
+ log.Fatalf("padding not implemented")
+ }
+ typ := fmt.Sprintf("[%d]byte", reg.size)
+ switch {
+ case reg.size == 4 && reg.pos%4 == 0:
+ typ = "uint32"
+ case reg.size == 8 && reg.pos%8 == 0:
+ typ = "uint64"
+ }
+ fmt.Fprintf(g.w, "\t%s %s\n", reg.reg, typ)
+ pos += reg.size
+ }
+ fmt.Fprintf(g.w, "}\n")
+
+ path := fmt.Sprintf("preempt_%s.go", arch)
+ b, err := format.Source(code.Bytes())
+ if err != nil {
+ log.Fatalf("formatting %s: %s", path, err)
+ }
+ if err := os.WriteFile(path, b, 0666); err != nil {
+ log.Fatal(err)
+ }
+}
+
type layout struct {
stack int
regs []regPos
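
A sketch of the generated Go file: with the amd64 layout built in genAMD64
below (sixteen X registers of 16 bytes each, saved with MOVUPS), writeXRegs
would produce a preempt_amd64.go along these lines:

    // Code generated by mkpreempt.go; DO NOT EDIT.

    package runtime

    type xRegState struct {
        X0  [16]byte
        X1  [16]byte
        ...
        X15 [16]byte
    }

Each field takes the [N]byte fallback because a 16-byte register matches
neither the uint32 nor the uint64 case.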
@@ -152,7 +196,7 @@ type layout struct {
}
type regPos struct {
- pos int
+ pos, size int
saveOp string
restoreOp string
@@ -165,17 +209,17 @@ type regPos struct {
}
func (l *layout) add(op, reg string, size int) {
- l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack})
+ l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack, size: size})
l.stack += size
}
func (l *layout) add2(sop, rop, reg string, size int) {
- l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack})
+ l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack, size: size})
l.stack += size
}
func (l *layout) addSpecial(save, restore string, size int) {
- l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack})
+ l.regs = append(l.regs, regPos{save: save, restore: restore, pos: l.stack, size: size})
l.stack += size
}
@@ -239,6 +283,8 @@ func gen386(g *gen) {
}
func genAMD64(g *gen) {
+ const xReg = "AX" // *xRegState
+
p := g.p
// Assign stack offsets.
@@ -251,12 +297,13 @@ func genAMD64(g *gen) {
l.add("MOVQ", reg, 8)
}
}
- lSSE := layout{stack: l.stack, sp: "SP"}
+ lXRegs := layout{sp: xReg} // Non-GP registers
for _, reg := range regNamesAMD64 {
if strings.HasPrefix(reg, "X") {
- lSSE.add("MOVUPS", reg, 16)
+ lXRegs.add("MOVUPS", reg, 16)
}
}
+ writeXRegs(g.goarch, &lXRegs)
// TODO: MXCSR register?
@@ -265,17 +312,40 @@ func genAMD64(g *gen) {
p("// Save flags before clobbering them")
p("PUSHFQ")
p("// obj doesn't understand ADD/SUB on SP, but does understand ADJSP")
- p("ADJSP $%d", lSSE.stack)
+ p("ADJSP $%d", l.stack)
p("// But vet doesn't know ADJSP, so suppress vet stack checking")
p("NOP SP")
+ p("// Save GPs")
l.save(g)
- lSSE.save(g)
+ // In general, the limitations on asynchronous preemption mean we only
+ // preempt in ABIInternal code. However, there's at least one exception to
+ // this: when we're in an open-coded transition between an ABIInternal
+ // function and an ABI0 call. We could more carefully arrange unsafe points
+ // to avoid ever landing in ABI0, but it's easy to just make this code not
+ // sensitive to the ABI we're preempting. The CALL to asyncPreempt2 will
+ // ensure we're in ABIInternal register state.
+ p("// Save extended register state to p.xRegs.scratch")
+ p("// Don't make assumptions about ABI register state. See mkpreempt.go")
+ p("get_tls(CX)")
+ p("MOVQ g(CX), R14")
+ p("MOVQ g_m(R14), %s", xReg)
+ p("MOVQ m_p(%s), %s", xReg, xReg)
+ p("LEAQ (p_xRegs+xRegPerP_scratch)(%s), %s", xReg, xReg)
+ lXRegs.save(g)
+
p("CALL ·asyncPreempt2(SB)")
- lSSE.restore(g)
+
+ p("// Restore non-GPs from *p.xRegs.cache")
+ p("MOVQ g_m(R14), %s", xReg)
+ p("MOVQ m_p(%s), %s", xReg, xReg)
+ p("MOVQ (p_xRegs+xRegPerP_cache)(%s), %s", xReg, xReg)
+ lXRegs.restore(g)
+
+ p("// Restore GPs")
l.restore(g)
- p("ADJSP $%d", -lSSE.stack)
+ p("ADJSP $%d", -l.stack)
p("POPFQ")
p("POPQ BP")
p("RET")