aboutsummaryrefslogtreecommitdiff
path: root/src/cmd/compile/internal/ssa/stackalloc.go
blob: dd55d96ccc994cb08d492d9ed66e92de062641c9 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import "log"

// stackalloc allocates storage in the stack frame for
// all Values that did not get a register.
func stackalloc(f *Func) {
	locs := f.RegAlloc

	// Reserve the outgoing-arguments area at the bottom of the frame.
	size := int64(16) //TODO: compute max of all callsites

	// Include one slot for deferreturn.
	if false && size < f.Config.ptrSize { //TODO: check for deferreturn
		size = f.Config.ptrSize
	}

	// TODO: group variables by ptr/nonptr, size, etc.  Emit ptr vars last
	// so stackmap is smaller.

	// Phis get their slots first: the phi copies introduced during
	// regalloc must land in the very same location as the phi itself.
	for _, blk := range f.Blocks {
		for _, val := range blk.Values {
			if val.Op != OpPhi || val.Type.IsMemory() { // TODO: only "regallocable" types
				continue
			}
			size += val.Type.Size()
			// a := val.Type.Align()
			// size = (size + a - 1) / a * a  TODO
			slot := &LocalSlot{size}
			locs = setloc(locs, val, slot)
			for _, arg := range val.Args {
				locs = setloc(locs, arg, slot)
			}
		}
	}

	// Hand out slots to every remaining value that still lacks one.
	for _, blk := range f.Blocks {
		for _, val := range blk.Values {
			if val.ID < ID(len(locs)) && locs[val.ID] != nil {
				// Already placed (phi or register).
				continue
			}
			if val.Type.IsMemory() { // TODO: only "regallocable" types
				continue
			}
			if len(val.Args) == 0 {
				// val will have been materialized wherever it is needed.
				continue
			}
			if len(val.Args) == 1 {
				switch val.Args[0].Op {
				case OpFP, OpSP, OpGlobal:
					continue
				}
			}
			// a := val.Type.Align()
			// size = (size + a - 1) / a * a  TODO
			size += val.Type.Size()
			locs = setloc(locs, val, &LocalSlot{size})
		}
	}

	// TODO: align size
	size += f.Config.ptrSize // space for return address.  TODO: arch-dependent
	f.RegAlloc = locs
	f.FrameSize = size

	// TODO: share stack slots among noninterfering (& gc type compatible) values

	// Rewrite every use of FP into SP-relative form, now that the
	// final frame size is known.
	var fp *Value
	for _, blk := range f.Blocks {
		for _, val := range blk.Values {
			if val.Op == OpFP {
				if fp != nil {
					log.Panicf("multiple FP ops: %s %s", fp, val)
				}
				fp = val
			}
			for i, arg := range val.Args {
				if arg.Op != OpFP {
					continue
				}
				// TODO: do this with arch-specific rewrite rules somehow?
				switch val.Op {
				case OpADDQ:
					// (ADDQ (FP) x) -> (LEAQ [size] (SP) x)
					val.Op = OpLEAQ
					val.Aux = size
				case OpLEAQ, OpMOVQload, OpMOVQstore, OpMOVBload, OpMOVQloadidx8:
					if val.Op == OpMOVQloadidx8 && i == 1 {
						// Note: we could do it, but it is probably an error
						log.Panicf("can't do FP->SP adjust on index slot of load %s", val.Op)
					}
					// eg: (MOVQload [c] (FP) mem) -> (MOVQload [c+size] (SP) mem)
					val.Aux = addOffset(val.Aux.(int64), size)
				default:
					log.Panicf("can't do FP->SP adjust on %s", val.Op)
				}
			}
		}
	}
	if fp != nil {
		fp.Op = OpSP
		locs[fp.ID] = &registers[4] // TODO: arch-dependent
	}
}