Use *Node of type ONAME instead of string as the key for variable maps.
This will prevent aliasing between two identically named but
differently scoped variables.
Introduce an Aux value that encodes the offset of a variable
from a base pointer (either global base pointer or stack pointer).
Allow LEAQ and derivatives (MOVQ, etc.) to also have such an Aux field.
Allocate space for AUTO variables in stackalloc.
Change-Id: Ibdccdaea4bbc63a1f4882959ac374f2b467e3acd
Reviewed-on: https://go-review.googlesource.com/11238
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
"cmd/compile/internal/ssa"
"cmd/internal/obj"
- "cmd/internal/obj/x86" // TODO: remove
+ "cmd/internal/obj/x86"
)
// buildssa builds an SSA function
s.exit = s.f.NewBlock(ssa.BlockExit)
// Allocate starting values
+ s.vars = map[*Node]*ssa.Value{}
+ s.labels = map[string]*ssa.Block{}
s.startmem = s.entryNewValue0(ssa.OpArg, ssa.TypeMem)
- s.fp = s.entryNewValue0(ssa.OpFP, s.config.Uintptr) // TODO: use generic pointer type (unsafe.Pointer?) instead
- s.sp = s.entryNewValue0(ssa.OpSP, s.config.Uintptr)
+ s.sp = s.entryNewValue0(ssa.OpSP, s.config.Uintptr) // TODO: use generic pointer type (unsafe.Pointer?) instead
+ s.sb = s.entryNewValue0(ssa.OpSB, s.config.Uintptr)
- s.vars = map[string]*ssa.Value{}
- s.labels = map[string]*ssa.Block{}
- s.argOffsets = map[string]int64{}
+ // Generate addresses of local declarations
+ s.decladdrs = map[*Node]*ssa.Value{}
+ for d := fn.Func.Dcl; d != nil; d = d.Next {
+ n := d.N
+ switch n.Class {
+ case PPARAM, PPARAMOUT:
+ aux := &ssa.ArgSymbol{Typ: n.Type, Offset: n.Xoffset, Sym: n.Sym}
+ s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
+ case PAUTO:
+ aux := &ssa.AutoSymbol{Typ: n.Type, Offset: -1, Sym: n.Sym} // offset TBD by SSA pass
+ s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sp)
+ }
+ }
+ // nodfp is a special argument which is the function's FP.
+ aux := &ssa.ArgSymbol{Typ: s.config.Uintptr, Offset: 0, Sym: nodfp.Sym}
+ s.decladdrs[nodfp] = s.entryNewValue1A(ssa.OpAddr, s.config.Uintptr, aux, s.sp)
// Convert the AST-based IR to the SSA-based IR
s.startBlock(s.f.Entry)
// current location where we're interpreting the AST
curBlock *ssa.Block
- // variable assignments in the current block (map from variable name to ssa value)
- vars map[string]*ssa.Value
+ // variable assignments in the current block (map from variable symbol to ssa value)
+ // *Node is the unique identifier (an ONAME Node) for the variable.
+ vars map[*Node]*ssa.Value
// all defined variables at the end of each block. Indexed by block ID.
- defvars []map[string]*ssa.Value
+ defvars []map[*Node]*ssa.Value
- // offsets of argument slots
- // unnamed and unused args are not listed.
- argOffsets map[string]int64
+ // addresses of PPARAM, PPARAMOUT, and PAUTO variables.
+ decladdrs map[*Node]*ssa.Value
// starting values. Memory, frame pointer, and stack pointer
startmem *ssa.Value
- fp *ssa.Value
sp *ssa.Value
+ sb *ssa.Value
// line number stack. The current line number is top of stack
line []int32
func (s *state) Fatalf(msg string, args ...interface{}) { s.config.Fatalf(msg, args...) }
func (s *state) Unimplementedf(msg string, args ...interface{}) { s.config.Unimplementedf(msg, args...) }
+// dummy node for the memory variable
+var memvar = Node{Op: ONAME, Sym: &Sym{Name: "mem"}}
+
// startBlock sets the current block we're generating code in to b.
func (s *state) startBlock(b *ssa.Block) {
if s.curBlock != nil {
s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
}
s.curBlock = b
- s.vars = map[string]*ssa.Value{}
+ s.vars = map[*Node]*ssa.Value{}
}
// endBlock marks the end of generating code for the current block.
return s.f.Entry.NewValue1I(s.peekLine(), op, t, auxint, arg)
}
+// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
+func (s *state) entryNewValue1A(op ssa.Op, t ssa.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
+ return s.f.Entry.NewValue1A(s.peekLine(), op, t, aux, arg)
+}
+
// entryNewValue2 adds a new value with two arguments to the entry block.
func (s *state) entryNewValue2(op ssa.Op, t ssa.Type, arg0, arg1 *ssa.Value) *ssa.Value {
return s.f.Entry.NewValue2(s.peekLine(), op, t, arg0, arg1)
case ONAME:
if n.Class == PFUNC {
// "value" of a function is the address of the function's closure
- return s.entryNewValue0A(ssa.OpGlobal, Ptrto(n.Type), funcsym(n.Sym))
+ sym := funcsym(n.Sym)
+ aux := &ssa.ExternSymbol{n.Type, sym}
+ return s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sb)
}
- s.argOffsets[n.Sym.Name] = n.Xoffset // TODO: remember this another way?
if canSSA(n) {
- return s.variable(n.Sym.Name, n.Type)
+ return s.variable(n, n.Type)
}
addr := s.addr(n)
return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
}
if left.Op == ONAME && canSSA(left) {
// Update variable assignment.
- s.vars[left.Sym.Name] = val
+ s.vars[left] = val
return
}
// not ssa-able. Treat as a store.
addr := s.addr(left)
- s.vars[".mem"] = s.newValue3(ssa.OpStore, ssa.TypeMem, addr, val, s.mem())
+ s.vars[&memvar] = s.newValue3(ssa.OpStore, ssa.TypeMem, addr, val, s.mem())
}
// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
switch n.Class {
case PEXTERN:
// global variable
- return s.entryNewValue0A(ssa.OpGlobal, Ptrto(n.Type), n.Sym)
- case PPARAMOUT:
- // store to parameter slot
- return s.entryNewValue1I(ssa.OpOffPtr, Ptrto(n.Type), n.Xoffset, s.fp)
+ aux := &ssa.ExternSymbol{n.Type, n.Sym}
+ return s.entryNewValue1A(ssa.OpAddr, Ptrto(n.Type), aux, s.sb)
+ case PPARAM, PPARAMOUT, PAUTO:
+ // parameter/result slot or local variable
+ return s.decladdrs[n]
case PAUTO | PHEAP:
return s.expr(n.Name.Heapaddr)
default:
- // TODO: address of locals
s.Unimplementedf("variable address of %v not implemented", n)
return nil
}
}
// variable returns the value of a variable at the current location.
-func (s *state) variable(name string, t ssa.Type) *ssa.Value {
+func (s *state) variable(name *Node, t ssa.Type) *ssa.Value {
if s.curBlock == nil {
// Unimplemented instead of Fatal because fixedbugs/bug303.go
// demonstrates a case in which this appears to happen legitimately.
}
func (s *state) mem() *ssa.Value {
- return s.variable(".mem", ssa.TypeMem)
+ return s.variable(&memvar, ssa.TypeMem)
}
func (s *state) linkForwardReferences() {
if v.Op != ssa.OpFwdRef {
continue
}
- name := v.Aux.(string)
+ name := v.Aux.(*Node)
v.Op = ssa.OpCopy
v.Aux = nil
v.SetArgs1(s.lookupVarIncoming(b, v.Type, name))
}
// lookupVarIncoming finds the variable's value at the start of block b.
-func (s *state) lookupVarIncoming(b *ssa.Block, t ssa.Type, name string) *ssa.Value {
+func (s *state) lookupVarIncoming(b *ssa.Block, t ssa.Type, name *Node) *ssa.Value {
// TODO(khr): have lookupVarIncoming overwrite the fwdRef or copy it
// will be used in, instead of having the result used in a copy value.
if b == s.f.Entry {
- if name == ".mem" {
+ if name == &memvar {
return s.startmem
}
// variable is live at the entry block. Load it.
- addr := s.entryNewValue1I(ssa.OpOffPtr, Ptrto(t.(*Type)), s.argOffsets[name], s.fp)
+ addr := s.decladdrs[name]
+ if addr == nil {
+ // TODO: closure args reach here.
+ s.Unimplementedf("variable %s not found", name)
+ }
+ if _, ok := addr.Aux.(*ssa.ArgSymbol); !ok {
+ s.Fatalf("variable live at start of function %s is not an argument %s", b.Func.Name, name)
+ }
return s.entryNewValue2(ssa.OpLoad, t, addr, s.startmem)
-
}
var vals []*ssa.Value
for _, p := range b.Preds {
}
// lookupVarOutgoing finds the variable's value at the end of block b.
-func (s *state) lookupVarOutgoing(b *ssa.Block, t ssa.Type, name string) *ssa.Value {
+func (s *state) lookupVarOutgoing(b *ssa.Block, t ssa.Type, name *Node) *ssa.Value {
m := s.defvars[b.ID]
if v, ok := m[name]; ok {
return v
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = r
- case ssa.OpAMD64LEAQ:
+ case ssa.OpAMD64LEAQ1:
p := Prog(x86.ALEAQ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = regnum(v.Args[0])
p.From.Scale = 1
p.From.Index = regnum(v.Args[1])
- p.From.Offset = v.AuxInt
+ addAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = regnum(v)
+ case ssa.OpAMD64LEAQ:
+ p := Prog(x86.ALEAQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = regnum(v.Args[0])
+ addAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = regnum(v)
case ssa.OpAMD64CMPQ, ssa.OpAMD64TESTB, ssa.OpAMD64TESTQ:
p := Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = regnum(v.Args[0])
- p.From.Offset = v.AuxInt
+ addAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = regnum(v)
case ssa.OpAMD64MOVQloadidx8:
p := Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = regnum(v.Args[0])
- p.From.Offset = v.AuxInt
+ addAux(&p.From, v)
p.From.Scale = 8
p.From.Index = regnum(v.Args[1])
p.To.Type = obj.TYPE_REG
p.From.Reg = regnum(v.Args[1])
p.To.Type = obj.TYPE_MEM
p.To.Reg = regnum(v.Args[0])
- p.To.Offset = v.AuxInt
+ addAux(&p.To, v)
case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX:
p := Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
case ssa.OpArg:
// memory arg needs no code
// TODO: check that only mem arg goes here.
- case ssa.OpAMD64LEAQglobal:
- p := Prog(x86.ALEAQ)
- p.From.Type = obj.TYPE_MEM
- p.From.Name = obj.NAME_EXTERN
- p.From.Sym = Linksym(v.Aux.(*Sym))
- p.From.Offset = v.AuxInt
- p.To.Type = obj.TYPE_REG
- p.To.Reg = regnum(v)
case ssa.OpAMD64CALLstatic:
p := Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p := Prog(obj.ACALL)
p.To.Type = obj.TYPE_REG
p.To.Reg = regnum(v.Args[0])
- case ssa.OpFP, ssa.OpSP:
+ case ssa.OpSP, ssa.OpSB:
// nothing to do
default:
v.Unimplementedf("value %s not implemented", v.LongString())
return branches
}
+// addAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
+func addAux(a *obj.Addr, v *ssa.Value) {
+ if a.Type != obj.TYPE_MEM {
+ v.Fatalf("bad addAux addr %s", a)
+ }
+ // add integer offset
+ a.Offset += v.AuxInt
+
+ // If no additional symbol offset, we're done.
+ if v.Aux == nil {
+ return
+ }
+ // Add symbol's offset from its base register.
+ switch sym := v.Aux.(type) {
+ case *ssa.ExternSymbol:
+ a.Name = obj.NAME_EXTERN
+ a.Sym = Linksym(sym.Sym.(*Sym))
+ case *ssa.ArgSymbol:
+ a.Offset += v.Block.Func.FrameSize + sym.Offset
+ case *ssa.AutoSymbol:
+ if sym.Offset == -1 {
+ v.Fatalf("auto symbol %s offset not calculated", sym.Sym)
+ }
+ a.Offset += sym.Offset
+ default:
+ v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
+ }
+}
+
// ssaRegToReg maps ssa register numbers to obj register numbers.
var ssaRegToReg = [...]int16{
x86.REG_AX,
x86.REG_R13,
x86.REG_R14,
x86.REG_R15,
- // TODO: more
+ x86.REG_X0,
+ x86.REG_X1,
+ x86.REG_X2,
+ x86.REG_X3,
+ x86.REG_X4,
+ x86.REG_X5,
+ x86.REG_X6,
+ x86.REG_X7,
+ x86.REG_X8,
+ x86.REG_X9,
+ x86.REG_X10,
+ x86.REG_X11,
+ x86.REG_X12,
+ x86.REG_X13,
+ x86.REG_X14,
+ x86.REG_X15,
+ 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
// TODO: arch-dependent
}
// StringSym returns a symbol (a *Sym wrapped in an interface) which
// is a global string constant containing s.
func (*ssaExport) StringSym(s string) interface{} {
- return stringsym(s)
+ // TODO: is idealstring correct? It might not matter...
+ return &ssa.ExternSymbol{Typ: idealstring, Sym: stringsym(s)}
}
// Log logs a message from the compiler.
- Floating point registers
- Make calls clobber all registers
+StackAlloc:
+ - Compute size of outargs section correctly
+ - Sort variables so all ptr-containing ones are first (so stack
+ maps are smaller)
+ - Reuse stack slots for noninterfering and type-compatible variables
+ (both AUTOs and spilled Values). But see issue 8740 for what
+ "type-compatible variables" mean and what DWARF information provides.
+
Rewrites
- Strength reduction (both arch-indep and arch-dependent?)
- Start another architecture (arm?)
f.Fatalf("phi length %s does not match pred length %d for block %s", v.LongString(), len(b.Preds), b)
}
+ if v.Op == OpAddr {
+ if v.Args[0].Op != OpSP && v.Args[0].Op != OpSB {
+ f.Fatalf("bad arg to OpAddr %v", v)
+ }
+ }
+
// TODO: check for cycles in values
// TODO: check type
}
continue
}
if last != nil {
- b.Fatalf("two final stores - simultaneous live stores", last, v)
+ b.Fatalf("two final stores - simultaneous live stores %s %s", last, v)
}
last = v
}
Bloc("entry",
Valu("start", OpArg, TypeMem, 0, ".mem"),
Valu("v", OpConst, TypeBool, 0, true),
- Valu("addr1", OpGlobal, ptrType, 0, nil),
- Valu("addr2", OpGlobal, ptrType, 0, nil),
+ Valu("addr1", OpAddr, ptrType, 0, nil),
+ Valu("addr2", OpAddr, ptrType, 0, nil),
Valu("store1", OpStore, TypeMem, 0, nil, "addr1", "v", "start"),
Valu("store2", OpStore, TypeMem, 0, nil, "addr2", "v", "store1"),
Valu("store3", OpStore, TypeMem, 0, nil, "addr1", "v", "store2"),
Bloc("entry",
Valu("start", OpArg, TypeMem, 0, ".mem"),
Valu("v", OpConst, TypeBool, 0, true),
- Valu("addr", OpGlobal, ptrType, 0, nil),
+ Valu("addr", OpAddr, ptrType, 0, nil),
Goto("loop")),
Bloc("loop",
Valu("phi", OpPhi, TypeMem, 0, nil, "start", "store"),
Bloc("entry",
Valu("start", OpArg, TypeMem, 0, ".mem"),
Valu("v", OpConst, TypeBool, 0, true),
- Valu("addr1", OpGlobal, t1, 0, nil),
- Valu("addr2", OpGlobal, t2, 0, nil),
+ Valu("addr1", OpAddr, t1, 0, nil),
+ Valu("addr2", OpAddr, t2, 0, nil),
Valu("store1", OpStore, TypeMem, 0, nil, "addr1", "v", "start"),
Valu("store2", OpStore, TypeMem, 0, nil, "addr2", "v", "store1"),
Goto("exit")),
(Const <t> [val]) && t.IsInteger() -> (MOVQconst [val])
+(Addr {sym} base) -> (LEAQ {sym} base)
+
// block rewrites
(If (SETL cmp) yes no) -> (LT cmp yes no)
(If (SETNE cmp) yes no) -> (NE cmp yes no)
// Rules below here apply some simple optimizations after lowering.
// TODO: Should this be a separate pass?
-// global loads/stores
-(Global {sym}) -> (LEAQglobal {sym})
-
// fold constants into instructions
(ADDQ x (MOVQconst [c])) -> (ADDQconst [c] x) // TODO: restrict c to int32 range?
(ADDQ (MOVQconst [c]) x) -> (ADDQconst [c] x)
(MOVQload [off1] (ADDQconst [off2] ptr) mem) -> (MOVQload [addOff(off1, off2)] ptr mem)
(MOVQstore [off1] (ADDQconst [off2] ptr) val mem) -> (MOVQstore [addOff(off1, off2)] ptr val mem)
+(MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) && (sym1 == nil || sym2 == nil) ->
+ (MOVQload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+(MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) && (sym1 == nil || sym2 == nil) ->
+ (MOVQstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
+
// indexed loads and stores
(MOVQload [off1] (LEAQ8 [off2] ptr idx) mem) -> (MOVQloadidx8 [addOff(off1, off2)] ptr idx mem)
(MOVQstore [off1] (LEAQ8 [off2] ptr idx) val mem) -> (MOVQstoreidx8 [addOff(off1, off2)] ptr idx val mem)
".X15",
// pseudo-registers
- ".FP",
+ ".SB",
".FLAGS",
}
}
gp := buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15")
- gpsp := gp | buildReg("SP FP")
+ gpsp := gp | buildReg("SP")
+ gpspsb := gpsp | buildReg("SB")
flags := buildReg("FLAGS")
gp01 := regInfo{[]regMask{}, 0, []regMask{gp}}
gp11 := regInfo{[]regMask{gpsp}, 0, []regMask{gp}}
+ gp11sb := regInfo{[]regMask{gpspsb}, 0, []regMask{gp}}
gp21 := regInfo{[]regMask{gpsp, gpsp}, 0, []regMask{gp}}
+ gp21sb := regInfo{[]regMask{gpspsb, gpsp}, 0, []regMask{gp}}
gp21shift := regInfo{[]regMask{gpsp, buildReg("CX")}, 0, []regMask{gp}}
gp2flags := regInfo{[]regMask{gpsp, gpsp}, 0, []regMask{flags}}
gp1flags := regInfo{[]regMask{gpsp}, 0, []regMask{flags}}
flagsgp1 := regInfo{[]regMask{flags}, 0, []regMask{gp}}
- gpload := regInfo{[]regMask{gpsp, 0}, 0, []regMask{gp}}
- gploadidx := regInfo{[]regMask{gpsp, gpsp, 0}, 0, []regMask{gp}}
- gpstore := regInfo{[]regMask{gpsp, gpsp, 0}, 0, nil}
- gpstoreidx := regInfo{[]regMask{gpsp, gpsp, gpsp, 0}, 0, nil}
+ gpload := regInfo{[]regMask{gpspsb, 0}, 0, []regMask{gp}}
+ gploadidx := regInfo{[]regMask{gpspsb, gpsp, 0}, 0, []regMask{gp}}
+ gpstore := regInfo{[]regMask{gpspsb, gpsp, 0}, 0, nil}
+ gpstoreidx := regInfo{[]regMask{gpspsb, gpsp, gpsp, 0}, 0, nil}
flagsgp := regInfo{[]regMask{flags}, 0, []regMask{gp}}
cmov := regInfo{[]regMask{flags, gp, gp}, 0, []regMask{gp}}
{name: "MOVWQSX", reg: gp11, asm: "MOVWQSX"}, // extend arg0 from int16 to int64
{name: "MOVBQSX", reg: gp11, asm: "MOVBQSX"}, // extend arg0 from int8 to int64
- {name: "MOVQconst", reg: gp01}, // auxint
- {name: "LEAQ", reg: gp21}, // arg0 + arg1 + auxint
- {name: "LEAQ2", reg: gp21}, // arg0 + 2*arg1 + auxint
- {name: "LEAQ4", reg: gp21}, // arg0 + 4*arg1 + auxint
- {name: "LEAQ8", reg: gp21}, // arg0 + 8*arg1 + auxint
- {name: "LEAQglobal", reg: gp01}, // no args. address of aux.(*gc.Sym)
+ {name: "MOVQconst", reg: gp01}, // auxint
+ {name: "LEAQ", reg: gp11sb}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAQ1", reg: gp21sb}, // arg0 + arg1 + auxint
+ {name: "LEAQ2", reg: gp21sb}, // arg0 + 2*arg1 + auxint
+ {name: "LEAQ4", reg: gp21sb}, // arg0 + 4*arg1 + auxint
+ {name: "LEAQ8", reg: gp21sb}, // arg0 + 8*arg1 + auxint
{name: "MOVBload", reg: gpload, asm: "MOVB"}, // load byte from arg0+auxint. arg1=mem
{name: "MOVBQZXload", reg: gpload}, // ditto, extend to uint64
(Store dst (Load <t> src mem) mem) && t.Size() > 8 -> (Move [t.Size()] dst src mem)
// string ops
-(Const <t> {s}) && t.IsString() -> (StringMake (OffPtr <TypeBytePtr> [2*config.ptrSize] (Global <TypeBytePtr> {config.fe.StringSym(s.(string))})) (Const <config.Uintptr> [int64(len(s.(string)))])) // TODO: ptr
+(Const <t> {s}) && t.IsString() -> (StringMake (OffPtr <TypeBytePtr> [2*config.ptrSize] (Addr <TypeBytePtr> {config.fe.StringSym(s.(string))} (SB <config.Uintptr>))) (Const <config.Uintptr> [int64(len(s.(string)))])) // TODO: ptr
(Load <t> ptr mem) && t.IsString() -> (StringMake (Load <TypeBytePtr> ptr mem) (Load <config.Uintptr> (OffPtr <TypeBytePtr> [config.ptrSize] ptr) mem))
(StringPtr (StringMake ptr _)) -> ptr
(StringLen (StringMake _ len)) -> len
{name: "Const"},
// Constant-like things
- {name: "Arg"}, // address of a function parameter/result. Memory input is an arg called ".mem". aux is a string (TODO: make it something other than a string?)
- {name: "Global"}, // the address of a global variable aux.(*gc.Sym)
- {name: "SP"}, // stack pointer
- {name: "FP"}, // frame pointer
- {name: "Func"}, // entry address of a function
+ {name: "Arg"}, // memory input to the function.
+
+ // The address of a variable. arg0 is the base pointer (SB or SP, depending
+ // on whether it is a global or stack variable). The Aux field identifies the
+ // variable. It will be either an *ExternSymbol (with arg0=SB), *ArgSymbol (arg0=SP),
+ // or *AutoSymbol (arg0=SP).
+ {name: "Addr"}, // Address of a variable. Arg0=SP or SB. Aux identifies the variable.
+
+ {name: "SP"}, // stack pointer
+ {name: "SB"}, // static base pointer (a.k.a. globals pointer)
+ {name: "Func"}, // entry address of a function
// Memory operations
{name: "Load"}, // Load from arg0. arg1=memory
// Check for unlowered opcodes, fail if we find one.
for _, b := range f.Blocks {
for _, v := range b.Values {
- if opcodeTable[v.Op].generic && v.Op != OpFP && v.Op != OpSP && v.Op != OpArg && v.Op != OpCopy && v.Op != OpPhi {
+ if opcodeTable[v.Op].generic && v.Op != OpSP && v.Op != OpSB && v.Op != OpArg && v.Op != OpCopy && v.Op != OpPhi {
f.Unimplementedf("%s not lowered", v.LongString())
}
}
OpAMD64MOVBQSX
OpAMD64MOVQconst
OpAMD64LEAQ
+ OpAMD64LEAQ1
OpAMD64LEAQ2
OpAMD64LEAQ4
OpAMD64LEAQ8
- OpAMD64LEAQglobal
OpAMD64MOVBload
OpAMD64MOVBQZXload
OpAMD64MOVBQSXload
OpCopy
OpConst
OpArg
- OpGlobal
+ OpAddr
OpSP
- OpFP
+ OpSB
OpFunc
OpLoad
OpStore
name: "ADDQ",
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
name: "ADDQconst",
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.ASUBQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.ASUBQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.AIMULQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.AIMULQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.AANDQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.AANDQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.ASHLQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 2, // .CX
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
},
clobbers: 0,
outputs: []regMask{
asm: x86.ASHLQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.ASHRQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 2, // .CX
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
},
clobbers: 0,
outputs: []regMask{
asm: x86.ASHRQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.ASARQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 2, // .CX
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 2, // .CX
},
clobbers: 0,
outputs: []regMask{
asm: x86.ASARQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
name: "NEGQ",
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.ACMPQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.ACMPQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.ATESTQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.ATESTB,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.AMOVLQSX,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.AMOVWQSX,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.AMOVBQSX,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
name: "LEAQ",
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
clobbers: 0,
outputs: []regMask{
},
},
{
- name: "LEAQ2",
+ name: "LEAQ1",
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
},
},
{
- name: "LEAQ4",
+ name: "LEAQ2",
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
},
},
{
- name: "LEAQ8",
+ name: "LEAQ4",
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
},
},
{
- name: "LEAQglobal",
+ name: "LEAQ8",
reg: regInfo{
- inputs: []regMask{},
+ inputs: []regMask{
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
clobbers: 0,
outputs: []regMask{
65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
asm: x86.AMOVB,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
0,
},
clobbers: 0,
name: "MOVBQZXload",
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
0,
},
clobbers: 0,
name: "MOVBQSXload",
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
0,
},
clobbers: 0,
asm: x86.AMOVW,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
0,
},
clobbers: 0,
asm: x86.AMOVL,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
0,
},
clobbers: 0,
asm: x86.AMOVQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
0,
},
clobbers: 0,
asm: x86.AMOVQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
0,
},
clobbers: 0,
asm: x86.AMOVB,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
0,
},
clobbers: 0,
asm: x86.AMOVW,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
0,
},
clobbers: 0,
asm: x86.AMOVL,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
0,
},
clobbers: 0,
asm: x86.AMOVQ,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
0,
},
clobbers: 0,
name: "MOVQstoreidx8",
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
0,
},
clobbers: 0,
name: "CALLclosure",
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4, // .DX
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 4, // .DX
0,
},
clobbers: 0,
asm: x86.AADDL,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.AADDW,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
asm: x86.AADDB,
reg: regInfo{
inputs: []regMask{
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
- 4295032831, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .FP
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ 65535, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
},
clobbers: 0,
outputs: []regMask{
generic: true,
},
{
- name: "Global",
+ name: "Addr",
reg: regInfo{
inputs: []regMask{},
clobbers: 0,
generic: true,
},
{
- name: "FP",
+ name: "SB",
reg: regInfo{
inputs: []regMask{},
clobbers: 0,
Register{29, "X13"},
Register{30, "X14"},
Register{31, "X15"},
- Register{32, "FP"}, // pseudo-register, actually a constant offset from SP
+ Register{32, "SB"}, // pseudo-register for global base pointer (aka %rip)
Register{33, "FLAGS"},
// TODO: make arch-dependent
var oldSched []*Value
- // Hack to find fp, sp Values and assign them a register. (TODO: make not so hacky)
- var fp, sp *Value
+ // Hack to find sp and sb Values and assign them a register. (TODO: make not so hacky)
+ var sp, sb *Value
for _, v := range f.Entry.Values {
switch v.Op {
case OpSP:
sp = v
home = setloc(home, v, ®isters[4]) // TODO: arch-dependent
- case OpFP:
- fp = v
+ case OpSB:
+ sb = v
home = setloc(home, v, ®isters[32]) // TODO: arch-dependent
}
}
// TODO: hack: initialize fixed registers
regs[4] = regInfo{sp, sp, false}
- regs[32] = regInfo{fp, fp, false}
+ regs[32] = regInfo{sb, sb, false}
var used regMask // has a 1 for each non-nil entry in regs
var dirty regMask // has a 1 for each dirty entry in regs
// nospill contains registers that we can't spill because
// we already set them up for use by the current instruction.
var nospill regMask
- nospill |= 0x100000010 // SP and FP can't be spilled (TODO: arch-specific)
+ nospill |= 0x100000010 // SP & SB can't be spilled (TODO: arch-specific)
// Move inputs into registers
for _, o := range order {
var c *Value
if len(w.Args) == 0 {
// Materialize w
- if w.Op == OpFP || w.Op == OpSP || w.Op == OpGlobal {
+ if w.Op == OpSB {
+ c = w
+ } else if w.Op == OpSP {
c = b.NewValue1(w.Line, OpCopy, w.Type, w)
} else {
c = b.NewValue0IA(w.Line, w.Op, w.Type, w.AuxInt, w.Aux)
}
- } else if len(w.Args) == 1 && (w.Args[0].Op == OpFP || w.Args[0].Op == OpSP || w.Args[0].Op == OpGlobal) {
- // Materialize offsets from SP/FP/Global
+ } else if len(w.Args) == 1 && (w.Args[0].Op == OpSP || w.Args[0].Op == OpSB) {
+ // Materialize offsets from SP/SB
c = b.NewValue1IA(w.Line, w.Op, w.Type, w.AuxInt, w.Aux, w.Args[0])
} else if wreg != 0 {
// Copy from another register.
return z
}
+// mergeSym merges two symbol annotations from combined rewrite-rule
+// operands. At most one of x and y may be non-nil (the rewrite rules
+// guard on sym1 == nil || sym2 == nil); the result is whichever one
+// is non-nil, or nil if both are.
+func mergeSym(x, y interface{}) interface{} {
+	if x == nil {
+		return y
+	}
+	if y == nil {
+		return x
+	}
+	// Two non-nil syms means a rewrite rule fired without its
+	// nil-sym guard — a compiler bug, not a user error.
+	panic(fmt.Sprintf("mergeSym with two non-nil syms %s %s", x, y))
+}
+
func inBounds(idx, len int64) bool {
return idx >= 0 && idx < len
}
goto end858e823866524b81b4636f7dd7e8eefe
end858e823866524b81b4636f7dd7e8eefe:
;
+ case OpAddr:
+ // match: (Addr {sym} base)
+ // cond:
+ // result: (LEAQ {sym} base)
+ {
+ sym := v.Aux
+ base := v.Args[0]
+ v.Op = OpAMD64LEAQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.Aux = sym
+ v.AddArg(base)
+ return true
+ }
+ goto end53cad0c3c9daa5575680e77c14e05e72
+ end53cad0c3c9daa5575680e77c14e05e72:
+ ;
case OpAMD64CMOVQCC:
// match: (CMOVQCC (CMPQconst [c] (MOVQconst [d])) _ x)
// cond: inBounds(d, c)
goto endcc7894224d4f6b0bcabcece5d0185912
endcc7894224d4f6b0bcabcece5d0185912:
;
- case OpGlobal:
- // match: (Global {sym})
- // cond:
- // result: (LEAQglobal {sym})
- {
- sym := v.Aux
- v.Op = OpAMD64LEAQglobal
- v.AuxInt = 0
- v.Aux = nil
- v.resetArgs()
- v.Aux = sym
- return true
- }
- goto end8f47b6f351fecaeded45abbe5c2beec0
- end8f47b6f351fecaeded45abbe5c2beec0:
- ;
case OpIsInBounds:
// match: (IsInBounds idx len)
// cond:
}
goto end843d29b538c4483b432b632e5666d6e3
end843d29b538c4483b432b632e5666d6e3:
+ ;
+ // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: (sym1 == nil || sym2 == nil)
+ // result: (MOVQload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem)
+ {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ if v.Args[0].Op != OpAMD64LEAQ {
+ goto end227426af95e74caddcf59fdcd30ca8bc
+ }
+ off2 := v.Args[0].AuxInt
+ sym2 := v.Args[0].Aux
+ base := v.Args[0].Args[0]
+ mem := v.Args[1]
+ if !(sym1 == nil || sym2 == nil) {
+ goto end227426af95e74caddcf59fdcd30ca8bc
+ }
+ v.Op = OpAMD64MOVQload
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = addOff(off1, off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(mem)
+ return true
+ }
+ goto end227426af95e74caddcf59fdcd30ca8bc
+ end227426af95e74caddcf59fdcd30ca8bc:
;
// match: (MOVQload [off1] (LEAQ8 [off2] ptr idx) mem)
// cond:
}
goto end2108c693a43c79aed10b9246c39c80aa
end2108c693a43c79aed10b9246c39c80aa:
+ ;
+ // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: (sym1 == nil || sym2 == nil)
+ // result: (MOVQstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem)
+ {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ if v.Args[0].Op != OpAMD64LEAQ {
+ goto end5061f48193268a5eb1e1740bdd23c43d
+ }
+ off2 := v.Args[0].AuxInt
+ sym2 := v.Args[0].Aux
+ base := v.Args[0].Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ if !(sym1 == nil || sym2 == nil) {
+ goto end5061f48193268a5eb1e1740bdd23c43d
+ }
+ v.Op = OpAMD64MOVQstore
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v.AuxInt = addOff(off1, off2)
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(base)
+ v.AddArg(val)
+ v.AddArg(mem)
+ return true
+ }
+ goto end5061f48193268a5eb1e1740bdd23c43d
+ end5061f48193268a5eb1e1740bdd23c43d:
;
// match: (MOVQstore [off1] (LEAQ8 [off2] ptr idx) val mem)
// cond:
case OpConst:
// match: (Const <t> {s})
// cond: t.IsString()
- // result: (StringMake (OffPtr <TypeBytePtr> [2*config.ptrSize] (Global <TypeBytePtr> {config.fe.StringSym(s.(string))})) (Const <config.Uintptr> [int64(len(s.(string)))]))
+ // result: (StringMake (OffPtr <TypeBytePtr> [2*config.ptrSize] (Addr <TypeBytePtr> {config.fe.StringSym(s.(string))} (SB <config.Uintptr>))) (Const <config.Uintptr> [int64(len(s.(string)))]))
{
t := v.Type
s := v.Aux
if !(t.IsString()) {
- goto end6d6321106a054a5984b2ed0acec52a5b
+ goto end55cd8fd3b98a2459d0ee9d6cbb456b01
}
v.Op = OpStringMake
v.AuxInt = 0
v0 := v.Block.NewValue0(v.Line, OpOffPtr, TypeInvalid)
v0.Type = TypeBytePtr
v0.AuxInt = 2 * config.ptrSize
- v1 := v.Block.NewValue0(v.Line, OpGlobal, TypeInvalid)
+ v1 := v.Block.NewValue0(v.Line, OpAddr, TypeInvalid)
v1.Type = TypeBytePtr
v1.Aux = config.fe.StringSym(s.(string))
+ v2 := v.Block.NewValue0(v.Line, OpSB, TypeInvalid)
+ v2.Type = config.Uintptr
+ v1.AddArg(v2)
v0.AddArg(v1)
v.AddArg(v0)
- v2 := v.Block.NewValue0(v.Line, OpConst, TypeInvalid)
- v2.Type = config.Uintptr
- v2.AuxInt = int64(len(s.(string)))
- v.AddArg(v2)
+ v3 := v.Block.NewValue0(v.Line, OpConst, TypeInvalid)
+ v3.Type = config.Uintptr
+ v3.AuxInt = int64(len(s.(string)))
+ v.AddArg(v3)
return true
}
- goto end6d6321106a054a5984b2ed0acec52a5b
- end6d6321106a054a5984b2ed0acec52a5b:
+ goto end55cd8fd3b98a2459d0ee9d6cbb456b01
+ end55cd8fd3b98a2459d0ee9d6cbb456b01:
;
case OpIsInBounds:
// match: (IsInBounds (Const [c]) (Const [d]))
fun := Fun(c, "entry",
Bloc("entry",
Valu("mem", OpArg, TypeMem, 0, ".mem"),
- Valu("FP", OpFP, TypeUInt64, 0, nil),
- Valu("argptr", OpOffPtr, ptyp, 8, nil, "FP"),
- Valu("resptr", OpOffPtr, ptyp, 16, nil, "FP"),
+ Valu("SP", OpSP, TypeUInt64, 0, nil),
+ Valu("argptr", OpOffPtr, ptyp, 8, nil, "SP"),
+ Valu("resptr", OpOffPtr, ptyp, 16, nil, "SP"),
Valu("load", OpLoad, typ, 0, nil, "argptr", "mem"),
Valu("c", OpConst, TypeUInt64, amount, nil),
Valu("shift", op, typ, 0, nil, "load", "c"),
// v will have been materialized wherever it is needed.
continue
}
- if len(v.Args) == 1 && (v.Args[0].Op == OpFP || v.Args[0].Op == OpSP || v.Args[0].Op == OpGlobal) {
+ if len(v.Args) == 1 && (v.Args[0].Op == OpSP || v.Args[0].Op == OpSB) {
continue
}
n = align(n, v.Type.Alignment())
}
}
+ // Finally, allocate space for all autos that we used
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ s, ok := v.Aux.(*AutoSymbol)
+ if !ok || s.Offset >= 0 {
+ continue
+ }
+ t := s.Typ
+ n = align(n, t.Alignment())
+ s.Offset = n
+ n += t.Size()
+ }
+ }
+
n = align(n, f.Config.ptrSize)
n += f.Config.ptrSize // space for return address. TODO: arch-dependent
f.RegAlloc = home
f.FrameSize = n
// TODO: share stack slots among noninterfering (& gc type compatible) values
-
- // adjust all uses of FP to SP now that we have the frame size.
- var fp *Value
- for _, b := range f.Blocks {
- for _, v := range b.Values {
- if v.Op == OpFP {
- if fp != nil {
- b.Fatalf("multiple FP ops: %s %s", fp, v)
- }
- fp = v
- }
- for i, a := range v.Args {
- if a.Op != OpFP {
- continue
- }
- // TODO: do this with arch-specific rewrite rules somehow?
- switch v.Op {
- case OpAMD64ADDQ:
- // (ADDQ (FP) x) -> (LEAQ [n] (SP) x)
- v.Op = OpAMD64LEAQ
- v.AuxInt = n
- case OpAMD64ADDQconst:
- // TODO(matloob): Add LEAQconst op
- v.AuxInt = addOff(v.AuxInt, n)
- case OpAMD64LEAQ, OpAMD64MOVQload, OpAMD64MOVQstore, OpAMD64MOVLload, OpAMD64MOVLstore, OpAMD64MOVWload, OpAMD64MOVWstore, OpAMD64MOVBload, OpAMD64MOVBstore, OpAMD64MOVQloadidx8:
- if v.Op == OpAMD64MOVQloadidx8 && i == 1 {
- // Note: we could do it, but it is probably an error
- f.Fatalf("can't do FP->SP adjust on index slot of load %s", v.Op)
- }
- // eg: (MOVQload [c] (FP) mem) -> (MOVQload [c+n] (SP) mem)
- v.AuxInt = addOff(v.AuxInt, n)
- default:
- f.Unimplementedf("can't do FP->SP adjust on %s", v.Op)
- // TODO: OpCopy -> ADDQ
- }
- }
- }
- }
- if fp != nil {
- fp.Op = OpSP
- home[fp.ID] = ®isters[4] // TODO: arch-dependent
- }
}
// align increases n to the next multiple of a. a must be a power of 2.
func (v *Value) Logf(msg string, args ...interface{}) { v.Block.Logf(msg, args...) }
func (v *Value) Fatalf(msg string, args ...interface{}) { v.Block.Fatalf(msg, args...) }
func (v *Value) Unimplementedf(msg string, args ...interface{}) { v.Block.Unimplementedf(msg, args...) }
+
+// ExternSymbol is an aux value that encodes a variable's
+// constant offset from the static base pointer.
+type ExternSymbol struct {
+	Typ Type         // Go type
+	Sym fmt.Stringer // A *gc.Sym referring to a global variable
+	// Note: the offset for an external symbol is not
+	// calculated until link time.
+}
+
+// ArgSymbol is an aux value that encodes an argument or result
+// variable's constant offset from FP (FP = SP + framesize).
+type ArgSymbol struct {
+	Typ    Type         // Go type
+	Offset int64        // Distance above frame pointer
+	Sym    fmt.Stringer // A *gc.Sym referring to the argument/result variable.
+}
+
+// AutoSymbol is an aux value that encodes a local variable's
+// constant offset from SP.
+type AutoSymbol struct {
+	Typ    Type         // Go type
+	Offset int64        // Distance above stack pointer. Set by stackalloc in SSA.
+	Sym    fmt.Stringer // A *gc.Sym referring to a local (auto) variable.
+}
+
+// String returns the name of the global variable s refers to.
+func (s *ExternSymbol) String() string {
+	return s.Sym.String()
+}
+
+// String returns the name of the argument/result variable s refers to.
+func (s *ArgSymbol) String() string {
+	return s.Sym.String()
+}
+
+// String returns the name of the local (auto) variable s refers to.
+func (s *AutoSymbol) String() string {
+	return s.Sym.String()
+}