Step one in eliminating Prog-related globals.
Passes toolstash-check -all.
Updates #15756
Change-Id: I3b777fb5a7716f2d9da3067fbd94c28ca894a465
Reviewed-on: https://go-review.googlesource.com/38450
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
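
The change is mechanical: gc.SSAGenState gains a Prog method that, for now,
simply delegates to the package-level Prog, and the backend helpers that emit
Progs (opregreg, genshift, genregshift, oneFPJump) take the state explicitly
instead of reaching for the global. A minimal, self-contained sketch of the
pattern follows; the names are illustrative stand-ins, not the compiler's real
types.

	package main

	import "fmt"

	type Prog struct{ As string }

	// progList stands in for the Prog-related globals being phased out.
	var progList []*Prog

	// newProg plays the role of the old package-level gc.Prog.
	func newProg(as string) *Prog {
		p := &Prog{As: as}
		progList = append(progList, p)
		return p
	}

	// SSAGenState stands in for gc.SSAGenState.
	type SSAGenState struct{}

	// Prog appends a new Prog. Step one: it still delegates to the
	// package-level helper; later steps can move the state in here.
	func (s *SSAGenState) Prog(as string) *Prog { return newProg(as) }

	func main() {
		var s SSAGenState
		// Call sites change from newProg("MOVQ") to s.Prog("MOVQ").
		p := s.Prog("MOVQ")
		fmt.Println(p.As, len(progList))
	}
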
// opregreg emits instructions for
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
-func opregreg(op obj.As, dest, src int16) *obj.Prog {
- p := gc.Prog(op)
+func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
+ p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = dest
r2 := v.Args[1].Reg()
switch {
case r == r1:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case r == r2:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.To.Type = obj.TYPE_REG
} else {
asm = x86.ALEAL
}
- p := gc.Prog(asm)
+ p := s.Prog(asm)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r1
p.From.Scale = 1
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- opregreg(v.Op.Asm(), r, v.Args[1].Reg())
+ opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
case ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU:
// Arg[0] (the dividend) is in AX.
r := v.Args[1].Reg()
// Zero extend dividend.
- c := gc.Prog(x86.AXORL)
+ c := s.Prog(x86.AXORL)
c.From.Type = obj.TYPE_REG
c.From.Reg = x86.REG_DX
c.To.Type = obj.TYPE_REG
c.To.Reg = x86.REG_DX
// Issue divide.
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
var c *obj.Prog
switch v.Op {
case ssa.OpAMD64DIVQ:
- c = gc.Prog(x86.ACMPQ)
+ c = s.Prog(x86.ACMPQ)
case ssa.OpAMD64DIVL:
- c = gc.Prog(x86.ACMPL)
+ c = s.Prog(x86.ACMPL)
case ssa.OpAMD64DIVW:
- c = gc.Prog(x86.ACMPW)
+ c = s.Prog(x86.ACMPW)
}
c.From.Type = obj.TYPE_REG
c.From.Reg = r
c.To.Type = obj.TYPE_CONST
c.To.Offset = -1
- j1 := gc.Prog(x86.AJEQ)
+ j1 := s.Prog(x86.AJEQ)
j1.To.Type = obj.TYPE_BRANCH
// Sign extend dividend.
switch v.Op {
case ssa.OpAMD64DIVQ:
- gc.Prog(x86.ACQO)
+ s.Prog(x86.ACQO)
case ssa.OpAMD64DIVL:
- gc.Prog(x86.ACDQ)
+ s.Prog(x86.ACDQ)
case ssa.OpAMD64DIVW:
- gc.Prog(x86.ACWD)
+ s.Prog(x86.ACWD)
}
// Issue divide.
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
// Skip over -1 fixup code.
- j2 := gc.Prog(obj.AJMP)
+ j2 := s.Prog(obj.AJMP)
j2.To.Type = obj.TYPE_BRANCH
// Issue -1 fixup code.
// n / -1 = -n
- n1 := gc.Prog(x86.ANEGQ)
+ n1 := s.Prog(x86.ANEGQ)
n1.To.Type = obj.TYPE_REG
n1.To.Reg = x86.REG_AX
// n % -1 == 0
- n2 := gc.Prog(x86.AXORL)
+ n2 := s.Prog(x86.AXORL)
n2.From.Type = obj.TYPE_REG
n2.From.Reg = x86.REG_DX
n2.To.Type = obj.TYPE_REG
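
Aside on the fixup above: hardware IDIV faults when the quotient overflows,
notably for MinInt64 / -1, while the Go spec defines that case to wrap, so the
compiler branches around the divide when the divisor is -1 and computes the
result with NEG/XOR instead. A small standalone check of the identities the
fixup relies on:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		// n / -1 == -n and n % -1 == 0, which is what NEGQ/XORL compute.
		n := int64(7)
		fmt.Println(n/-1 == -n, n%-1 == 0) // true true

		// The case that makes the branch necessary: the true quotient of
		// MinInt64 / -1 does not fit in int64 (IDIV would fault), but Go
		// defines the result to wrap back to MinInt64, which NEG produces.
		m := int64(math.MinInt64)
		fmt.Println(m/-1 == m, m%-1 == 0) // true true
	}
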
// Arg[0] is already in AX as it's the only register we allow
// and DX is the only output we care about (the high bits)
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
// IMULB puts the high portion in AH instead of DL,
// so move it to DL for consistency
if v.Type.Size() == 1 {
- m := gc.Prog(x86.AMOVB)
+ m := s.Prog(x86.AMOVB)
m.From.Type = obj.TYPE_REG
m.From.Reg = x86.REG_AH
m.To.Type = obj.TYPE_REG
case ssa.OpAMD64MULQU2:
// Arg[0] is already in AX as it's the only register we allow
// results hi in DX, lo in AX
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
case ssa.OpAMD64DIVQU2:
// Arg[0], Arg[1] are already in DX, AX, as they're the only registers we allow
// results q in AX, r in DX
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- p := gc.Prog(x86.AADDQ)
+ p := s.Prog(x86.AADDQ)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = r
p.From.Reg = v.Args[1].Reg()
- p = gc.Prog(x86.ARCRQ)
+ p = s.Prog(x86.ARCRQ)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
} else {
asm = x86.AINCL
}
- p := gc.Prog(asm)
+ p := s.Prog(asm)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
return
} else {
asm = x86.ADECL
}
- p := gc.Prog(asm)
+ p := s.Prog(asm)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
return
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
} else {
asm = x86.ALEAL
}
- p := gc.Prog(asm)
+ p := s.Prog(asm)
p.From.Type = obj.TYPE_MEM
p.From.Reg = a
p.From.Offset = v.AuxInt
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask:
r := v.Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_REG
case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8:
r := v.Args[0].Reg()
i := v.Args[1].Reg()
- p := gc.Prog(x86.ALEAQ)
+ p := s.Prog(x86.ALEAQ)
switch v.Op {
case ssa.OpAMD64LEAQ1:
p.From.Scale = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
ssa.OpAMD64TESTQ, ssa.OpAMD64TESTL, ssa.OpAMD64TESTW, ssa.OpAMD64TESTB,
ssa.OpAMD64BTL, ssa.OpAMD64BTQ:
- opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
case ssa.OpAMD64UCOMISS, ssa.OpAMD64UCOMISD:
// Go assembler has swapped operands for UCOMISx relative to CMP,
// must account for that right here.
- opregreg(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
+ opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt
case ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst,
ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[0].Reg()
case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
x := v.Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
}
case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst:
x := v.Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
p.To.Reg = x
case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload, ssa.OpAMD64MOVOload:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVWloadidx2:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
if i == x86.REG_SP {
r, i = i, r
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = r
p.From.Scale = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVWstoreidx2:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
if i == x86.REG_SP {
r, i = i, r
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Index = i
gc.AddAux(&p.To, v)
case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
- opregreg(v.Op.Asm(), v.Reg(), v.Args[0].Reg())
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
case ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSL2SS:
r := v.Reg()
// Break false dependency on destination register.
- opregreg(x86.AXORPS, r, r)
- opregreg(v.Op.Asm(), r, v.Args[0].Reg())
+ opregreg(s, x86.AXORPS, r, r)
+ opregreg(s, v.Op.Asm(), r, v.Args[0].Reg())
case ssa.OpAMD64ADDQmem, ssa.OpAMD64ADDLmem, ssa.OpAMD64SUBQmem, ssa.OpAMD64SUBLmem,
ssa.OpAMD64ANDQmem, ssa.OpAMD64ANDLmem, ssa.OpAMD64ORQmem, ssa.OpAMD64ORLmem,
ssa.OpAMD64XORQmem, ssa.OpAMD64XORLmem, ssa.OpAMD64ADDSDmem, ssa.OpAMD64ADDSSmem,
ssa.OpAMD64SUBSDmem, ssa.OpAMD64SUBSSmem, ssa.OpAMD64MULSDmem, ssa.OpAMD64MULSSmem:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
gc.AddAux(&p.From, v)
adj := duffAdj(v.AuxInt)
var p *obj.Prog
if adj != 0 {
- p = gc.Prog(x86.AADDQ)
+ p = s.Prog(x86.AADDQ)
p.From.Type = obj.TYPE_CONST
p.From.Offset = adj
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_DI
}
- p = gc.Prog(obj.ADUFFZERO)
+ p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffzero
p.To.Offset = off
v.Fatalf("MOVOconst can only do constant=0")
}
r := v.Reg()
- opregreg(x86.AXORPS, r, r)
+ opregreg(s, x86.AXORPS, r, r)
case ssa.OpAMD64DUFFCOPY:
- p := gc.Prog(obj.ADUFFCOPY)
+ p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt
x := v.Args[0].Reg()
y := v.Reg()
if x != y {
- opregreg(moveByType(v.Type), y, x)
+ opregreg(s, moveByType(v.Type), y, x)
}
case ssa.OpLoadReg:
if v.Type.IsFlags() {
v.Fatalf("load flags not implemented: %v", v.LongString())
return
}
- p := gc.Prog(loadByType(v.Type))
+ p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
v.Fatalf("store flags not implemented: %v", v.LongString())
return
}
- p := gc.Prog(storeByType(v.Type))
+ p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
// near CanUse1InsnTLS for a detailed explanation of these instructions.
if x86.CanUse1InsnTLS(gc.Ctxt) {
// MOVQ (TLS), r
- p := gc.Prog(x86.AMOVQ)
+ p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
} else {
// MOVQ TLS, r
// MOVQ (r)(TLS*1), r
- p := gc.Prog(x86.AMOVQ)
+ p := s.Prog(x86.AMOVQ)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
p.To.Reg = r
- q := gc.Prog(x86.AMOVQ)
+ q := s.Prog(x86.AMOVQ)
q.From.Type = obj.TYPE_MEM
q.From.Reg = r
q.From.Index = x86.REG_TLS
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpAMD64BSFQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRQ, ssa.OpAMD64BSRL:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpAMD64SQRTSD:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
ssa.OpAMD64SETB, ssa.OpAMD64SETBE,
ssa.OpAMD64SETORD, ssa.OpAMD64SETNAN,
ssa.OpAMD64SETA, ssa.OpAMD64SETAE:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64SETNEF:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- q := gc.Prog(x86.ASETPS)
+ q := s.Prog(x86.ASETPS)
q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX
// ORL avoids partial register write and is smaller than ORQ, used by old compiler
- opregreg(x86.AORL, v.Reg(), x86.REG_AX)
+ opregreg(s, x86.AORL, v.Reg(), x86.REG_AX)
case ssa.OpAMD64SETEQF:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- q := gc.Prog(x86.ASETPC)
+ q := s.Prog(x86.ASETPC)
q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX
// ANDL avoids partial register write and is smaller than ANDQ, used by old compiler
- opregreg(x86.AANDL, v.Reg(), x86.REG_AX)
+ opregreg(s, x86.AANDL, v.Reg(), x86.REG_AX)
case ssa.OpAMD64InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
case ssa.OpAMD64AddTupleFirst32, ssa.OpAMD64AddTupleFirst64:
v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
case ssa.OpAMD64REPSTOSQ:
- gc.Prog(x86.AREP)
- gc.Prog(x86.ASTOSQ)
+ s.Prog(x86.AREP)
+ s.Prog(x86.ASTOSQ)
case ssa.OpAMD64REPMOVSQ:
- gc.Prog(x86.AREP)
- gc.Prog(x86.AMOVSQ)
+ s.Prog(x86.AREP)
+ s.Prog(x86.AMOVSQ)
case ssa.OpAMD64LoweredNilCheck:
// Issue a load which will fault if the input is nil.
// TODO: We currently use the 2-byte instruction TESTB AX, (reg).
// Should we use the 3-byte TESTB $0, (reg) instead? It is larger
// but it doesn't have a false dependency on AX.
// Or maybe allocate an output register and use MOVL (reg),reg2 ?
// That trades clobbering flags for clobbering a register.
- p := gc.Prog(x86.ATESTB)
+ p := s.Prog(x86.ATESTB)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX
p.To.Type = obj.TYPE_MEM
gc.Warnl(v.Pos, "generated nil check")
}
case ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
}
- gc.Prog(x86.ALOCK)
- p := gc.Prog(v.Op.Asm())
+ s.Prog(x86.ALOCK)
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
if v.Args[1].Reg() != x86.REG_AX {
v.Fatalf("input[1] not in AX %s", v.LongString())
}
- gc.Prog(x86.ALOCK)
- p := gc.Prog(v.Op.Asm())
+ s.Prog(x86.ALOCK)
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
- p = gc.Prog(x86.ASETEQ)
+ p = s.Prog(x86.ASETEQ)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpAMD64ANDBlock, ssa.OpAMD64ORBlock:
- gc.Prog(x86.ALOCK)
- p := gc.Prog(v.Op.Asm())
+ s.Prog(x86.ALOCK)
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
// defer returns in rax:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
- p := gc.Prog(x86.ATESTL)
+ p := s.Prog(x86.ATESTL)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_AX
- p = gc.Prog(x86.AJNE)
+ p = s.Prog(x86.AJNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
- gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+ s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
- gc.Prog(obj.ARET)
+ s.Prog(obj.ARET)
case ssa.BlockRetJmp:
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
var p *obj.Prog
switch next {
case b.Succs[0].Block():
- p = gc.Prog(jmp.invasm)
+ p = s.Prog(jmp.invasm)
likely *= -1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
- q := gc.Prog(obj.AJMP)
+ q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
return shift(int64(reg&0xf) | typ | (s&31)<<7)
}
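
For reference, makeshift above packs its operand into the TYPE_SHIFT offset
word: register number in bits 0-3, shift kind in bits 5-6, shift amount in
bits 7-11. A standalone sketch; the SHIFT_* values below are assumptions
mirroring cmd/internal/obj/arm (LL=0<<5, LR=1<<5, AR=2<<5, RR=3<<5).

	package main

	import "fmt"

	// Assumed to mirror cmd/internal/obj/arm's shift-kind constants.
	const (
		shiftLL = 0 << 5 // logical left
		shiftLR = 1 << 5 // logical right
		shiftAR = 2 << 5 // arithmetic right
		shiftRR = 3 << 5 // rotate right
	)

	// Same packing as makeshift: reg in bits 0-3, kind in bits 5-6,
	// amount in bits 7-11 (the real function returns the named shift type).
	func makeshift(reg int16, typ int64, n int64) int64 {
		return int64(reg&0xf) | typ | (n&31)<<7
	}

	func main() {
		// "R2 << 3" packs to 0x2 | 0<<5 | 3<<7 = 0x182.
		fmt.Printf("%#x\n", makeshift(2, shiftLL, 3))
	}
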
-// genshift generates a Prog for r = r0 op (r1 shifted by s)
-func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog {
- p := gc.Prog(as)
+// genshift generates a Prog for r = r0 op (r1 shifted by n)
+func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
+ p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
- p.From.Offset = int64(makeshift(r1, typ, s))
+ p.From.Offset = int64(makeshift(r1, typ, n))
p.Reg = r0
if r != 0 {
p.To.Type = obj.TYPE_REG
}
// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
-func genregshift(as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
- p := gc.Prog(as)
+func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
+ p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeregshift(r1, typ, r2))
p.Reg = r0
panic("bad float size")
}
}
- p := gc.Prog(as)
+ p := s.Prog(as)
p.From.Type = obj.TYPE_REG
p.From.Reg = x
p.To.Type = obj.TYPE_REG
v.Fatalf("load flags not implemented: %v", v.LongString())
return
}
- p := gc.Prog(loadByType(v.Type))
+ p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
v.Fatalf("store flags not implemented: %v", v.LongString())
return
}
- p := gc.Prog(storeByType(v.Type))
+ p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
p.Reg = r1
r := v.Reg0()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.Scond = arm.C_SBIT
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
p.Reg = r1
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
- p := gc.Prog(arm.ASRA)
+ p := s.Prog(arm.ASRA)
p.Scond = arm.C_SCOND_HS
p.From.Type = obj.TYPE_CONST
p.From.Offset = 31
p.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = r
- p = gc.Prog(arm.ASRA)
+ p = s.Prog(arm.ASRA)
p.Scond = arm.C_SCOND_LO
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
ssa.OpARMSLLconst,
ssa.OpARMSRLconst,
ssa.OpARMSRAconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
case ssa.OpARMADDSconst,
ssa.OpARMSUBSconst,
ssa.OpARMRSBSconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.Scond = arm.C_SBIT
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpARMSRRconst:
- genshift(arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
+ genshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
case ssa.OpARMADDshiftLL,
ssa.OpARMADCshiftLL,
ssa.OpARMSUBshiftLL,
ssa.OpARMORshiftLL,
ssa.OpARMXORshiftLL,
ssa.OpARMBICshiftLL:
- genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
case ssa.OpARMADDSshiftLL,
ssa.OpARMSUBSshiftLL,
ssa.OpARMRSBSshiftLL:
- p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
+ p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
p.Scond = arm.C_SBIT
case ssa.OpARMADDshiftRL,
ssa.OpARMADCshiftRL,
ssa.OpARMORshiftRL,
ssa.OpARMXORshiftRL,
ssa.OpARMBICshiftRL:
- genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
case ssa.OpARMADDSshiftRL,
ssa.OpARMSUBSshiftRL,
ssa.OpARMRSBSshiftRL:
- p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
+ p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
p.Scond = arm.C_SBIT
case ssa.OpARMADDshiftRA,
ssa.OpARMADCshiftRA,
ssa.OpARMORshiftRA,
ssa.OpARMXORshiftRA,
ssa.OpARMBICshiftRA:
- genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
case ssa.OpARMADDSshiftRA,
ssa.OpARMSUBSshiftRA,
ssa.OpARMRSBSshiftRA:
- p := genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
+ p := genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
p.Scond = arm.C_SBIT
case ssa.OpARMXORshiftRR:
- genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
case ssa.OpARMMVNshiftLL:
- genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
case ssa.OpARMMVNshiftRL:
- genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
case ssa.OpARMMVNshiftRA:
- genshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
+ genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
case ssa.OpARMMVNshiftLLreg:
- genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
+ genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
case ssa.OpARMMVNshiftRLreg:
- genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
+ genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
case ssa.OpARMMVNshiftRAreg:
- genregshift(v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
+ genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
case ssa.OpARMADDshiftLLreg,
ssa.OpARMADCshiftLLreg,
ssa.OpARMSUBshiftLLreg,
ssa.OpARMORshiftLLreg,
ssa.OpARMXORshiftLLreg,
ssa.OpARMBICshiftLLreg:
- genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
case ssa.OpARMADDSshiftLLreg,
ssa.OpARMSUBSshiftLLreg,
ssa.OpARMRSBSshiftLLreg:
- p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
+ p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
p.Scond = arm.C_SBIT
case ssa.OpARMADDshiftRLreg,
ssa.OpARMADCshiftRLreg,
ssa.OpARMORshiftRLreg,
ssa.OpARMXORshiftRLreg,
ssa.OpARMBICshiftRLreg:
- genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
case ssa.OpARMADDSshiftRLreg,
ssa.OpARMSUBSshiftRLreg,
ssa.OpARMRSBSshiftRLreg:
- p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
+ p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
p.Scond = arm.C_SBIT
case ssa.OpARMADDshiftRAreg,
ssa.OpARMADCshiftRAreg,
ssa.OpARMORshiftRAreg,
ssa.OpARMXORshiftRAreg,
ssa.OpARMBICshiftRAreg:
- genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
case ssa.OpARMADDSshiftRAreg,
ssa.OpARMSUBSshiftRAreg,
ssa.OpARMRSBSshiftRAreg:
- p := genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
+ p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
p.Scond = arm.C_SBIT
case ssa.OpARMHMUL,
ssa.OpARMHMULU:
// 32-bit high multiplication
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register
case ssa.OpARMMULLU:
// 32-bit multiplication, results 64-bit, high 32-bit in out0, low 32-bit in out1
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
p.To.Reg = v.Reg0() // high 32-bit
p.To.Offset = int64(v.Reg1()) // low 32-bit
case ssa.OpARMMULA:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
p.To.Reg = v.Reg() // result
p.To.Offset = int64(v.Args[2].Reg()) // addend
case ssa.OpARMMOVWconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARMMOVFconst,
ssa.OpARMMOVDconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
ssa.OpARMTEQ,
ssa.OpARMCMPF,
ssa.OpARMCMPD:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
// Special layout in ARM assembly
// Compared to x86, the operands of ARM's CMP are reversed.
ssa.OpARMTSTconst,
ssa.OpARMTEQconst:
// Special layout in ARM assembly
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
case ssa.OpARMCMPF0,
ssa.OpARMCMPD0:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
case ssa.OpARMCMPshiftLL:
- genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
case ssa.OpARMCMPshiftRL:
- genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
case ssa.OpARMCMPshiftRA:
- genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
case ssa.OpARMCMPshiftLLreg:
- genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
case ssa.OpARMCMPshiftRLreg:
- genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
case ssa.OpARMCMPshiftRAreg:
- genregshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
case ssa.OpARMMOVWaddr:
- p := gc.Prog(arm.AMOVW)
+ p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_ADDR
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
ssa.OpARMMOVWload,
ssa.OpARMMOVFload,
ssa.OpARMMOVDload:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssa.OpARMMOVWstore,
ssa.OpARMMOVFstore,
ssa.OpARMMOVDstore:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
// this is just shift 0 bits
fallthrough
case ssa.OpARMMOVWloadshiftLL:
- p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
+ p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
p.From.Reg = v.Args[0].Reg()
case ssa.OpARMMOVWloadshiftRL:
- p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
+ p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
p.From.Reg = v.Args[0].Reg()
case ssa.OpARMMOVWloadshiftRA:
- p := genshift(v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
+ p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
p.From.Reg = v.Args[0].Reg()
case ssa.OpARMMOVWstoreidx:
// this is just shift 0 bits
fallthrough
case ssa.OpARMMOVWstoreshiftLL:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_SHIFT
p.To.Reg = v.Args[0].Reg()
p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
case ssa.OpARMMOVWstoreshiftRL:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_SHIFT
p.To.Reg = v.Args[0].Reg()
p.To.Offset = int64(makeshift(v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
case ssa.OpARMMOVWstoreshiftRA:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_SHIFT
if v.Reg() == v.Args[0].Reg() {
return
}
- p := gc.Prog(arm.AMOVW)
+ p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
ssa.OpARMMOVDW,
ssa.OpARMMOVFD,
ssa.OpARMMOVDF:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
ssa.OpARMMOVWUD,
ssa.OpARMMOVFWU,
ssa.OpARMMOVDWU:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.Scond = arm.C_UBIT
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARMCMOVWHSconst:
- p := gc.Prog(arm.AMOVW)
+ p := s.Prog(arm.AMOVW)
p.Scond = arm.C_SCOND_HS
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARMCMOVWLSconst:
- p := gc.Prog(arm.AMOVW)
+ p := s.Prog(arm.AMOVW)
p.Scond = arm.C_SCOND_LS
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter, ssa.OpARMCALLudiv:
s.Call(v)
case ssa.OpARMDUFFZERO:
- p := gc.Prog(obj.ADUFFZERO)
+ p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpARMDUFFCOPY:
- p := gc.Prog(obj.ADUFFCOPY)
+ p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpARMLoweredNilCheck:
// Issue a load which will fault if arg is nil.
- p := gc.Prog(arm.AMOVB)
+ p := s.Prog(arm.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
sz = 1
mov = arm.AMOVB
}
- p := gc.Prog(mov)
+ p := s.Prog(mov)
p.Scond = arm.C_PBIT
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm.REG_R1
p.To.Offset = sz
- p2 := gc.Prog(arm.ACMP)
+ p2 := s.Prog(arm.ACMP)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = v.Args[1].Reg()
p2.Reg = arm.REG_R1
- p3 := gc.Prog(arm.ABLE)
+ p3 := s.Prog(arm.ABLE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
case ssa.OpARMLoweredMove:
sz = 1
mov = arm.AMOVB
}
- p := gc.Prog(mov)
+ p := s.Prog(mov)
p.Scond = arm.C_PBIT
p.From.Type = obj.TYPE_MEM
p.From.Reg = arm.REG_R1
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = arm.REGTMP
- p2 := gc.Prog(mov)
+ p2 := s.Prog(mov)
p2.Scond = arm.C_PBIT
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = arm.REG_R2
p2.To.Offset = sz
- p3 := gc.Prog(arm.ACMP)
+ p3 := s.Prog(arm.ACMP)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[2].Reg()
p3.Reg = arm.REG_R1
- p4 := gc.Prog(arm.ABLE)
+ p4 := s.Prog(arm.ABLE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
case ssa.OpARMEqual,
ssa.OpARMGreaterEqualU:
// generate boolean values
// use conditional move
- p := gc.Prog(arm.AMOVW)
+ p := s.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- p = gc.Prog(arm.AMOVW)
+ p = s.Prog(arm.AMOVW)
p.Scond = condBits[v.Op]
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
// defer returns in R0:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
- p := gc.Prog(arm.ACMP)
+ p := s.Prog(arm.ACMP)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
p.Reg = arm.REG_R0
- p = gc.Prog(arm.ABNE)
+ p = s.Prog(arm.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
- gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+ s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
- gc.Prog(obj.ARET)
+ s.Prog(obj.ARET)
case ssa.BlockRetJmp:
- p := gc.Prog(obj.ARET)
+ p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
var p *obj.Prog
switch next {
case b.Succs[0].Block():
- p = gc.Prog(jmp.invasm)
+ p = s.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
- q := gc.Prog(obj.AJMP)
+ q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
return int64(reg&31)<<16 | typ | (s&63)<<10
}
-// genshift generates a Prog for r = r0 op (r1 shifted by s)
-func genshift(as obj.As, r0, r1, r int16, typ int64, s int64) *obj.Prog {
- p := gc.Prog(as)
+// genshift generates a Prog for r = r0 op (r1 shifted by n)
+func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
+ p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
- p.From.Offset = makeshift(r1, typ, s)
+ p.From.Offset = makeshift(r1, typ, n)
p.Reg = r0
if r != 0 {
p.To.Type = obj.TYPE_REG
panic("bad float size")
}
}
- p := gc.Prog(as)
+ p := s.Prog(as)
p.From.Type = obj.TYPE_REG
p.From.Reg = x
p.To.Type = obj.TYPE_REG
v.Fatalf("load flags not implemented: %v", v.LongString())
return
}
- p := gc.Prog(loadByType(v.Type))
+ p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
v.Fatalf("store flags not implemented: %v", v.LongString())
return
}
- p := gc.Prog(storeByType(v.Type))
+ p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
p.Reg = r1
ssa.OpARM64SRAconst,
ssa.OpARM64RORconst,
ssa.OpARM64RORWconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
ssa.OpARM64ORshiftLL,
ssa.OpARM64XORshiftLL,
ssa.OpARM64BICshiftLL:
- genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt)
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt)
case ssa.OpARM64ADDshiftRL,
ssa.OpARM64SUBshiftRL,
ssa.OpARM64ANDshiftRL,
ssa.OpARM64ORshiftRL,
ssa.OpARM64XORshiftRL,
ssa.OpARM64BICshiftRL:
- genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
case ssa.OpARM64ADDshiftRA,
ssa.OpARM64SUBshiftRA,
ssa.OpARM64ANDshiftRA,
ssa.OpARM64ORshiftRA,
ssa.OpARM64XORshiftRA,
ssa.OpARM64BICshiftRA:
- genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
case ssa.OpARM64MOVDconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARM64FMOVSconst,
ssa.OpARM64FMOVDconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
ssa.OpARM64CMNW,
ssa.OpARM64FCMPS,
ssa.OpARM64FCMPD:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg()
ssa.OpARM64CMPWconst,
ssa.OpARM64CMNconst,
ssa.OpARM64CMNWconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
case ssa.OpARM64CMPshiftLL:
- genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LL, v.AuxInt)
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LL, v.AuxInt)
case ssa.OpARM64CMPshiftRL:
- genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt)
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt)
case ssa.OpARM64CMPshiftRA:
- genshift(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt)
+ genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt)
case ssa.OpARM64MOVDaddr:
- p := gc.Prog(arm64.AMOVD)
+ p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
ssa.OpARM64MOVDload,
ssa.OpARM64FMOVSload,
ssa.OpARM64FMOVDload:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Reg = v.Reg()
case ssa.OpARM64LDAR,
ssa.OpARM64LDARW:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssa.OpARM64FMOVDstore,
ssa.OpARM64STLR,
ssa.OpARM64STLRW:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
ssa.OpARM64MOVHstorezero,
ssa.OpARM64MOVWstorezero,
ssa.OpARM64MOVDstorezero:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = arm64.REGZERO
p.To.Type = obj.TYPE_MEM
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
out := v.Reg0()
- p := gc.Prog(ld)
+ p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = out
- p1 := gc.Prog(st)
+ p1 := s.Prog(st)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = r0
p1.RegTo2 = arm64.REGTMP
- p2 := gc.Prog(arm64.ACBNZ)
+ p2 := s.Prog(arm64.ACBNZ)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_BRANCH
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
out := v.Reg0()
- p := gc.Prog(ld)
+ p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = out
- p1 := gc.Prog(arm64.AADD)
+ p1 := s.Prog(arm64.AADD)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG
p1.To.Reg = out
- p2 := gc.Prog(st)
+ p2 := s.Prog(st)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = out
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = r0
p2.RegTo2 = arm64.REGTMP
- p3 := gc.Prog(arm64.ACBNZ)
+ p3 := s.Prog(arm64.ACBNZ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
r1 := v.Args[1].Reg()
r2 := v.Args[2].Reg()
out := v.Reg0()
- p := gc.Prog(ld)
+ p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
- p1 := gc.Prog(cmp)
+ p1 := s.Prog(cmp)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.Reg = arm64.REGTMP
- p2 := gc.Prog(arm64.ABNE)
+ p2 := s.Prog(arm64.ABNE)
p2.To.Type = obj.TYPE_BRANCH
- p3 := gc.Prog(st)
+ p3 := s.Prog(st)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = r2
p3.To.Type = obj.TYPE_MEM
p3.To.Reg = r0
p3.RegTo2 = arm64.REGTMP
- p4 := gc.Prog(arm64.ACBNZ)
+ p4 := s.Prog(arm64.ACBNZ)
p4.From.Type = obj.TYPE_REG
p4.From.Reg = arm64.REGTMP
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
- p5 := gc.Prog(arm64.ACSET)
+ p5 := s.Prog(arm64.ACSET)
p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p5.From.Reg = arm64.COND_EQ
p5.To.Type = obj.TYPE_REG
// CBNZ Rtmp, -3(PC)
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
- p := gc.Prog(arm64.ALDAXRB)
+ p := s.Prog(arm64.ALDAXRB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
- p1 := gc.Prog(v.Op.Asm())
+ p1 := s.Prog(v.Op.Asm())
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG
p1.To.Reg = arm64.REGTMP
- p2 := gc.Prog(arm64.ASTLXRB)
+ p2 := s.Prog(arm64.ASTLXRB)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = r0
p2.RegTo2 = arm64.REGTMP
- p3 := gc.Prog(arm64.ACBNZ)
+ p3 := s.Prog(arm64.ACBNZ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
if v.Reg() == v.Args[0].Reg() {
return
}
- p := gc.Prog(arm64.AMOVD)
+ p := s.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
ssa.OpARM64RBITW,
ssa.OpARM64CLZ,
ssa.OpARM64CLZW:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
if v.Op == ssa.OpARM64CSELULT {
r1 = v.Args[1].Reg()
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p.From.Reg = arm64.COND_LO
p.Reg = v.Args[0].Reg()
p.To.Reg = v.Reg()
case ssa.OpARM64DUFFZERO:
// runtime.duffzero expects start address - 8 in R16
- p := gc.Prog(arm64.ASUB)
+ p := s.Prog(arm64.ASUB)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
p.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REG_R16
- p = gc.Prog(obj.ADUFFZERO)
+ p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
// CMP Rarg1, R16
// BLE -2(PC)
// arg1 is the address of the last element to zero
- p := gc.Prog(arm64.AMOVD)
+ p := s.Prog(arm64.AMOVD)
p.Scond = arm64.C_XPOST
p.From.Type = obj.TYPE_REG
p.From.Reg = arm64.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm64.REG_R16
p.To.Offset = 8
- p2 := gc.Prog(arm64.ACMP)
+ p2 := s.Prog(arm64.ACMP)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = v.Args[1].Reg()
p2.Reg = arm64.REG_R16
- p3 := gc.Prog(arm64.ABLE)
+ p3 := s.Prog(arm64.ABLE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
case ssa.OpARM64DUFFCOPY:
- p := gc.Prog(obj.ADUFFCOPY)
+ p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffcopy
// CMP Rarg2, R16
// BLE -3(PC)
// arg2 is the address of the last element of src
- p := gc.Prog(arm64.AMOVD)
+ p := s.Prog(arm64.AMOVD)
p.Scond = arm64.C_XPOST
p.From.Type = obj.TYPE_MEM
p.From.Reg = arm64.REG_R16
p.From.Offset = 8
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
- p2 := gc.Prog(arm64.AMOVD)
+ p2 := s.Prog(arm64.AMOVD)
p2.Scond = arm64.C_XPOST
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = arm64.REG_R17
p2.To.Offset = 8
- p3 := gc.Prog(arm64.ACMP)
+ p3 := s.Prog(arm64.ACMP)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[2].Reg()
p3.Reg = arm64.REG_R16
- p4 := gc.Prog(arm64.ABLE)
+ p4 := s.Prog(arm64.ABLE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
s.Call(v)
case ssa.OpARM64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
- p := gc.Prog(arm64.AMOVB)
+ p := s.Prog(arm64.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssa.OpARM64GreaterThanU,
ssa.OpARM64GreaterEqualU:
// generate boolean values using CSET
- p := gc.Prog(arm64.ACSET)
+ p := s.Prog(arm64.ACSET)
p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p.From.Reg = condBits[v.Op]
p.To.Type = obj.TYPE_REG
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
// defer returns in R0:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
- p := gc.Prog(arm64.ACMP)
+ p := s.Prog(arm64.ACMP)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
p.Reg = arm64.REG_R0
- p = gc.Prog(arm64.ABNE)
+ p = s.Prog(arm64.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
- gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+ s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
- gc.Prog(obj.ARET)
+ s.Prog(obj.ARET)
case ssa.BlockRetJmp:
- p := gc.Prog(obj.ARET)
+ p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
var p *obj.Prog
switch next {
case b.Succs[0].Block():
- p = gc.Prog(jmp.invasm)
+ p = s.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
- q := gc.Prog(obj.AJMP)
+ q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
stackMapIndex map[*ssa.Value]int
}
+// Prog appends a new Prog.
+func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
+ return Prog(as)
+}
+
// Pc returns the current Prog.
func (s *SSAGenState) Pc() *obj.Prog {
return pc
Index int
}
-func oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction, branches []Branch) []Branch {
- p := Prog(jumps.Jump)
+func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump, likely ssa.BranchPrediction) {
+ p := s.Prog(jumps.Jump)
p.To.Type = obj.TYPE_BRANCH
to := jumps.Index
- branches = append(branches, Branch{p, b.Succs[to].Block()})
+ s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()})
if to == 1 {
likely = -likely
}
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
}
- return branches
}
func SSAGenFPJump(s *SSAGenState, b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
likely := b.Likely
switch next {
case b.Succs[0].Block():
- s.Branches = oneFPJump(b, &jumps[0][0], likely, s.Branches)
- s.Branches = oneFPJump(b, &jumps[0][1], likely, s.Branches)
+ s.oneFPJump(b, &jumps[0][0], likely)
+ s.oneFPJump(b, &jumps[0][1], likely)
case b.Succs[1].Block():
- s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
- s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
+ s.oneFPJump(b, &jumps[1][0], likely)
+ s.oneFPJump(b, &jumps[1][1], likely)
default:
- s.Branches = oneFPJump(b, &jumps[1][0], likely, s.Branches)
- s.Branches = oneFPJump(b, &jumps[1][1], likely, s.Branches)
- q := Prog(obj.AJMP)
+ s.oneFPJump(b, &jumps[1][0], likely)
+ s.oneFPJump(b, &jumps[1][1], likely)
+ q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
}
if !ok {
Fatalf("missing stack map index for %v", v.LongString())
}
- p := Prog(obj.APCDATA)
+ p := s.Prog(obj.APCDATA)
Addrconst(&p.From, obj.PCDATA_StackMapIndex)
Addrconst(&p.To, int64(idx))
thearch.Ginsnop()
}
- p = Prog(obj.ACALL)
+ p = s.Prog(obj.ACALL)
if sym, ok := v.Aux.(*obj.LSym); ok {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
}
}
- p := gc.Prog(as)
+ p := s.Prog(as)
p.From.Type = obj.TYPE_REG
p.From.Reg = x
p.To.Type = obj.TYPE_REG
if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
// cannot move between special registers, use TMP as intermediate
p.To.Reg = mips.REGTMP
- p = gc.Prog(mips.AMOVW)
+ p = s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
return
}
r := v.Reg()
- p := gc.Prog(loadByType(v.Type, r))
+ p := s.Prog(loadByType(v.Type, r))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = r
if isHILO(r) {
// cannot directly load, load to TMP and move
p.To.Reg = mips.REGTMP
- p = gc.Prog(mips.AMOVW)
+ p = s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
r := v.Args[0].Reg()
if isHILO(r) {
// cannot directly store, move to TMP and store
- p := gc.Prog(mips.AMOVW)
+ p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
r = mips.REGTMP
}
- p := gc.Prog(storeByType(v.Type, r))
+ p := s.Prog(storeByType(v.Type, r))
p.From.Type = obj.TYPE_REG
p.From.Reg = r
gc.AddrAuto(&p.To, v)
ssa.OpMIPSDIVF,
ssa.OpMIPSDIVD,
ssa.OpMIPSMUL:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg()
p.To.Reg = v.Reg()
case ssa.OpMIPSSGT,
ssa.OpMIPSSGTU:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
p.To.Reg = v.Reg()
case ssa.OpMIPSSGTzero,
ssa.OpMIPSSGTUzero:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = mips.REGZERO
ssa.OpMIPSSRAconst,
ssa.OpMIPSSGTconst,
ssa.OpMIPSSGTUconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
ssa.OpMIPSDIV,
ssa.OpMIPSDIVU:
// result in hi,lo
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg()
case ssa.OpMIPSMOVWconst:
r := v.Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
if isFPreg(r) || isHILO(r) {
// cannot move into FP or special registers, use TMP as intermediate
p.To.Reg = mips.REGTMP
- p = gc.Prog(mips.AMOVW)
+ p = s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
}
case ssa.OpMIPSMOVFconst,
ssa.OpMIPSMOVDconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
if v.Reg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.Reg = v.Args[1].Reg()
if v.Reg() != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = mips.REGZERO
ssa.OpMIPSCMPGED,
ssa.OpMIPSCMPGTF,
ssa.OpMIPSCMPGTD:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
case ssa.OpMIPSMOVWaddr:
- p := gc.Prog(mips.AMOVW)
+ p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_ADDR
var wantreg string
// MOVW $sym+off(base), R
ssa.OpMIPSMOVWload,
ssa.OpMIPSMOVFload,
ssa.OpMIPSMOVDload:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssa.OpMIPSMOVWstore,
ssa.OpMIPSMOVFstore,
ssa.OpMIPSMOVDstore:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
case ssa.OpMIPSMOVBstorezero,
ssa.OpMIPSMOVHstorezero,
ssa.OpMIPSMOVWstorezero:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM
if v.Reg() == v.Args[0].Reg() {
return
}
- p := gc.Prog(mips.AMOVW)
+ p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
ssa.OpMIPSNEGD,
ssa.OpMIPSSQRTD,
ssa.OpMIPSCLZ:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpMIPSNEG:
// SUB from REGZERO
- p := gc.Prog(mips.ASUBU)
+ p := s.Prog(mips.ASUBU)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = mips.REGZERO
sz = 1
mov = mips.AMOVB
}
- p := gc.Prog(mips.ASUBU)
+ p := s.Prog(mips.ASUBU)
p.From.Type = obj.TYPE_CONST
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1
- p2 := gc.Prog(mov)
+ p2 := s.Prog(mov)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGZERO
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = mips.REG_R1
p2.To.Offset = sz
- p3 := gc.Prog(mips.AADDU)
+ p3 := s.Prog(mips.AADDU)
p3.From.Type = obj.TYPE_CONST
p3.From.Offset = sz
p3.To.Type = obj.TYPE_REG
p3.To.Reg = mips.REG_R1
- p4 := gc.Prog(mips.ABNE)
+ p4 := s.Prog(mips.ABNE)
p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Args[1].Reg()
p4.Reg = mips.REG_R1
sz = 1
mov = mips.AMOVB
}
- p := gc.Prog(mips.ASUBU)
+ p := s.Prog(mips.ASUBU)
p.From.Type = obj.TYPE_CONST
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1
- p2 := gc.Prog(mov)
+ p2 := s.Prog(mov)
p2.From.Type = obj.TYPE_MEM
p2.From.Reg = mips.REG_R1
p2.From.Offset = sz
p2.To.Type = obj.TYPE_REG
p2.To.Reg = mips.REGTMP
- p3 := gc.Prog(mov)
+ p3 := s.Prog(mov)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_MEM
p3.To.Reg = mips.REG_R2
- p4 := gc.Prog(mips.AADDU)
+ p4 := s.Prog(mips.AADDU)
p4.From.Type = obj.TYPE_CONST
p4.From.Offset = sz
p4.To.Type = obj.TYPE_REG
p4.To.Reg = mips.REG_R1
- p5 := gc.Prog(mips.AADDU)
+ p5 := s.Prog(mips.AADDU)
p5.From.Type = obj.TYPE_CONST
p5.From.Offset = sz
p5.To.Type = obj.TYPE_REG
p5.To.Reg = mips.REG_R2
- p6 := gc.Prog(mips.ABNE)
+ p6 := s.Prog(mips.ABNE)
p6.From.Type = obj.TYPE_REG
p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1
case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter:
s.Call(v)
case ssa.OpMIPSLoweredAtomicLoad:
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
- p := gc.Prog(mips.AMOVW)
+ p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicStore:
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
- p := gc.Prog(mips.AMOVW)
+ p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicStorezero:
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
- p := gc.Prog(mips.AMOVW)
+ p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicExchange:
// SYNC
// MOVW Rarg1, Rtmp
// SC Rtmp, (Rarg0)
// BEQ Rtmp, -3(PC)
// SYNC
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
- p := gc.Prog(mips.AMOVW)
+ p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
- p1 := gc.Prog(mips.ALL)
+ p1 := s.Prog(mips.ALL)
p1.From.Type = obj.TYPE_MEM
p1.From.Reg = v.Args[0].Reg()
p1.To.Type = obj.TYPE_REG
p1.To.Reg = v.Reg0()
- p2 := gc.Prog(mips.ASC)
+ p2 := s.Prog(mips.ASC)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg()
- p3 := gc.Prog(mips.ABEQ)
+ p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicAdd:
// SYNC
// LL (Rarg0), Rout
// BEQ Rtmp, -3(PC)
// SYNC
// ADDU Rarg1, Rout
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
- p := gc.Prog(mips.ALL)
+ p := s.Prog(mips.ALL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
- p1 := gc.Prog(mips.AADDU)
+ p1 := s.Prog(mips.AADDU)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = v.Args[1].Reg()
p1.Reg = v.Reg0()
p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP
- p2 := gc.Prog(mips.ASC)
+ p2 := s.Prog(mips.ASC)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg()
- p3 := gc.Prog(mips.ABEQ)
+ p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
- p4 := gc.Prog(mips.AADDU)
+ p4 := s.Prog(mips.AADDU)
p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Args[1].Reg()
p4.Reg = v.Reg0()
// BEQ Rtmp, -3(PC)
// SYNC
// ADDU $auxInt, Rout
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
- p := gc.Prog(mips.ALL)
+ p := s.Prog(mips.ALL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
- p1 := gc.Prog(mips.AADDU)
+ p1 := s.Prog(mips.AADDU)
p1.From.Type = obj.TYPE_CONST
p1.From.Offset = v.AuxInt
p1.Reg = v.Reg0()
p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP
- p2 := gc.Prog(mips.ASC)
+ p2 := s.Prog(mips.ASC)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg()
- p3 := gc.Prog(mips.ABEQ)
+ p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
- p4 := gc.Prog(mips.AADDU)
+ p4 := s.Prog(mips.AADDU)
p4.From.Type = obj.TYPE_CONST
p4.From.Offset = v.AuxInt
p4.Reg = v.Reg0()
// SC Rtmp, (Rarg0)
// BEQ Rtmp, -3(PC)
// SYNC
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
- p := gc.Prog(mips.ALL)
+ p := s.Prog(mips.ALL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
- p1 := gc.Prog(v.Op.Asm())
+ p1 := s.Prog(v.Op.Asm())
p1.From.Type = obj.TYPE_REG
p1.From.Reg = v.Args[1].Reg()
p1.Reg = mips.REGTMP
p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP
- p2 := gc.Prog(mips.ASC)
+ p2 := s.Prog(mips.ASC)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg()
- p3 := gc.Prog(mips.ABEQ)
+ p3 := s.Prog(mips.ABEQ)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicCas:
// MOVW $0, Rout
// SC Rout, (Rarg0)
// BEQ Rout, -4(PC)
// SYNC
- p := gc.Prog(mips.AMOVW)
+ p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
- p1 := gc.Prog(mips.ALL)
+ p1 := s.Prog(mips.ALL)
p1.From.Type = obj.TYPE_MEM
p1.From.Reg = v.Args[0].Reg()
p1.To.Type = obj.TYPE_REG
p1.To.Reg = mips.REGTMP
- p2 := gc.Prog(mips.ABNE)
+ p2 := s.Prog(mips.ABNE)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = v.Args[1].Reg()
p2.Reg = mips.REGTMP
p2.To.Type = obj.TYPE_BRANCH
- p3 := gc.Prog(mips.AMOVW)
+ p3 := s.Prog(mips.AMOVW)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[2].Reg()
p3.To.Type = obj.TYPE_REG
p3.To.Reg = v.Reg0()
- p4 := gc.Prog(mips.ASC)
+ p4 := s.Prog(mips.ASC)
p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Reg0()
p4.To.Type = obj.TYPE_MEM
p4.To.Reg = v.Args[0].Reg()
- p5 := gc.Prog(mips.ABEQ)
+ p5 := s.Prog(mips.ABEQ)
p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Reg0()
p5.To.Type = obj.TYPE_BRANCH
gc.Patch(p5, p1)
- gc.Prog(mips.ASYNC)
+ s.Prog(mips.ASYNC)
- p6 := gc.Prog(obj.ANOP)
+ p6 := s.Prog(obj.ANOP)
gc.Patch(p2, p6)
case ssa.OpMIPSLoweredNilCheck:
// Issue a load which will fault if arg is nil.
- p := gc.Prog(mips.AMOVB)
+ p := s.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
if v.Op == ssa.OpMIPSFPFlagFalse {
cmov = mips.ACMOVT
}
- p := gc.Prog(mips.AMOVW)
+ p := s.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- p1 := gc.Prog(cmov)
+ p1 := s.Prog(cmov)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = mips.REGZERO
p1.To.Type = obj.TYPE_REG
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
// defer returns in R1:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
- p := gc.Prog(mips.ABNE)
+ p := s.Prog(mips.ABNE)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.Reg = mips.REG_R1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
- gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+ s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
- gc.Prog(obj.ARET)
+ s.Prog(obj.ARET)
case ssa.BlockRetJmp:
- p := gc.Prog(obj.ARET)
+ p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
var p *obj.Prog
switch next {
case b.Succs[0].Block():
- p = gc.Prog(jmp.invasm)
+ p = s.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
- q := gc.Prog(obj.AJMP)
+ q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
if isFPreg(x) && isFPreg(y) {
as = mips.AMOVD
}
- p := gc.Prog(as)
+ p := s.Prog(as)
p.From.Type = obj.TYPE_REG
p.From.Reg = x
p.To.Type = obj.TYPE_REG
if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
// cannot move between special registers, use TMP as intermediate
p.To.Reg = mips.REGTMP
- p = gc.Prog(mips.AMOVV)
+ p = s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
return
}
r := v.Reg()
- p := gc.Prog(loadByType(v.Type, r))
+ p := s.Prog(loadByType(v.Type, r))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = r
if isHILO(r) {
// cannot directly load, load to TMP and move
p.To.Reg = mips.REGTMP
- p = gc.Prog(mips.AMOVV)
+ p = s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
r := v.Args[0].Reg()
if isHILO(r) {
// cannot directly store, move to TMP and store
- p := gc.Prog(mips.AMOVV)
+ p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
r = mips.REGTMP
}
- p := gc.Prog(storeByType(v.Type, r))
+ p := s.Prog(storeByType(v.Type, r))
p.From.Type = obj.TYPE_REG
p.From.Reg = r
gc.AddrAuto(&p.To, v)
ssa.OpMIPS64MULD,
ssa.OpMIPS64DIVF,
ssa.OpMIPS64DIVD:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg()
p.To.Reg = v.Reg()
case ssa.OpMIPS64SGT,
ssa.OpMIPS64SGTU:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
ssa.OpMIPS64SRAVconst,
ssa.OpMIPS64SGTconst,
ssa.OpMIPS64SGTUconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.Reg = v.Args[0].Reg()
ssa.OpMIPS64DIVV,
ssa.OpMIPS64DIVVU:
// result in hi,lo
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[0].Reg()
case ssa.OpMIPS64MOVVconst:
r := v.Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
if isFPreg(r) || isHILO(r) {
// cannot move into FP or special registers, use TMP as intermediate
p.To.Reg = mips.REGTMP
- p = gc.Prog(mips.AMOVV)
+ p = s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGTMP
p.To.Type = obj.TYPE_REG
}
case ssa.OpMIPS64MOVFconst,
ssa.OpMIPS64MOVDconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
ssa.OpMIPS64CMPGED,
ssa.OpMIPS64CMPGTF,
ssa.OpMIPS64CMPGTD:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = v.Args[1].Reg()
case ssa.OpMIPS64MOVVaddr:
- p := gc.Prog(mips.AMOVV)
+ p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_ADDR
var wantreg string
// MOVV $sym+off(base), R
ssa.OpMIPS64MOVVload,
ssa.OpMIPS64MOVFload,
ssa.OpMIPS64MOVDload:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssa.OpMIPS64MOVVstore,
ssa.OpMIPS64MOVFstore,
ssa.OpMIPS64MOVDstore:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
ssa.OpMIPS64MOVHstorezero,
ssa.OpMIPS64MOVWstorezero,
ssa.OpMIPS64MOVVstorezero:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM
if v.Reg() == v.Args[0].Reg() {
return
}
- p := gc.Prog(mips.AMOVV)
+ p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
ssa.OpMIPS64MOVDF,
ssa.OpMIPS64NEGF,
ssa.OpMIPS64NEGD:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpMIPS64NEGV:
// SUB from REGZERO
- p := gc.Prog(mips.ASUBVU)
+ p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.Reg = mips.REGZERO
p.To.Reg = v.Reg()
case ssa.OpMIPS64DUFFZERO:
// runtime.duffzero expects start address - 8 in R1
- p := gc.Prog(mips.ASUBVU)
+ p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
p.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1
- p = gc.Prog(obj.ADUFFZERO)
+ p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
sz = 1
mov = mips.AMOVB
}
- p := gc.Prog(mips.ASUBVU)
+ p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_CONST
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1
- p2 := gc.Prog(mov)
+ p2 := s.Prog(mov)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = mips.REGZERO
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = mips.REG_R1
p2.To.Offset = sz
- p3 := gc.Prog(mips.AADDVU)
+ p3 := s.Prog(mips.AADDVU)
p3.From.Type = obj.TYPE_CONST
p3.From.Offset = sz
p3.To.Type = obj.TYPE_REG
p3.To.Reg = mips.REG_R1
- p4 := gc.Prog(mips.ABNE)
+ p4 := s.Prog(mips.ABNE)
p4.From.Type = obj.TYPE_REG
p4.From.Reg = v.Args[1].Reg()
p4.Reg = mips.REG_R1
sz = 1
mov = mips.AMOVB
}
- p := gc.Prog(mips.ASUBVU)
+ p := s.Prog(mips.ASUBVU)
p.From.Type = obj.TYPE_CONST
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REG_R1
- p2 := gc.Prog(mov)
+ p2 := s.Prog(mov)
p2.From.Type = obj.TYPE_MEM
p2.From.Reg = mips.REG_R1
p2.From.Offset = sz
p2.To.Type = obj.TYPE_REG
p2.To.Reg = mips.REGTMP
- p3 := gc.Prog(mov)
+ p3 := s.Prog(mov)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_MEM
p3.To.Reg = mips.REG_R2
- p4 := gc.Prog(mips.AADDVU)
+ p4 := s.Prog(mips.AADDVU)
p4.From.Type = obj.TYPE_CONST
p4.From.Offset = sz
p4.To.Type = obj.TYPE_REG
p4.To.Reg = mips.REG_R1
- p5 := gc.Prog(mips.AADDVU)
+ p5 := s.Prog(mips.AADDVU)
p5.From.Type = obj.TYPE_CONST
p5.From.Offset = sz
p5.To.Type = obj.TYPE_REG
p5.To.Reg = mips.REG_R2
- p6 := gc.Prog(mips.ABNE)
+ p6 := s.Prog(mips.ABNE)
p6.From.Type = obj.TYPE_REG
p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1
s.Call(v)
case ssa.OpMIPS64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
- p := gc.Prog(mips.AMOVB)
+ p := s.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
if v.Op == ssa.OpMIPS64FPFlagFalse {
branch = mips.ABFPT
}
- p := gc.Prog(mips.AMOVV)
+ p := s.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- p2 := gc.Prog(branch)
+ p2 := s.Prog(branch)
p2.To.Type = obj.TYPE_BRANCH
- p3 := gc.Prog(mips.AMOVV)
+ p3 := s.Prog(mips.AMOVV)
p3.From.Type = obj.TYPE_CONST
p3.From.Offset = 1
p3.To.Type = obj.TYPE_REG
p3.To.Reg = v.Reg()
- p4 := gc.Prog(obj.ANOP) // not a machine instruction, for branch to land
+ p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
gc.Patch(p2, p4)
case ssa.OpMIPS64LoweredGetClosurePtr:
// Closure pointer is R22 (mips.REGCTXT).
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
// defer returns in R1:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
- p := gc.Prog(mips.ABNE)
+ p := s.Prog(mips.ABNE)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REGZERO
p.Reg = mips.REG_R1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
- gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+ s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
- gc.Prog(obj.ARET)
+ s.Prog(obj.ARET)
case ssa.BlockRetJmp:
- p := gc.Prog(obj.ARET)
+ p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
var p *obj.Prog
switch next {
case b.Succs[0].Block():
- p = gc.Prog(jmp.invasm)
+ p = s.Prog(jmp.invasm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
- q := gc.Prog(obj.AJMP)
+ q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
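The s threaded through these case arms is not new per-hunk state: each port's code generator already receives the pass state as a parameter, so only the constructor being called changes. Roughly, per the existing backend entry points (skeleton only, bodies elided):

	// e.g. in cmd/compile/internal/mips (and likewise for the other ports)
	func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
		switch v.Op {
		// ... cases as in the hunks above, each emitting via s.Prog(...) ...
		}
	}

	func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
		switch b.Kind {
		// ... block trailers (AJMP/ARET/conditional branches) emitted via s.Prog(...) ...
		}
	}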
panic("bad store type")
}
-func ssaGenISEL(v *ssa.Value, cr int64, r1, r2 int16) {
+func ssaGenISEL(s *gc.SSAGenState, v *ssa.Value, cr int64, r1, r2 int16) {
r := v.Reg()
- p := gc.Prog(ppc64.AISEL)
+ p := s.Prog(ppc64.AISEL)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
p.Reg = r1
if t.IsFloat() {
op = ppc64.AFMOVD
}
- p := gc.Prog(op)
+ p := s.Prog(op)
p.From.Type = rt
p.From.Reg = x
p.To.Type = rt
x := v.Args[0].Reg()
y := v.Reg()
- p := gc.Prog(ppc64.AMFVSRD)
+ p := s.Prog(ppc64.AMFVSRD)
p.From.Type = obj.TYPE_REG
p.From.Reg = x
p.To.Type = obj.TYPE_REG
x := v.Args[0].Reg()
y := v.Reg()
- p := gc.Prog(ppc64.AMTVSRD)
+ p := s.Prog(ppc64.AMTVSRD)
p.From.Type = obj.TYPE_REG
p.From.Reg = x
p.To.Type = obj.TYPE_REG
// ISYNC
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
- psync := gc.Prog(ppc64.ASYNC)
+ psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE
- p := gc.Prog(ppc64.ALBAR)
+ p := s.Prog(ppc64.ALBAR)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
- p1 := gc.Prog(v.Op.Asm())
+ p1 := s.Prog(v.Op.Asm())
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Type = obj.TYPE_REG
p1.To.Reg = ppc64.REGTMP
- p2 := gc.Prog(ppc64.ASTBCCC)
+ p2 := s.Prog(ppc64.ASTBCCC)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = ppc64.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = r0
p2.RegTo2 = ppc64.REGTMP
- p3 := gc.Prog(ppc64.ABNE)
+ p3 := s.Prog(ppc64.ABNE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
- pisync := gc.Prog(ppc64.AISYNC)
+ pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
case ssa.OpPPC64LoweredAtomicAdd32,
r1 := v.Args[1].Reg()
out := v.Reg0()
// SYNC
- psync := gc.Prog(ppc64.ASYNC)
+ psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE
// LDAR or LWAR
- p := gc.Prog(ld)
+ p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = out
// ADD reg1,out
- p1 := gc.Prog(ppc64.AADD)
+ p1 := s.Prog(ppc64.AADD)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Reg = out
p1.To.Type = obj.TYPE_REG
// STDCCC or STWCCC
- p3 := gc.Prog(st)
+ p3 := s.Prog(st)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = out
p3.To.Type = obj.TYPE_MEM
p3.To.Reg = r0
// BNE retry
- p4 := gc.Prog(ppc64.ABNE)
+ p4 := s.Prog(ppc64.ABNE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
// ISYNC
- pisync := gc.Prog(ppc64.AISYNC)
+ pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
// Ensure a 32 bit result
if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
- p5 := gc.Prog(ppc64.AMOVWZ)
+ p5 := s.Prog(ppc64.AMOVWZ)
p5.To.Type = obj.TYPE_REG
p5.To.Reg = out
p5.From.Type = obj.TYPE_REG
r1 := v.Args[1].Reg()
out := v.Reg0()
// SYNC
- psync := gc.Prog(ppc64.ASYNC)
+ psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE
// LDAR or LWAR
- p := gc.Prog(ld)
+ p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = out
// STDCCC or STWCCC
- p1 := gc.Prog(st)
+ p1 := s.Prog(st)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = r0
// BNE retry
- p2 := gc.Prog(ppc64.ABNE)
+ p2 := s.Prog(ppc64.ABNE)
p2.To.Type = obj.TYPE_BRANCH
gc.Patch(p2, p)
// ISYNC
- pisync := gc.Prog(ppc64.AISYNC)
+ pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
case ssa.OpPPC64LoweredAtomicLoad32,
arg0 := v.Args[0].Reg()
out := v.Reg0()
// SYNC
- psync := gc.Prog(ppc64.ASYNC)
+ psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE
// Load
- p := gc.Prog(ld)
+ p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = arg0
p.To.Type = obj.TYPE_REG
p.To.Reg = out
// CMP
- p1 := gc.Prog(cmp)
+ p1 := s.Prog(cmp)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = out
p1.To.Type = obj.TYPE_REG
p1.To.Reg = out
// BNE
- p2 := gc.Prog(ppc64.ABNE)
+ p2 := s.Prog(ppc64.ABNE)
p2.To.Type = obj.TYPE_BRANCH
// ISYNC
- pisync := gc.Prog(ppc64.AISYNC)
+ pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
gc.Patch(p2, pisync)
arg0 := v.Args[0].Reg()
arg1 := v.Args[1].Reg()
// SYNC
- psync := gc.Prog(ppc64.ASYNC)
+ psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE
// Store
- p := gc.Prog(st)
+ p := s.Prog(st)
p.To.Type = obj.TYPE_MEM
p.To.Reg = arg0
p.From.Type = obj.TYPE_REG
r2 := v.Args[2].Reg()
out := v.Reg0()
// SYNC
- psync := gc.Prog(ppc64.ASYNC)
+ psync := s.Prog(ppc64.ASYNC)
psync.To.Type = obj.TYPE_NONE
// LDAR or LWAR
- p := gc.Prog(ld)
+ p := s.Prog(ld)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
// CMP reg1,reg2
- p1 := gc.Prog(cmp)
+ p1 := s.Prog(cmp)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = r1
p1.To.Reg = ppc64.REGTMP
p1.To.Type = obj.TYPE_REG
// BNE cas_fail
- p2 := gc.Prog(ppc64.ABNE)
+ p2 := s.Prog(ppc64.ABNE)
p2.To.Type = obj.TYPE_BRANCH
// STDCCC or STWCCC
- p3 := gc.Prog(st)
+ p3 := s.Prog(st)
p3.From.Type = obj.TYPE_REG
p3.From.Reg = r2
p3.To.Type = obj.TYPE_MEM
p3.To.Reg = r0
// BNE retry
- p4 := gc.Prog(ppc64.ABNE)
+ p4 := s.Prog(ppc64.ABNE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
// ISYNC
- pisync := gc.Prog(ppc64.AISYNC)
+ pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
// return true
- p5 := gc.Prog(ppc64.AMOVD)
+ p5 := s.Prog(ppc64.AMOVD)
p5.From.Type = obj.TYPE_CONST
p5.From.Offset = 1
p5.To.Type = obj.TYPE_REG
p5.To.Reg = out
// BR done
- p6 := gc.Prog(obj.AJMP)
+ p6 := s.Prog(obj.AJMP)
p6.To.Type = obj.TYPE_BRANCH
// return false
- p7 := gc.Prog(ppc64.AMOVD)
+ p7 := s.Prog(ppc64.AMOVD)
p7.From.Type = obj.TYPE_CONST
p7.From.Offset = 0
p7.To.Type = obj.TYPE_REG
p7.To.Reg = out
gc.Patch(p2, p7)
// done (label)
- p8 := gc.Prog(obj.ANOP)
+ p8 := s.Prog(obj.ANOP)
gc.Patch(p6, p8)
case ssa.OpPPC64LoweredGetClosurePtr:
case ssa.OpLoadReg:
loadOp := loadByType(v.Type)
- p := gc.Prog(loadOp)
+ p := s.Prog(loadOp)
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
storeOp := storeByType(v.Type)
- p := gc.Prog(storeOp)
+ p := s.Prog(storeOp)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
- p := gc.Prog(ppc64.ACMP)
+ p := s.Prog(ppc64.ACMP)
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.To.Type = obj.TYPE_CONST
p.To.Offset = -1
- pbahead := gc.Prog(ppc64.ABEQ)
+ pbahead := s.Prog(ppc64.ABEQ)
pbahead.To.Type = obj.TYPE_BRANCH
- p = gc.Prog(v.Op.Asm())
+ p = s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = r
- pbover := gc.Prog(obj.AJMP)
+ pbover := s.Prog(obj.AJMP)
pbover.To.Type = obj.TYPE_BRANCH
- p = gc.Prog(ppc64.ANEG)
+ p = s.Prog(ppc64.ANEG)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
p.From.Type = obj.TYPE_REG
p.From.Reg = r0
gc.Patch(pbahead, p)
- p = gc.Prog(obj.ANOP)
+ p = s.Prog(obj.ANOP)
gc.Patch(pbover, p)
case ssa.OpPPC64DIVW:
r0 := v.Args[0].Reg()
r1 := v.Args[1].Reg()
- p := gc.Prog(ppc64.ACMPW)
+ p := s.Prog(ppc64.ACMPW)
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.To.Type = obj.TYPE_CONST
p.To.Offset = -1
- pbahead := gc.Prog(ppc64.ABEQ)
+ pbahead := s.Prog(ppc64.ABEQ)
pbahead.To.Type = obj.TYPE_BRANCH
- p = gc.Prog(v.Op.Asm())
+ p = s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.Reg = r0
p.To.Type = obj.TYPE_REG
p.To.Reg = r
- pbover := gc.Prog(obj.AJMP)
+ pbover := s.Prog(obj.AJMP)
pbover.To.Type = obj.TYPE_BRANCH
- p = gc.Prog(ppc64.ANEG)
+ p = s.Prog(ppc64.ANEG)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
p.From.Type = obj.TYPE_REG
p.From.Reg = r0
gc.Patch(pbahead, p)
- p = gc.Prog(obj.ANOP)
+ p = s.Prog(obj.ANOP)
gc.Patch(pbover, p)
case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS,
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
p.Reg = r1
r2 := v.Args[1].Reg()
r3 := v.Args[2].Reg()
// r = r1*r2 ± r3
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.Reg = r3
case ssa.OpPPC64MaskIfNotCarry:
r := v.Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REGZERO
p.To.Type = obj.TYPE_REG
case ssa.OpPPC64ADDconstForCarry:
r1 := v.Args[0].Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.Reg = r1
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FRSP:
r := v.Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = r
p.From.Type = obj.TYPE_REG
case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst,
ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.Reg = v.Args[0].Reg()
if v.Aux != nil {
p.To.Reg = v.Reg()
case ssa.OpPPC64ANDCCconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.Reg = v.Args[0].Reg()
if v.Aux != nil {
p.To.Reg = ppc64.REGTMP // discard result
case ssa.OpPPC64MOVDaddr:
- p := gc.Prog(ppc64.AMOVD)
+ p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
}
case ssa.OpPPC64MOVDconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpPPC64FCMPU, ssa.OpPPC64CMP, ssa.OpPPC64CMPW, ssa.OpPPC64CMPU, ssa.OpPPC64CMPWU:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[1].Reg()
case ssa.OpPPC64CMPconst, ssa.OpPPC64CMPUconst, ssa.OpPPC64CMPWconst, ssa.OpPPC64CMPWUconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST
case ssa.OpPPC64MOVBreg, ssa.OpPPC64MOVBZreg, ssa.OpPPC64MOVHreg, ssa.OpPPC64MOVHZreg, ssa.OpPPC64MOVWreg, ssa.OpPPC64MOVWZreg:
// Shift in register to required size
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Reg = v.Reg()
p.To.Type = obj.TYPE_REG
case ssa.OpPPC64MOVDload, ssa.OpPPC64MOVWload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Reg = v.Reg()
case ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Reg = v.Reg()
case ssa.OpPPC64MOVDstorezero, ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REGZERO
p.To.Type = obj.TYPE_MEM
gc.AddAux(&p.To, v)
case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
// isel rt,0,rtmp,!cond // rt is target in ppc asm
if v.Block.Func.Config.OldArch {
- p := gc.Prog(ppc64.AMOVD)
+ p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- pb := gc.Prog(condOps[v.Op])
+ pb := s.Prog(condOps[v.Op])
pb.To.Type = obj.TYPE_BRANCH
- p = gc.Prog(ppc64.AMOVD)
+ p = s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- p = gc.Prog(obj.ANOP)
+ p = s.Prog(obj.ANOP)
gc.Patch(pb, p)
break
}
// Modern PPC uses ISEL
- p := gc.Prog(ppc64.AMOVD)
+ p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = iselRegs[1]
iop := iselOps[v.Op]
- ssaGenISEL(v, iop.cond, iselRegs[iop.valueIfCond], iselRegs[1-iop.valueIfCond])
+ ssaGenISEL(s, v, iop.cond, iselRegs[iop.valueIfCond], iselRegs[1-iop.valueIfCond])
case ssa.OpPPC64FLessEqual, // These include a second branch for EQ -- dealing with NaN prevents REL= to !REL conversion
ssa.OpPPC64FGreaterEqual:
if v.Block.Func.Config.OldArch {
- p := gc.Prog(ppc64.AMOVW)
+ p := s.Prog(ppc64.AMOVW)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- pb0 := gc.Prog(condOps[v.Op])
+ pb0 := s.Prog(condOps[v.Op])
pb0.To.Type = obj.TYPE_BRANCH
- pb1 := gc.Prog(ppc64.ABEQ)
+ pb1 := s.Prog(ppc64.ABEQ)
pb1.To.Type = obj.TYPE_BRANCH
- p = gc.Prog(ppc64.AMOVW)
+ p = s.Prog(ppc64.AMOVW)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- p = gc.Prog(obj.ANOP)
+ p = s.Prog(obj.ANOP)
gc.Patch(pb0, p)
gc.Patch(pb1, p)
break
}
// Modern PPC uses ISEL
- p := gc.Prog(ppc64.AMOVD)
+ p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = iselRegs[1]
iop := iselOps[v.Op]
- ssaGenISEL(v, iop.cond, iselRegs[iop.valueIfCond], iselRegs[1-iop.valueIfCond])
- ssaGenISEL(v, ppc64.C_COND_EQ, iselRegs[1], v.Reg())
+ ssaGenISEL(s, v, iop.cond, iselRegs[iop.valueIfCond], iselRegs[1-iop.valueIfCond])
+ ssaGenISEL(s, v, ppc64.C_COND_EQ, iselRegs[1], v.Reg())
case ssa.OpPPC64LoweredZero:
// than 1 iteration.
if ctr > 1 {
// Set up CTR loop counter
- p := gc.Prog(ppc64.AMOVD)
+ p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_CONST
p.From.Offset = ctr
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
- p = gc.Prog(ppc64.AMOVD)
+ p = s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REGTMP
p.To.Type = obj.TYPE_REG
var top *obj.Prog
for offset := int64(0); offset < 32; offset += 8 {
// This is the top of loop
- p := gc.Prog(ppc64.AMOVD)
+ p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_MEM
// Increment address for the
// 4 doublewords just zeroed.
- p = gc.Prog(ppc64.AADD)
+ p = s.Prog(ppc64.AADD)
p.Reg = v.Args[0].Reg()
p.From.Type = obj.TYPE_CONST
p.From.Offset = 32
// Branch back to top of loop
// based on CTR
// BC with BO_BCTR generates bdnz
- p = gc.Prog(ppc64.ABC)
+ p = s.Prog(ppc64.ABC)
p.From.Type = obj.TYPE_CONST
p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0
case rem >= 2:
op, size = ppc64.AMOVH, 2
}
- p := gc.Prog(op)
+ p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_MEM
movu = ppc64.AMOVBU
}
- p := gc.Prog(ppc64.AADD)
+ p := s.Prog(ppc64.AADD)
p.Reg = v.Args[0].Reg()
p.From.Type = obj.TYPE_CONST
p.From.Offset = -sz
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[0].Reg()
- p = gc.Prog(ppc64.AADD)
+ p = s.Prog(ppc64.AADD)
p.Reg = v.Args[1].Reg()
p.From.Type = obj.TYPE_CONST
p.From.Offset = -sz
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[1].Reg()
- p = gc.Prog(movu)
+ p = s.Prog(movu)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
p.From.Offset = sz
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
- p2 := gc.Prog(movu)
+ p2 := s.Prog(movu)
p2.From.Type = obj.TYPE_REG
p2.From.Reg = ppc64.REGTMP
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = v.Args[0].Reg()
p2.To.Offset = sz
- p3 := gc.Prog(ppc64.ACMPU)
+ p3 := s.Prog(ppc64.ACMPU)
p3.From.Reg = v.Args[1].Reg()
p3.From.Type = obj.TYPE_REG
p3.To.Reg = v.Args[2].Reg()
p3.To.Type = obj.TYPE_REG
- p4 := gc.Prog(ppc64.ABLT)
+ p4 := s.Prog(ppc64.ABLT)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
s.Call(v)
case ssa.OpPPC64CALLclosure, ssa.OpPPC64CALLinter:
- p := gc.Prog(ppc64.AMOVD)
+ p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
// change the register allocation to put the value in
// R12 already, but I don't know how to do that.
// TODO: We have the technology now to implement TODO above.
- q := gc.Prog(ppc64.AMOVD)
+ q := s.Prog(ppc64.AMOVD)
q.From = p.From
q.To.Type = obj.TYPE_REG
q.To.Reg = ppc64.REG_R12
// called via pointer might have been implemented in
// a separate module and so overwritten the TOC
// pointer in R2; reload it.
- q := gc.Prog(ppc64.AMOVD)
+ q := s.Prog(ppc64.AMOVD)
q.From.Type = obj.TYPE_MEM
q.From.Offset = 24
q.From.Reg = ppc64.REGSP
case ssa.OpPPC64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
- p := gc.Prog(ppc64.AMOVBZ)
+ p := s.Prog(ppc64.AMOVBZ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
// defer returns in R3:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
- p := gc.Prog(ppc64.ACMP)
+ p := s.Prog(ppc64.ACMP)
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REG_R3
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REG_R0
- p = gc.Prog(ppc64.ABNE)
+ p = s.Prog(ppc64.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
- gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+ s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
- gc.Prog(obj.ARET)
+ s.Prog(obj.ARET)
case ssa.BlockRetJmp:
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
var p *obj.Prog
switch next {
case b.Succs[0].Block():
- p = gc.Prog(jmp.invasm)
+ p = s.Prog(jmp.invasm)
likely *= -1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if jmp.invasmun {
// TODO: The second branch is probably predict-not-taken since it is for FP unordered
- q := gc.Prog(ppc64.ABVS)
+ q := s.Prog(ppc64.ABVS)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
case b.Succs[1].Block():
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
if jmp.asmeq {
- q := gc.Prog(ppc64.ABEQ)
+ q := s.Prog(ppc64.ABEQ)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()})
}
default:
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
if jmp.asmeq {
- q := gc.Prog(ppc64.ABEQ)
+ q := s.Prog(ppc64.ABEQ)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()})
}
- q := gc.Prog(obj.AJMP)
+ q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
-func opregreg(op obj.As, dest, src int16) *obj.Prog {
- p := gc.Prog(op)
+func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
+ p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = dest
// dest := src(From) op off
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
-func opregregimm(op obj.As, dest, src int16, off int64) *obj.Prog {
- p := gc.Prog(op)
+func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.Prog {
+ p := s.Prog(op)
p.From.Type = obj.TYPE_CONST
p.From.Offset = off
p.Reg = src
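As a usage note for the two helpers above (hypothetical calls, registers chosen only for illustration): dest is passed before src but lands in To, so the printed assembly keeps the usual source-first, destination-last order.

	// Illustration only, inside some case of ssaGenValue:
	//   opregreg(s, s390x.AADD, r1, r2)        emits  ADD R2, R1      (r1 += r2)
	//   opregregimm(s, s390x.AADD, r1, r2, 8)  emits  ADD $8, R2, R1  (r1 = r2 + 8)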
if r2 == s390x.REG_R0 {
v.Fatalf("cannot use R0 as shift value %s", v.LongString())
}
- p := opregreg(v.Op.Asm(), r, r2)
+ p := opregreg(s, v.Op.Asm(), r, r2)
if r != r1 {
p.Reg = r1
}
r := v.Reg()
r1 := v.Args[0].Reg()
r2 := v.Args[1].Reg()
- p := opregreg(v.Op.Asm(), r, r2)
+ p := opregreg(s, v.Op.Asm(), r, r2)
if r != r1 {
p.Reg = r1
}
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- opregreg(v.Op.Asm(), r, v.Args[1].Reg())
+ opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
case ssa.OpS390XFMADD, ssa.OpS390XFMADDS,
ssa.OpS390XFMSUB, ssa.OpS390XFMSUBS:
r := v.Reg()
}
r1 := v.Args[1].Reg()
r2 := v.Args[2].Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.Reg = r2
v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW {
var c *obj.Prog
- c = gc.Prog(s390x.ACMP)
- j = gc.Prog(s390x.ABEQ)
+ c = s.Prog(s390x.ACMP)
+ j = s.Prog(s390x.ABEQ)
c.From.Type = obj.TYPE_REG
c.From.Reg = divisor
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = divisor
p.Reg = 0
// signed division, rest of the check for -1 case
if j != nil {
- j2 := gc.Prog(s390x.ABR)
+ j2 := s.Prog(s390x.ABR)
j2.To.Type = obj.TYPE_BRANCH
var n *obj.Prog
if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW {
// n * -1 = -n
- n = gc.Prog(s390x.ANEG)
+ n = s.Prog(s390x.ANEG)
n.To.Type = obj.TYPE_REG
n.To.Reg = dividend
} else {
// n % -1 == 0
- n = gc.Prog(s390x.AXOR)
+ n = s.Prog(s390x.AXOR)
n.From.Type = obj.TYPE_REG
n.From.Reg = dividend
n.To.Type = obj.TYPE_REG
j2.To.Val = s.Pc()
}
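The skip-over branch above (j2) takes its target from s.Pc() rather than a global program counter. That accessor is also outside the excerpted hunks; assuming the pass state tracks its current instruction position (field name hypothetical), a sketch would be:

	// Pc exposes the pass's current Prog position so it can be used as a branch target.
	// Sketch only: cur is a hypothetical stand-in for however the state records that position.
	func (s *SSAGenState) Pc() *obj.Prog {
		return s.cur
	}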
case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
- opregregimm(v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
+ opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst,
ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst,
ssa.OpS390XANDconst, ssa.OpS390XANDWconst,
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
ssa.OpS390XRLLGconst, ssa.OpS390XRLLconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
r := v.Reg()
p.To.Reg = r
case ssa.OpS390XSUBEcarrymask, ssa.OpS390XSUBEWcarrymask:
r := v.Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_REG
case ssa.OpS390XMOVDaddridx:
r := v.Args[0].Reg()
i := v.Args[1].Reg()
- p := gc.Prog(s390x.AMOVD)
+ p := s.Prog(s390x.AMOVD)
p.From.Scale = 1
if i == s390x.REGSP {
r, i = i, r
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XMOVDaddr:
- p := gc.Prog(s390x.AMOVD)
+ p := s.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
- opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
case ssa.OpS390XFCMPS, ssa.OpS390XFCMP:
- opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst, ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt
case ssa.OpS390XMOVDconst:
x := v.Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = x
case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst:
x := v.Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
gc.AddAux(&p.From, v)
ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload,
ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
if i == s390x.REGSP {
r, i = i, r
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = r
p.From.Scale = 1
case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
ssa.OpS390XMOVHBRstore, ssa.OpS390XMOVWBRstore, ssa.OpS390XMOVDBRstore,
ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
if i == s390x.REGSP {
r, i = i, r
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Index = i
gc.AddAux(&p.To, v)
case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
ssa.OpS390XFNEG, ssa.OpS390XFNEGS:
- opregreg(v.Op.Asm(), v.Reg(), v.Args[0].Reg())
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
case ssa.OpS390XCLEAR:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
x := v.Args[0].Reg()
y := v.Reg()
if x != y {
- opregreg(moveByType(v.Type), y, x)
+ opregreg(s, moveByType(v.Type), y, x)
}
case ssa.OpS390XMOVDnop:
if v.Reg() != v.Args[0].Reg() {
v.Fatalf("load flags not implemented: %v", v.LongString())
return
}
- p := gc.Prog(loadByType(v.Type))
+ p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
v.Fatalf("store flags not implemented: %v", v.LongString())
return
}
- p := gc.Prog(storeByType(v.Type))
+ p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
// input is already rounded
case ssa.OpS390XLoweredGetG:
r := v.Reg()
- p := gc.Prog(s390x.AMOVD)
+ p := s.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_REG
p.From.Reg = s390x.REGG
p.To.Type = obj.TYPE_REG
s.Call(v)
case ssa.OpS390XFLOGR, ssa.OpS390XNEG, ssa.OpS390XNEGW,
ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpS390XFSQRT:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
case ssa.OpS390XLoweredNilCheck:
// Issue a load which will fault if the input is nil.
- p := gc.Prog(s390x.AMOVBZ)
+ p := s.Prog(s390x.AMOVBZ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
}
case ssa.OpS390XMVC:
vo := v.AuxValAndOff()
- p := gc.Prog(s390x.AMVC)
+ p := s.Prog(s390x.AMVC)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
p.From.Offset = vo.Off()
v.Fatalf("invalid store multiple %s", v.LongString())
}
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.Reg = v.Args[len(v.Args)-2].Reg()
// BNE mvc
// MVC $rem, 0(R2), 0(R1) // if rem > 0
// arg2 is the last address to move in the loop + 256
- mvc := gc.Prog(s390x.AMVC)
+ mvc := s.Prog(s390x.AMVC)
mvc.From.Type = obj.TYPE_MEM
mvc.From.Reg = v.Args[1].Reg()
mvc.To.Type = obj.TYPE_MEM
mvc.From3.Offset = 256
for i := 0; i < 2; i++ {
- movd := gc.Prog(s390x.AMOVD)
+ movd := s.Prog(s390x.AMOVD)
movd.From.Type = obj.TYPE_ADDR
movd.From.Reg = v.Args[i].Reg()
movd.From.Offset = 256
movd.To.Reg = v.Args[i].Reg()
}
- cmpu := gc.Prog(s390x.ACMPU)
+ cmpu := s.Prog(s390x.ACMPU)
cmpu.From.Reg = v.Args[1].Reg()
cmpu.From.Type = obj.TYPE_REG
cmpu.To.Reg = v.Args[2].Reg()
cmpu.To.Type = obj.TYPE_REG
- bne := gc.Prog(s390x.ABLT)
+ bne := s.Prog(s390x.ABLT)
bne.To.Type = obj.TYPE_BRANCH
gc.Patch(bne, mvc)
if v.AuxInt > 0 {
- mvc := gc.Prog(s390x.AMVC)
+ mvc := s.Prog(s390x.AMVC)
mvc.From.Type = obj.TYPE_MEM
mvc.From.Reg = v.Args[1].Reg()
mvc.To.Type = obj.TYPE_MEM
// BNE clear
// CLEAR $rem, 0(R1) // if rem > 0
// arg1 is the last address to zero in the loop + 256
- clear := gc.Prog(s390x.ACLEAR)
+ clear := s.Prog(s390x.ACLEAR)
clear.From.Type = obj.TYPE_CONST
clear.From.Offset = 256
clear.To.Type = obj.TYPE_MEM
clear.To.Reg = v.Args[0].Reg()
- movd := gc.Prog(s390x.AMOVD)
+ movd := s.Prog(s390x.AMOVD)
movd.From.Type = obj.TYPE_ADDR
movd.From.Reg = v.Args[0].Reg()
movd.From.Offset = 256
movd.To.Type = obj.TYPE_REG
movd.To.Reg = v.Args[0].Reg()
- cmpu := gc.Prog(s390x.ACMPU)
+ cmpu := s.Prog(s390x.ACMPU)
cmpu.From.Reg = v.Args[0].Reg()
cmpu.From.Type = obj.TYPE_REG
cmpu.To.Reg = v.Args[1].Reg()
cmpu.To.Type = obj.TYPE_REG
- bne := gc.Prog(s390x.ABLT)
+ bne := s.Prog(s390x.ABLT)
bne.To.Type = obj.TYPE_BRANCH
gc.Patch(bne, clear)
if v.AuxInt > 0 {
- clear := gc.Prog(s390x.ACLEAR)
+ clear := s.Prog(s390x.ACLEAR)
clear.From.Type = obj.TYPE_CONST
clear.From.Offset = v.AuxInt
clear.To.Type = obj.TYPE_MEM
clear.To.Reg = v.Args[0].Reg()
}
case ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.OpS390XLAA, ssa.OpS390XLAAG:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.Reg = v.Reg0()
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
// NOP (so the BNE has somewhere to land)
// CS{,G} arg1, arg2, arg0
- cs := gc.Prog(v.Op.Asm())
+ cs := s.Prog(v.Op.Asm())
cs.From.Type = obj.TYPE_REG
cs.From.Reg = v.Args[1].Reg() // old
cs.Reg = v.Args[2].Reg() // new
gc.AddAux(&cs.To, v)
// MOVD $0, ret
- movd := gc.Prog(s390x.AMOVD)
+ movd := s.Prog(s390x.AMOVD)
movd.From.Type = obj.TYPE_CONST
movd.From.Offset = 0
movd.To.Type = obj.TYPE_REG
movd.To.Reg = v.Reg0()
// BNE 2(PC)
- bne := gc.Prog(s390x.ABNE)
+ bne := s.Prog(s390x.ABNE)
bne.To.Type = obj.TYPE_BRANCH
// MOVD $1, ret
- movd = gc.Prog(s390x.AMOVD)
+ movd = s.Prog(s390x.AMOVD)
movd.From.Type = obj.TYPE_CONST
movd.From.Offset = 1
movd.To.Type = obj.TYPE_REG
movd.To.Reg = v.Reg0()
// NOP (so the BNE has somewhere to land)
- nop := gc.Prog(obj.ANOP)
+ nop := s.Prog(obj.ANOP)
gc.Patch(bne, nop)
case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
// Loop until the CS{,G} succeeds.
// BNE cs
// MOV{WZ,D} arg0, ret
- load := gc.Prog(loadByType(v.Type.FieldType(0)))
+ load := s.Prog(loadByType(v.Type.FieldType(0)))
load.From.Type = obj.TYPE_MEM
load.From.Reg = v.Args[0].Reg()
load.To.Type = obj.TYPE_REG
gc.AddAux(&load.From, v)
// CS{,G} ret, arg1, arg0
- cs := gc.Prog(v.Op.Asm())
+ cs := s.Prog(v.Op.Asm())
cs.From.Type = obj.TYPE_REG
cs.From.Reg = v.Reg0() // old
cs.Reg = v.Args[1].Reg() // new
gc.AddAux(&cs.To, v)
// BNE cs
- bne := gc.Prog(s390x.ABNE)
+ bne := s.Prog(s390x.ABNE)
bne.To.Type = obj.TYPE_BRANCH
gc.Patch(bne, cs)
default:
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
- p := gc.Prog(s390x.ABR)
+ p := s.Prog(s390x.ABR)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
// defer returns in R3:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
- p := gc.Prog(s390x.ACMPW)
+ p := s.Prog(s390x.ACMPW)
p.From.Type = obj.TYPE_REG
p.From.Reg = s390x.REG_R3
p.To.Type = obj.TYPE_CONST
p.To.Offset = 0
- p = gc.Prog(s390x.ABNE)
+ p = s.Prog(s390x.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
- p := gc.Prog(s390x.ABR)
+ p := s.Prog(s390x.ABR)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
- gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+ s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
- gc.Prog(obj.ARET)
+ s.Prog(obj.ARET)
case ssa.BlockRetJmp:
- p := gc.Prog(s390x.ABR)
+ p := s.Prog(s390x.ABR)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
var p *obj.Prog
switch next {
case b.Succs[0].Block():
- p = gc.Prog(jmp.invasm)
+ p = s.Prog(jmp.invasm)
likely *= -1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
- q := gc.Prog(s390x.ABR)
+ q := s.Prog(s390x.ABR)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}
switch v.Op {
case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst:
- p := gc.Prog(loadPush(v.Type))
+ p := s.Prog(loadPush(v.Type))
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
popAndSave(s, v)
case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2:
- p := gc.Prog(loadPush(v.Type))
+ p := s.Prog(loadPush(v.Type))
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
popAndSave(s, v)
case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1, ssa.Op386MOVSSloadidx4, ssa.Op386MOVSDloadidx8:
- p := gc.Prog(loadPush(v.Type))
+ p := s.Prog(loadPush(v.Type))
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
case ssa.Op386MOVSDstore:
op = x86.AFMOVDP
}
- p := gc.Prog(op)
+ p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_MEM
case ssa.Op386MOVSDstoreidx1, ssa.Op386MOVSDstoreidx8:
op = x86.AFMOVDP
}
- p := gc.Prog(op)
+ p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_MEM
// Set precision if needed. 64 bits is the default.
switch v.Op {
case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
- p := gc.Prog(x86.AFSTCW)
+ p := s.Prog(x86.AFSTCW)
s.AddrScratch(&p.To)
- p = gc.Prog(x86.AFLDCW)
+ p = s.Prog(x86.AFLDCW)
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN
p.From.Sym = gc.Sysfunc("controlWord32")
case ssa.Op386DIVSS, ssa.Op386DIVSD:
op = x86.AFDIVDP
}
- p := gc.Prog(op)
+ p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_REG
// Restore precision if needed.
switch v.Op {
case ssa.Op386ADDSS, ssa.Op386SUBSS, ssa.Op386MULSS, ssa.Op386DIVSS:
- p := gc.Prog(x86.AFLDCW)
+ p := s.Prog(x86.AFLDCW)
s.AddrScratch(&p.From)
}
push(s, v.Args[0])
// Compare.
- p := gc.Prog(x86.AFUCOMP)
+ p := s.Prog(x86.AFUCOMP)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_REG
p.To.Reg = s.SSEto387[v.Args[1].Reg()] + 1
// Save AX.
- p = gc.Prog(x86.AMOVL)
+ p = s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX
s.AddrScratch(&p.To)
// Move status word into AX.
- p = gc.Prog(x86.AFSTSW)
+ p = s.Prog(x86.AFSTSW)
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_AX
// Then move the flags we need to the integer flags.
- gc.Prog(x86.ASAHF)
+ s.Prog(x86.ASAHF)
// Restore AX.
- p = gc.Prog(x86.AMOVL)
+ p = s.Prog(x86.AMOVL)
s.AddrScratch(&p.From)
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_AX
case ssa.Op386SQRTSD:
push(s, v.Args[0])
- gc.Prog(x86.AFSQRT)
+ s.Prog(x86.AFSQRT)
popAndSave(s, v)
case ssa.Op386FCHS:
push(s, v.Args[0])
- gc.Prog(x86.AFCHS)
+ s.Prog(x86.AFCHS)
popAndSave(s, v)
case ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD:
- p := gc.Prog(x86.AMOVL)
+ p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
s.AddrScratch(&p.To)
- p = gc.Prog(x86.AFMOVL)
+ p = s.Prog(x86.AFMOVL)
s.AddrScratch(&p.From)
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_F0
push(s, v.Args[0])
// Save control word.
- p := gc.Prog(x86.AFSTCW)
+ p := s.Prog(x86.AFSTCW)
s.AddrScratch(&p.To)
p.To.Offset += 4
// Load control word which truncates (rounds towards zero).
- p = gc.Prog(x86.AFLDCW)
+ p = s.Prog(x86.AFLDCW)
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN
p.From.Sym = gc.Sysfunc("controlWord64trunc")
// Now do the conversion.
- p = gc.Prog(x86.AFMOVLP)
+ p = s.Prog(x86.AFMOVLP)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
s.AddrScratch(&p.To)
- p = gc.Prog(x86.AMOVL)
+ p = s.Prog(x86.AMOVL)
s.AddrScratch(&p.From)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
// Restore control word.
- p = gc.Prog(x86.AFLDCW)
+ p = s.Prog(x86.AFLDCW)
s.AddrScratch(&p.From)
p.From.Offset += 4
case ssa.Op386CVTSD2SS:
// Round to nearest float32.
push(s, v.Args[0])
- p := gc.Prog(x86.AFMOVFP)
+ p := s.Prog(x86.AFMOVFP)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
s.AddrScratch(&p.To)
- p = gc.Prog(x86.AFMOVF)
+ p = s.Prog(x86.AFMOVF)
s.AddrScratch(&p.From)
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_F0
return
}
// Load+push the value we need.
- p := gc.Prog(loadPush(v.Type))
+ p := s.Prog(loadPush(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_F0
case 8:
op = x86.AFMOVDP
}
- p := gc.Prog(op)
+ p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
gc.AddrAuto(&p.To, v)
// push pushes v onto the floating-point stack. v must be in a register.
func push(s *gc.SSAGenState, v *ssa.Value) {
- p := gc.Prog(x86.AFMOVD)
+ p := s.Prog(x86.AFMOVD)
p.From.Type = obj.TYPE_REG
p.From.Reg = s.SSEto387[v.Reg()]
p.To.Type = obj.TYPE_REG
r := v.Reg()
if _, ok := s.SSEto387[r]; ok {
// Pop value, write to correct register.
- p := gc.Prog(x86.AFMOVDP)
+ p := s.Prog(x86.AFMOVDP)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_REG
// flush387 removes all entries from the 387 floating-point stack.
func flush387(s *gc.SSAGenState) {
for k := range s.SSEto387 {
- p := gc.Prog(x86.AFMOVDP)
+ p := s.Prog(x86.AFMOVDP)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_F0
p.To.Type = obj.TYPE_REG
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
-func opregreg(op obj.As, dest, src int16) *obj.Prog {
- p := gc.Prog(op)
+func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
+ p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = dest
r2 := v.Args[1].Reg()
switch {
case r == r1:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r2
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case r == r2:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r1
p.To.Type = obj.TYPE_REG
p.To.Reg = r
default:
- p := gc.Prog(x86.ALEAL)
+ p := s.Prog(x86.ALEAL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = r1
p.From.Scale = 1
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- opregreg(v.Op.Asm(), r, v.Args[1].Reg())
+ opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
case ssa.Op386ADDLcarry, ssa.Op386SUBLcarry:
// output 0 is carry/borrow, output 1 is the low 32 bits.
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
}
- opregreg(v.Op.Asm(), r, v.Args[1].Reg())
+ opregreg(s, v.Op.Asm(), r, v.Args[1].Reg())
case ssa.Op386ADDLconstcarry, ssa.Op386SUBLconstcarry:
// output 0 is carry/borrow, output 1 is the low 32 bits.
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output[0] not in same register %s", v.LongString())
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
var c *obj.Prog
switch v.Op {
case ssa.Op386DIVL, ssa.Op386MODL:
- c = gc.Prog(x86.ACMPL)
- j = gc.Prog(x86.AJEQ)
- gc.Prog(x86.ACDQ) //TODO: fix
+ c = s.Prog(x86.ACMPL)
+ j = s.Prog(x86.AJEQ)
+ s.Prog(x86.ACDQ) //TODO: fix
case ssa.Op386DIVW, ssa.Op386MODW:
- c = gc.Prog(x86.ACMPW)
- j = gc.Prog(x86.AJEQ)
- gc.Prog(x86.ACWD)
+ c = s.Prog(x86.ACMPW)
+ j = s.Prog(x86.AJEQ)
+ s.Prog(x86.ACWD)
}
c.From.Type = obj.TYPE_REG
c.From.Reg = x
// signed ints were sign extended above
if v.Op == ssa.Op386DIVLU || v.Op == ssa.Op386MODLU ||
v.Op == ssa.Op386DIVWU || v.Op == ssa.Op386MODWU {
- c := gc.Prog(x86.AXORL)
+ c := s.Prog(x86.AXORL)
c.From.Type = obj.TYPE_REG
c.From.Reg = x86.REG_DX
c.To.Type = obj.TYPE_REG
c.To.Reg = x86.REG_DX
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = x
// signed division, rest of the check for -1 case
if j != nil {
- j2 := gc.Prog(obj.AJMP)
+ j2 := s.Prog(obj.AJMP)
j2.To.Type = obj.TYPE_BRANCH
var n *obj.Prog
if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW {
// n / -1 = -n
- n = gc.Prog(x86.ANEGL)
+ n = s.Prog(x86.ANEGL)
n.To.Type = obj.TYPE_REG
n.To.Reg = x86.REG_AX
} else {
// n % -1 == 0
- n = gc.Prog(x86.AXORL)
+ n = s.Prog(x86.AXORL)
n.From.Type = obj.TYPE_REG
n.From.Reg = x86.REG_DX
n.To.Type = obj.TYPE_REG
// Arg[0] is already in AX as it's the only register we allow
// and DX is the only output we care about (the high bits)
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
// IMULB puts the high portion in AH instead of DL,
// so move it to DL for consistency
if v.Type.Size() == 1 {
- m := gc.Prog(x86.AMOVB)
+ m := s.Prog(x86.AMOVB)
m.From.Type = obj.TYPE_REG
m.From.Reg = x86.REG_AH
m.To.Type = obj.TYPE_REG
case ssa.Op386MULLQU:
// AX * args[1], high 32 bits in DX (result[0]), low 32 bits in AX (result[1]).
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- p := gc.Prog(x86.AADDL)
+ p := s.Prog(x86.AADDL)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = r
p.From.Reg = v.Args[1].Reg()
- p = gc.Prog(x86.ARCRL)
+ p = s.Prog(x86.ARCRL)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 1
p.To.Type = obj.TYPE_REG
a := v.Args[0].Reg()
if r == a {
if v.AuxInt == 1 {
- p := gc.Prog(x86.AINCL)
+ p := s.Prog(x86.AINCL)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
return
}
if v.AuxInt == -1 {
- p := gc.Prog(x86.ADECL)
+ p := s.Prog(x86.ADECL)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
return
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = r
return
}
- p := gc.Prog(x86.ALEAL)
+ p := s.Prog(x86.ALEAL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = a
p.From.Offset = v.AuxInt
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.Op386SBBLcarrymask:
r := v.Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = r
p.To.Type = obj.TYPE_REG
case ssa.Op386LEAL1, ssa.Op386LEAL2, ssa.Op386LEAL4, ssa.Op386LEAL8:
r := v.Args[0].Reg()
i := v.Args[1].Reg()
- p := gc.Prog(x86.ALEAL)
+ p := s.Prog(x86.ALEAL)
switch v.Op {
case ssa.Op386LEAL1:
p.From.Scale = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386LEAL:
- p := gc.Prog(x86.ALEAL)
+ p := s.Prog(x86.ALEAL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Reg = v.Reg()
case ssa.Op386CMPL, ssa.Op386CMPW, ssa.Op386CMPB,
ssa.Op386TESTL, ssa.Op386TESTW, ssa.Op386TESTB:
- opregreg(v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
case ssa.Op386UCOMISS, ssa.Op386UCOMISD:
// Go assembler has swapped operands for UCOMISx relative to CMP,
// must account for that right here.
- opregreg(v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
+ opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
case ssa.Op386CMPLconst, ssa.Op386CMPWconst, ssa.Op386CMPBconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_CONST
p.To.Offset = v.AuxInt
case ssa.Op386TESTLconst, ssa.Op386TESTWconst, ssa.Op386TESTBconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[0].Reg()
case ssa.Op386MOVLconst:
x := v.Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
p.From.Offset = v.AuxInt
p.To.Type = obj.TYPE_REG
}
case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst:
x := v.Reg()
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_FCONST
p.From.Val = math.Float64frombits(uint64(v.AuxInt))
p.To.Type = obj.TYPE_REG
} else {
literal = fmt.Sprintf("$f32.%08x", math.Float32bits(float32(math.Float64frombits(uint64(v.AuxInt)))))
}
- p := gc.Prog(x86.ALEAL)
+ p := s.Prog(x86.ALEAL)
p.From.Type = obj.TYPE_MEM
p.From.Name = obj.NAME_EXTERN
p.From.Sym = obj.Linklookup(gc.Ctxt, literal, 0)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVLload, ssa.Op386MOVWload, ssa.Op386MOVBload, ssa.Op386MOVBLSXload, ssa.Op386MOVWLSXload:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386MOVSDloadidx8:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386MOVWloadidx2:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
if i == x86.REG_SP {
r, i = i, r
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = r
p.From.Scale = 1
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore, ssa.Op386MOVLstore, ssa.Op386MOVWstore, ssa.Op386MOVBstore:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
case ssa.Op386MOVSDstoreidx8:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v)
case ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Index = v.Args[1].Reg()
gc.AddAux(&p.To, v)
case ssa.Op386MOVWstoreidx2:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
if i == x86.REG_SP {
r, i = i, r
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Index = i
gc.AddAux(&p.To, v)
case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off())
case ssa.Op386MOVLstoreconstidx1, ssa.Op386MOVLstoreconstidx4, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVWstoreconstidx2, ssa.Op386MOVBstoreconstidx1:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
sc := v.AuxValAndOff()
p.From.Offset = sc.Val()
ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD,
ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL,
ssa.Op386CVTSS2SD, ssa.Op386CVTSD2SS:
- opregreg(v.Op.Asm(), v.Reg(), v.Args[0].Reg())
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
case ssa.Op386DUFFZERO:
- p := gc.Prog(obj.ADUFFZERO)
+ p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffzero
p.To.Offset = v.AuxInt
case ssa.Op386DUFFCOPY:
- p := gc.Prog(obj.ADUFFCOPY)
+ p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffcopy
p.To.Offset = v.AuxInt
x := v.Args[0].Reg()
y := v.Reg()
if x != y {
- opregreg(moveByType(v.Type), y, x)
+ opregreg(s, moveByType(v.Type), y, x)
}
case ssa.OpLoadReg:
if v.Type.IsFlags() {
v.Fatalf("load flags not implemented: %v", v.LongString())
return
}
- p := gc.Prog(loadByType(v.Type))
+ p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
v.Fatalf("store flags not implemented: %v", v.LongString())
return
}
- p := gc.Prog(storeByType(v.Type))
+ p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
// near CanUse1InsnTLS for a detailed explanation of these instructions.
if x86.CanUse1InsnTLS(gc.Ctxt) {
// MOVL (TLS), r
- p := gc.Prog(x86.AMOVL)
+ p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
} else {
// MOVL TLS, r
// MOVL (r)(TLS*1), r
- p := gc.Prog(x86.AMOVL)
+ p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_TLS
p.To.Type = obj.TYPE_REG
p.To.Reg = r
- q := gc.Prog(x86.AMOVL)
+ q := s.Prog(x86.AMOVL)
q.From.Type = obj.TYPE_MEM
q.From.Reg = r
q.From.Index = x86.REG_TLS
if r != v.Args[0].Reg() {
v.Fatalf("input[0] and output not in same register %s", v.LongString())
}
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.Op386BSFL, ssa.Op386BSFW,
ssa.Op386BSRL, ssa.Op386BSRW,
ssa.Op386SQRTSD:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
ssa.Op386SETB, ssa.Op386SETBE,
ssa.Op386SETORD, ssa.Op386SETNAN,
ssa.Op386SETA, ssa.Op386SETAE:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386SETNEF:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- q := gc.Prog(x86.ASETPS)
+ q := s.Prog(x86.ASETPS)
q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX
- opregreg(x86.AORL, v.Reg(), x86.REG_AX)
+ opregreg(s, x86.AORL, v.Reg(), x86.REG_AX)
case ssa.Op386SETEQF:
- p := gc.Prog(v.Op.Asm())
+ p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- q := gc.Prog(x86.ASETPC)
+ q := s.Prog(x86.ASETPC)
q.To.Type = obj.TYPE_REG
q.To.Reg = x86.REG_AX
- opregreg(x86.AANDL, v.Reg(), x86.REG_AX)
+ opregreg(s, x86.AANDL, v.Reg(), x86.REG_AX)
case ssa.Op386InvertFlags:
v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
case ssa.Op386FlagEQ, ssa.Op386FlagLT_ULT, ssa.Op386FlagLT_UGT, ssa.Op386FlagGT_ULT, ssa.Op386FlagGT_UGT:
v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
case ssa.Op386REPSTOSL:
- gc.Prog(x86.AREP)
- gc.Prog(x86.ASTOSL)
+ s.Prog(x86.AREP)
+ s.Prog(x86.ASTOSL)
case ssa.Op386REPMOVSL:
- gc.Prog(x86.AREP)
- gc.Prog(x86.AMOVSL)
+ s.Prog(x86.AREP)
+ s.Prog(x86.AMOVSL)
case ssa.Op386LoweredNilCheck:
// Issue a load which will fault if the input is nil.
// TODO: We currently use the 2-byte instruction TESTB AX, (reg).
// Should we use the 3-byte TESTB $0, (reg) instead? It is larger
// but it doesn't have false dependency on AX.
// Or maybe allocate an output register and use MOVL (reg),reg2 ?
// That trades clobbering flags for clobbering a register.
- p := gc.Prog(x86.ATESTB)
+ p := s.Prog(x86.ATESTB)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX
p.To.Type = obj.TYPE_MEM
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
// defer returns in AX:
// 0 if we should continue executing
// 1 if we should jump to deferreturn call
- p := gc.Prog(x86.ATESTL)
+ p := s.Prog(x86.ATESTL)
p.From.Type = obj.TYPE_REG
p.From.Reg = x86.REG_AX
p.To.Type = obj.TYPE_REG
p.To.Reg = x86.REG_AX
- p = gc.Prog(x86.AJNE)
+ p = s.Prog(x86.AJNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
- gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
+ s.Prog(obj.AUNDEF) // tell plive.go that we never reach here
case ssa.BlockRet:
- gc.Prog(obj.ARET)
+ s.Prog(obj.ARET)
case ssa.BlockRetJmp:
- p := gc.Prog(obj.AJMP)
+ p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
var p *obj.Prog
switch next {
case b.Succs[0].Block():
- p = gc.Prog(jmp.invasm)
+ p = s.Prog(jmp.invasm)
likely *= -1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
case b.Succs[1].Block():
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
default:
- p = gc.Prog(jmp.asm)
+ p = s.Prog(jmp.asm)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
- q := gc.Prog(obj.AJMP)
+ q := s.Prog(obj.AJMP)
q.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
}