// generate body
s.startBlock(bBody)
s.stmtList(n.Nbody)
- s.stmt(n.Right)
+ if n.Right != nil {
+ s.stmt(n.Right)
+ }
b = s.endBlock()
addEdge(b, bCond)
}
}
+var binOpToSSA = [...]ssa.Op{
+ // Comparisons
+ OEQ: ssa.OpEq,
+ ONE: ssa.OpNeq,
+ OLT: ssa.OpLess,
+ OLE: ssa.OpLeq,
+ OGT: ssa.OpGreater,
+ OGE: ssa.OpGeq,
+ // Arithmetic
+ OADD: ssa.OpAdd,
+ OSUB: ssa.OpSub,
+ OLSH: ssa.OpLsh,
+ ORSH: ssa.OpRsh,
+}
+
// expr converts the expression n to ssa, adds it to s and returns the ssa result.
func (s *state) expr(n *Node) *ssa.Value {
s.pushLine(n.Lineno)
x := s.expr(n.Left)
return s.newValue1(ssa.OpConvert, n.Type, x)
- // binary ops
- case OLT:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- return s.newValue2(ssa.OpLess, ssa.TypeBool, a, b)
- case OADD:
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- return s.newValue2(ssa.OpAdd, a.Type, a, b)
- case OSUB:
- // TODO:(khr) fold code for all binary ops together somehow
- a := s.expr(n.Left)
- b := s.expr(n.Right)
- return s.newValue2(ssa.OpSub, a.Type, a, b)
- case OLSH:
+ // binary ops
+ case OLT, OEQ, ONE, OLE, OGE, OGT:
a := s.expr(n.Left)
b := s.expr(n.Right)
- return s.newValue2(ssa.OpLsh, a.Type, a, b)
- case ORSH:
+ return s.newValue2(binOpToSSA[n.Op], ssa.TypeBool, a, b)
+ case OADD, OSUB, OLSH, ORSH:
a := s.expr(n.Left)
b := s.expr(n.Right)
- return s.newValue2(ssa.OpRsh, a.Type, a, b)
+ return s.newValue2(binOpToSSA[n.Op], a.Type, a, b)
case OADDR:
return s.addr(n.Left)
y))
(Less x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETL (CMPQ <TypeFlags> x y))
+(Leq x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETLE (CMPQ <TypeFlags> x y))
+(Greater x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETG (CMPQ <TypeFlags> x y))
+(Geq x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETGE (CMPQ <TypeFlags> x y))
+(Eq x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETEQ (CMPQ <TypeFlags> x y))
+(Neq x y) && is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type) -> (SETNE (CMPQ <TypeFlags> x y))
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVQload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) -> (MOVLload ptr mem)
{name: "SETEQ", reg: flagsgp}, // extract == condition from arg0
{name: "SETNE", reg: flagsgp}, // extract != condition from arg0
{name: "SETL", reg: flagsgp}, // extract signed < condition from arg0
+ {name: "SETLE", reg: flagsgp}, // extract signed <= condition from arg0
{name: "SETG", reg: flagsgp}, // extract signed > condition from arg0
{name: "SETGE", reg: flagsgp}, // extract signed >= condition from arg0
{name: "SETB", reg: flagsgp}, // extract unsigned < condition from arg0
{name: "Rsh"}, // arg0 >> arg1 (signed/unsigned depending on signedness of type)
// 2-input comparisons
- {name: "Less"}, // arg0 < arg1
+ {name: "Eq"}, // arg0 == arg1
+ {name: "Neq"}, // arg0 != arg1
+ {name: "Less"}, // arg0 < arg1
+ {name: "Leq"}, // arg0 <= arg1
+ {name: "Greater"}, // arg0 > arg1
+ {name: "Geq"}, // arg0 >= arg1
// Data movement
{name: "Phi"}, // select an argument based on which predecessor block we came from
blocs = append(blocs,
Bloc("entry",
Valu("mem", OpArg, TypeMem, 0, ".mem"),
+ Valu("sb", OpSB, TypeInvalid, 0, nil),
Goto(blockn(0)),
),
)
for i := 0; i < depth; i++ {
blocs = append(blocs,
Bloc(blockn(i),
- Valu(ptrn(i), OpGlobal, ptrType, 0, nil),
+ Valu(ptrn(i), OpAddr, ptrType, 0, nil, "sb"),
Valu(booln(i), OpIsNonNil, TypeBool, 0, nil, ptrn(i)),
If(booln(i), blockn(i+1), "exit"),
),
OpAMD64SETEQ
OpAMD64SETNE
OpAMD64SETL
+ OpAMD64SETLE
OpAMD64SETG
OpAMD64SETGE
OpAMD64SETB
OpMul
OpLsh
OpRsh
+ OpEq
+ OpNeq
OpLess
+ OpLeq
+ OpGreater
+ OpGeq
OpPhi
OpCopy
OpConst
},
},
},
+ {
+ name: "SETLE",
+ reg: regInfo{
+ inputs: []regMask{
+ 8589934592, // .FLAGS
+ },
+ clobbers: 0,
+ outputs: []regMask{
+ 65519, // .AX .CX .DX .BX .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15
+ },
+ },
+ },
{
name: "SETG",
reg: regInfo{
},
generic: true,
},
+ {
+ name: "Eq",
+ reg: regInfo{
+ inputs: []regMask{},
+ clobbers: 0,
+ outputs: []regMask{},
+ },
+ generic: true,
+ },
+ {
+ name: "Neq",
+ reg: regInfo{
+ inputs: []regMask{},
+ clobbers: 0,
+ outputs: []regMask{},
+ },
+ generic: true,
+ },
{
name: "Less",
reg: regInfo{
},
generic: true,
},
+ {
+ name: "Leq",
+ reg: regInfo{
+ inputs: []regMask{},
+ clobbers: 0,
+ outputs: []regMask{},
+ },
+ generic: true,
+ },
+ {
+ name: "Greater",
+ reg: regInfo{
+ inputs: []regMask{},
+ clobbers: 0,
+ outputs: []regMask{},
+ },
+ generic: true,
+ },
+ {
+ name: "Geq",
+ reg: regInfo{
+ inputs: []regMask{},
+ clobbers: 0,
+ outputs: []regMask{},
+ },
+ generic: true,
+ },
{
name: "Phi",
reg: regInfo{
goto endcc7894224d4f6b0bcabcece5d0185912
endcc7894224d4f6b0bcabcece5d0185912:
;
+ case OpEq:
+ // match: (Eq x y)
+ // cond: is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type)
+ // result: (SETEQ (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type)) {
+ goto endad64a62086703de09f52315e190bdf0e
+ }
+ v.Op = OpAMD64SETEQ
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto endad64a62086703de09f52315e190bdf0e
+ endad64a62086703de09f52315e190bdf0e:
+ ;
+ case OpGeq:
+ // match: (Geq x y)
+ // cond: is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type)
+ // result: (SETGE (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type)) {
+ goto end31ba1968829a3b451a35431111140fec
+ }
+ v.Op = OpAMD64SETGE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end31ba1968829a3b451a35431111140fec
+ end31ba1968829a3b451a35431111140fec:
+ ;
+ case OpGreater:
+ // match: (Greater x y)
+ // cond: is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type)
+ // result: (SETG (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type)) {
+ goto end1cff30b1bf40104e5e30ab73d6568f7f
+ }
+ v.Op = OpAMD64SETG
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto end1cff30b1bf40104e5e30ab73d6568f7f
+ end1cff30b1bf40104e5e30ab73d6568f7f:
+ ;
case OpIsInBounds:
// match: (IsInBounds idx len)
// cond:
goto endff508c3726edfb573abc6128c177e76c
endff508c3726edfb573abc6128c177e76c:
;
+ case OpLeq:
+ // match: (Leq x y)
+ // cond: is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type)
+ // result: (SETLE (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type)) {
+ goto enddb4f100c01cdd95d69d399ffc37e33e7
+ }
+ v.Op = OpAMD64SETLE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto enddb4f100c01cdd95d69d399ffc37e33e7
+ enddb4f100c01cdd95d69d399ffc37e33e7:
+ ;
case OpLess:
// match: (Less x y)
// cond: is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type)
goto endfab0d598f376ecba45a22587d50f7aff
endfab0d598f376ecba45a22587d50f7aff:
;
+ case OpNeq:
+ // match: (Neq x y)
+ // cond: is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type)
+ // result: (SETNE (CMPQ <TypeFlags> x y))
+ {
+ x := v.Args[0]
+ y := v.Args[1]
+ if !(is64BitInt(v.Args[0].Type) && isSigned(v.Args[0].Type)) {
+ goto enddccbd4e7581ae8d9916b933d3501987b
+ }
+ v.Op = OpAMD64SETNE
+ v.AuxInt = 0
+ v.Aux = nil
+ v.resetArgs()
+ v0 := v.Block.NewValue0(v.Line, OpAMD64CMPQ, TypeInvalid)
+ v0.Type = TypeFlags
+ v0.AddArg(x)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ goto enddccbd4e7581ae8d9916b933d3501987b
+ enddccbd4e7581ae8d9916b933d3501987b:
+ ;
case OpOffPtr:
// match: (OffPtr [off] ptr)
// cond: