Type Op is enforced now.
Type EType will need further CLs.
Added TODOs where Node.EType is used as a union type.
The TODOs have the format `TODO(marvin): Fix Node.EType union type.`.
Furthermore:
-The flag argument of the Econv function in fmt.go is removed, since it was unused.
-Some cleaning along the way, e.g. declare vars first when getting initialized.
Passes go build -toolexec 'toolstash -cmp' -a std.
Fixes #11846
Change-Id: I908b955d5a78a195604970983fb9194bd9e9260b
Reviewed-on: https://go-review.googlesource.com/14956
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Marvin Stenger <marvin.stenger94@gmail.com>
* res = nl % nr
* according to op.
*/
-func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// Have to be careful about handling
// most negative int divided by -1 correctly.
// The hardware will trap.
x.Type = gc.Types[gc.TINT64]
gmove(x, oldx)
x.Type = t
- oldx.Etype = r // squirrel away old r value
+ // TODO(marvin): Fix Node.EType type union.
+ oldx.Etype = gc.EType(r) // squirrel away old r value
gc.SetReg(dr, 1)
}
}
* res = nl << nr
* res = nl >> nr
*/
-func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
* there is no 2-operand byte multiply instruction so
* we do a full-width multiplication and truncate afterwards.
*/
-func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
+func cgen_bmul(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
if optoas(op, nl.Type) != x86.AIMULB {
return false
}
gins(as, &n1, n2)
}
-func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
// Reverse comparison to place constant last.
op = gc.Brrev(op)
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op int, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) int {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
+ // avoid constant conversions in switches below
+ const (
+ OMINUS_ = uint32(gc.OMINUS) << 16
+ OLSH_ = uint32(gc.OLSH) << 16
+ ORSH_ = uint32(gc.ORSH) << 16
+ OADD_ = uint32(gc.OADD) << 16
+ OSUB_ = uint32(gc.OSUB) << 16
+ OMUL_ = uint32(gc.OMUL) << 16
+ ODIV_ = uint32(gc.ODIV) << 16
+ OMOD_ = uint32(gc.OMOD) << 16
+ OOR_ = uint32(gc.OOR) << 16
+ OAND_ = uint32(gc.OAND) << 16
+ OXOR_ = uint32(gc.OXOR) << 16
+ OEQ_ = uint32(gc.OEQ) << 16
+ ONE_ = uint32(gc.ONE) << 16
+ OLT_ = uint32(gc.OLT) << 16
+ OLE_ = uint32(gc.OLE) << 16
+ OGE_ = uint32(gc.OGE) << 16
+ OGT_ = uint32(gc.OGT) << 16
+ OCMP_ = uint32(gc.OCMP) << 16
+ OPS_ = uint32(gc.OPS) << 16
+ OPC_ = uint32(gc.OPC) << 16
+ OAS_ = uint32(gc.OAS) << 16
+ OHMUL_ = uint32(gc.OHMUL) << 16
+ OSQRT_ = uint32(gc.OSQRT) << 16
+ OADDR_ = uint32(gc.OADDR) << 16
+ OINC_ = uint32(gc.OINC) << 16
+ ODEC_ = uint32(gc.ODEC) << 16
+ OLROT_ = uint32(gc.OLROT) << 16
+ ORROTC_ = uint32(gc.ORROTC) << 16
+ OEXTEND_ = uint32(gc.OEXTEND) << 16
+ )
+
a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)
- case gc.OADDR<<16 | gc.TPTR32:
+ case OADDR_ | gc.TPTR32:
a = x86.ALEAL
- case gc.OADDR<<16 | gc.TPTR64:
+ case OADDR_ | gc.TPTR64:
a = x86.ALEAQ
- case gc.OEQ<<16 | gc.TBOOL,
- gc.OEQ<<16 | gc.TINT8,
- gc.OEQ<<16 | gc.TUINT8,
- gc.OEQ<<16 | gc.TINT16,
- gc.OEQ<<16 | gc.TUINT16,
- gc.OEQ<<16 | gc.TINT32,
- gc.OEQ<<16 | gc.TUINT32,
- gc.OEQ<<16 | gc.TINT64,
- gc.OEQ<<16 | gc.TUINT64,
- gc.OEQ<<16 | gc.TPTR32,
- gc.OEQ<<16 | gc.TPTR64,
- gc.OEQ<<16 | gc.TFLOAT32,
- gc.OEQ<<16 | gc.TFLOAT64:
+ case OEQ_ | gc.TBOOL,
+ OEQ_ | gc.TINT8,
+ OEQ_ | gc.TUINT8,
+ OEQ_ | gc.TINT16,
+ OEQ_ | gc.TUINT16,
+ OEQ_ | gc.TINT32,
+ OEQ_ | gc.TUINT32,
+ OEQ_ | gc.TINT64,
+ OEQ_ | gc.TUINT64,
+ OEQ_ | gc.TPTR32,
+ OEQ_ | gc.TPTR64,
+ OEQ_ | gc.TFLOAT32,
+ OEQ_ | gc.TFLOAT64:
a = x86.AJEQ
- case gc.ONE<<16 | gc.TBOOL,
- gc.ONE<<16 | gc.TINT8,
- gc.ONE<<16 | gc.TUINT8,
- gc.ONE<<16 | gc.TINT16,
- gc.ONE<<16 | gc.TUINT16,
- gc.ONE<<16 | gc.TINT32,
- gc.ONE<<16 | gc.TUINT32,
- gc.ONE<<16 | gc.TINT64,
- gc.ONE<<16 | gc.TUINT64,
- gc.ONE<<16 | gc.TPTR32,
- gc.ONE<<16 | gc.TPTR64,
- gc.ONE<<16 | gc.TFLOAT32,
- gc.ONE<<16 | gc.TFLOAT64:
+ case ONE_ | gc.TBOOL,
+ ONE_ | gc.TINT8,
+ ONE_ | gc.TUINT8,
+ ONE_ | gc.TINT16,
+ ONE_ | gc.TUINT16,
+ ONE_ | gc.TINT32,
+ ONE_ | gc.TUINT32,
+ ONE_ | gc.TINT64,
+ ONE_ | gc.TUINT64,
+ ONE_ | gc.TPTR32,
+ ONE_ | gc.TPTR64,
+ ONE_ | gc.TFLOAT32,
+ ONE_ | gc.TFLOAT64:
a = x86.AJNE
- case gc.OPS<<16 | gc.TBOOL,
- gc.OPS<<16 | gc.TINT8,
- gc.OPS<<16 | gc.TUINT8,
- gc.OPS<<16 | gc.TINT16,
- gc.OPS<<16 | gc.TUINT16,
- gc.OPS<<16 | gc.TINT32,
- gc.OPS<<16 | gc.TUINT32,
- gc.OPS<<16 | gc.TINT64,
- gc.OPS<<16 | gc.TUINT64,
- gc.OPS<<16 | gc.TPTR32,
- gc.OPS<<16 | gc.TPTR64,
- gc.OPS<<16 | gc.TFLOAT32,
- gc.OPS<<16 | gc.TFLOAT64:
+ case OPS_ | gc.TBOOL,
+ OPS_ | gc.TINT8,
+ OPS_ | gc.TUINT8,
+ OPS_ | gc.TINT16,
+ OPS_ | gc.TUINT16,
+ OPS_ | gc.TINT32,
+ OPS_ | gc.TUINT32,
+ OPS_ | gc.TINT64,
+ OPS_ | gc.TUINT64,
+ OPS_ | gc.TPTR32,
+ OPS_ | gc.TPTR64,
+ OPS_ | gc.TFLOAT32,
+ OPS_ | gc.TFLOAT64:
a = x86.AJPS
- case gc.OPC<<16 | gc.TBOOL,
- gc.OPC<<16 | gc.TINT8,
- gc.OPC<<16 | gc.TUINT8,
- gc.OPC<<16 | gc.TINT16,
- gc.OPC<<16 | gc.TUINT16,
- gc.OPC<<16 | gc.TINT32,
- gc.OPC<<16 | gc.TUINT32,
- gc.OPC<<16 | gc.TINT64,
- gc.OPC<<16 | gc.TUINT64,
- gc.OPC<<16 | gc.TPTR32,
- gc.OPC<<16 | gc.TPTR64,
- gc.OPC<<16 | gc.TFLOAT32,
- gc.OPC<<16 | gc.TFLOAT64:
+ case OPC_ | gc.TBOOL,
+ OPC_ | gc.TINT8,
+ OPC_ | gc.TUINT8,
+ OPC_ | gc.TINT16,
+ OPC_ | gc.TUINT16,
+ OPC_ | gc.TINT32,
+ OPC_ | gc.TUINT32,
+ OPC_ | gc.TINT64,
+ OPC_ | gc.TUINT64,
+ OPC_ | gc.TPTR32,
+ OPC_ | gc.TPTR64,
+ OPC_ | gc.TFLOAT32,
+ OPC_ | gc.TFLOAT64:
a = x86.AJPC
- case gc.OLT<<16 | gc.TINT8,
- gc.OLT<<16 | gc.TINT16,
- gc.OLT<<16 | gc.TINT32,
- gc.OLT<<16 | gc.TINT64:
+ case OLT_ | gc.TINT8,
+ OLT_ | gc.TINT16,
+ OLT_ | gc.TINT32,
+ OLT_ | gc.TINT64:
a = x86.AJLT
- case gc.OLT<<16 | gc.TUINT8,
- gc.OLT<<16 | gc.TUINT16,
- gc.OLT<<16 | gc.TUINT32,
- gc.OLT<<16 | gc.TUINT64:
+ case OLT_ | gc.TUINT8,
+ OLT_ | gc.TUINT16,
+ OLT_ | gc.TUINT32,
+ OLT_ | gc.TUINT64:
a = x86.AJCS
- case gc.OLE<<16 | gc.TINT8,
- gc.OLE<<16 | gc.TINT16,
- gc.OLE<<16 | gc.TINT32,
- gc.OLE<<16 | gc.TINT64:
+ case OLE_ | gc.TINT8,
+ OLE_ | gc.TINT16,
+ OLE_ | gc.TINT32,
+ OLE_ | gc.TINT64:
a = x86.AJLE
- case gc.OLE<<16 | gc.TUINT8,
- gc.OLE<<16 | gc.TUINT16,
- gc.OLE<<16 | gc.TUINT32,
- gc.OLE<<16 | gc.TUINT64:
+ case OLE_ | gc.TUINT8,
+ OLE_ | gc.TUINT16,
+ OLE_ | gc.TUINT32,
+ OLE_ | gc.TUINT64:
a = x86.AJLS
- case gc.OGT<<16 | gc.TINT8,
- gc.OGT<<16 | gc.TINT16,
- gc.OGT<<16 | gc.TINT32,
- gc.OGT<<16 | gc.TINT64:
+ case OGT_ | gc.TINT8,
+ OGT_ | gc.TINT16,
+ OGT_ | gc.TINT32,
+ OGT_ | gc.TINT64:
a = x86.AJGT
- case gc.OGT<<16 | gc.TUINT8,
- gc.OGT<<16 | gc.TUINT16,
- gc.OGT<<16 | gc.TUINT32,
- gc.OGT<<16 | gc.TUINT64,
- gc.OLT<<16 | gc.TFLOAT32,
- gc.OLT<<16 | gc.TFLOAT64:
+ case OGT_ | gc.TUINT8,
+ OGT_ | gc.TUINT16,
+ OGT_ | gc.TUINT32,
+ OGT_ | gc.TUINT64,
+ OLT_ | gc.TFLOAT32,
+ OLT_ | gc.TFLOAT64:
a = x86.AJHI
- case gc.OGE<<16 | gc.TINT8,
- gc.OGE<<16 | gc.TINT16,
- gc.OGE<<16 | gc.TINT32,
- gc.OGE<<16 | gc.TINT64:
+ case OGE_ | gc.TINT8,
+ OGE_ | gc.TINT16,
+ OGE_ | gc.TINT32,
+ OGE_ | gc.TINT64:
a = x86.AJGE
- case gc.OGE<<16 | gc.TUINT8,
- gc.OGE<<16 | gc.TUINT16,
- gc.OGE<<16 | gc.TUINT32,
- gc.OGE<<16 | gc.TUINT64,
- gc.OLE<<16 | gc.TFLOAT32,
- gc.OLE<<16 | gc.TFLOAT64:
+ case OGE_ | gc.TUINT8,
+ OGE_ | gc.TUINT16,
+ OGE_ | gc.TUINT32,
+ OGE_ | gc.TUINT64,
+ OLE_ | gc.TFLOAT32,
+ OLE_ | gc.TFLOAT64:
a = x86.AJCC
- case gc.OCMP<<16 | gc.TBOOL,
- gc.OCMP<<16 | gc.TINT8,
- gc.OCMP<<16 | gc.TUINT8:
+ case OCMP_ | gc.TBOOL,
+ OCMP_ | gc.TINT8,
+ OCMP_ | gc.TUINT8:
a = x86.ACMPB
- case gc.OCMP<<16 | gc.TINT16,
- gc.OCMP<<16 | gc.TUINT16:
+ case OCMP_ | gc.TINT16,
+ OCMP_ | gc.TUINT16:
a = x86.ACMPW
- case gc.OCMP<<16 | gc.TINT32,
- gc.OCMP<<16 | gc.TUINT32,
- gc.OCMP<<16 | gc.TPTR32:
+ case OCMP_ | gc.TINT32,
+ OCMP_ | gc.TUINT32,
+ OCMP_ | gc.TPTR32:
a = x86.ACMPL
- case gc.OCMP<<16 | gc.TINT64,
- gc.OCMP<<16 | gc.TUINT64,
- gc.OCMP<<16 | gc.TPTR64:
+ case OCMP_ | gc.TINT64,
+ OCMP_ | gc.TUINT64,
+ OCMP_ | gc.TPTR64:
a = x86.ACMPQ
- case gc.OCMP<<16 | gc.TFLOAT32:
+ case OCMP_ | gc.TFLOAT32:
a = x86.AUCOMISS
- case gc.OCMP<<16 | gc.TFLOAT64:
+ case OCMP_ | gc.TFLOAT64:
a = x86.AUCOMISD
- case gc.OAS<<16 | gc.TBOOL,
- gc.OAS<<16 | gc.TINT8,
- gc.OAS<<16 | gc.TUINT8:
+ case OAS_ | gc.TBOOL,
+ OAS_ | gc.TINT8,
+ OAS_ | gc.TUINT8:
a = x86.AMOVB
- case gc.OAS<<16 | gc.TINT16,
- gc.OAS<<16 | gc.TUINT16:
+ case OAS_ | gc.TINT16,
+ OAS_ | gc.TUINT16:
a = x86.AMOVW
- case gc.OAS<<16 | gc.TINT32,
- gc.OAS<<16 | gc.TUINT32,
- gc.OAS<<16 | gc.TPTR32:
+ case OAS_ | gc.TINT32,
+ OAS_ | gc.TUINT32,
+ OAS_ | gc.TPTR32:
a = x86.AMOVL
- case gc.OAS<<16 | gc.TINT64,
- gc.OAS<<16 | gc.TUINT64,
- gc.OAS<<16 | gc.TPTR64:
+ case OAS_ | gc.TINT64,
+ OAS_ | gc.TUINT64,
+ OAS_ | gc.TPTR64:
a = x86.AMOVQ
- case gc.OAS<<16 | gc.TFLOAT32:
+ case OAS_ | gc.TFLOAT32:
a = x86.AMOVSS
- case gc.OAS<<16 | gc.TFLOAT64:
+ case OAS_ | gc.TFLOAT64:
a = x86.AMOVSD
- case gc.OADD<<16 | gc.TINT8,
- gc.OADD<<16 | gc.TUINT8:
+ case OADD_ | gc.TINT8,
+ OADD_ | gc.TUINT8:
a = x86.AADDB
- case gc.OADD<<16 | gc.TINT16,
- gc.OADD<<16 | gc.TUINT16:
+ case OADD_ | gc.TINT16,
+ OADD_ | gc.TUINT16:
a = x86.AADDW
- case gc.OADD<<16 | gc.TINT32,
- gc.OADD<<16 | gc.TUINT32,
- gc.OADD<<16 | gc.TPTR32:
+ case OADD_ | gc.TINT32,
+ OADD_ | gc.TUINT32,
+ OADD_ | gc.TPTR32:
a = x86.AADDL
- case gc.OADD<<16 | gc.TINT64,
- gc.OADD<<16 | gc.TUINT64,
- gc.OADD<<16 | gc.TPTR64:
+ case OADD_ | gc.TINT64,
+ OADD_ | gc.TUINT64,
+ OADD_ | gc.TPTR64:
a = x86.AADDQ
- case gc.OADD<<16 | gc.TFLOAT32:
+ case OADD_ | gc.TFLOAT32:
a = x86.AADDSS
- case gc.OADD<<16 | gc.TFLOAT64:
+ case OADD_ | gc.TFLOAT64:
a = x86.AADDSD
- case gc.OSUB<<16 | gc.TINT8,
- gc.OSUB<<16 | gc.TUINT8:
+ case OSUB_ | gc.TINT8,
+ OSUB_ | gc.TUINT8:
a = x86.ASUBB
- case gc.OSUB<<16 | gc.TINT16,
- gc.OSUB<<16 | gc.TUINT16:
+ case OSUB_ | gc.TINT16,
+ OSUB_ | gc.TUINT16:
a = x86.ASUBW
- case gc.OSUB<<16 | gc.TINT32,
- gc.OSUB<<16 | gc.TUINT32,
- gc.OSUB<<16 | gc.TPTR32:
+ case OSUB_ | gc.TINT32,
+ OSUB_ | gc.TUINT32,
+ OSUB_ | gc.TPTR32:
a = x86.ASUBL
- case gc.OSUB<<16 | gc.TINT64,
- gc.OSUB<<16 | gc.TUINT64,
- gc.OSUB<<16 | gc.TPTR64:
+ case OSUB_ | gc.TINT64,
+ OSUB_ | gc.TUINT64,
+ OSUB_ | gc.TPTR64:
a = x86.ASUBQ
- case gc.OSUB<<16 | gc.TFLOAT32:
+ case OSUB_ | gc.TFLOAT32:
a = x86.ASUBSS
- case gc.OSUB<<16 | gc.TFLOAT64:
+ case OSUB_ | gc.TFLOAT64:
a = x86.ASUBSD
- case gc.OINC<<16 | gc.TINT8,
- gc.OINC<<16 | gc.TUINT8:
+ case OINC_ | gc.TINT8,
+ OINC_ | gc.TUINT8:
a = x86.AINCB
- case gc.OINC<<16 | gc.TINT16,
- gc.OINC<<16 | gc.TUINT16:
+ case OINC_ | gc.TINT16,
+ OINC_ | gc.TUINT16:
a = x86.AINCW
- case gc.OINC<<16 | gc.TINT32,
- gc.OINC<<16 | gc.TUINT32,
- gc.OINC<<16 | gc.TPTR32:
+ case OINC_ | gc.TINT32,
+ OINC_ | gc.TUINT32,
+ OINC_ | gc.TPTR32:
a = x86.AINCL
- case gc.OINC<<16 | gc.TINT64,
- gc.OINC<<16 | gc.TUINT64,
- gc.OINC<<16 | gc.TPTR64:
+ case OINC_ | gc.TINT64,
+ OINC_ | gc.TUINT64,
+ OINC_ | gc.TPTR64:
a = x86.AINCQ
- case gc.ODEC<<16 | gc.TINT8,
- gc.ODEC<<16 | gc.TUINT8:
+ case ODEC_ | gc.TINT8,
+ ODEC_ | gc.TUINT8:
a = x86.ADECB
- case gc.ODEC<<16 | gc.TINT16,
- gc.ODEC<<16 | gc.TUINT16:
+ case ODEC_ | gc.TINT16,
+ ODEC_ | gc.TUINT16:
a = x86.ADECW
- case gc.ODEC<<16 | gc.TINT32,
- gc.ODEC<<16 | gc.TUINT32,
- gc.ODEC<<16 | gc.TPTR32:
+ case ODEC_ | gc.TINT32,
+ ODEC_ | gc.TUINT32,
+ ODEC_ | gc.TPTR32:
a = x86.ADECL
- case gc.ODEC<<16 | gc.TINT64,
- gc.ODEC<<16 | gc.TUINT64,
- gc.ODEC<<16 | gc.TPTR64:
+ case ODEC_ | gc.TINT64,
+ ODEC_ | gc.TUINT64,
+ ODEC_ | gc.TPTR64:
a = x86.ADECQ
- case gc.OMINUS<<16 | gc.TINT8,
- gc.OMINUS<<16 | gc.TUINT8:
+ case OMINUS_ | gc.TINT8,
+ OMINUS_ | gc.TUINT8:
a = x86.ANEGB
- case gc.OMINUS<<16 | gc.TINT16,
- gc.OMINUS<<16 | gc.TUINT16:
+ case OMINUS_ | gc.TINT16,
+ OMINUS_ | gc.TUINT16:
a = x86.ANEGW
- case gc.OMINUS<<16 | gc.TINT32,
- gc.OMINUS<<16 | gc.TUINT32,
- gc.OMINUS<<16 | gc.TPTR32:
+ case OMINUS_ | gc.TINT32,
+ OMINUS_ | gc.TUINT32,
+ OMINUS_ | gc.TPTR32:
a = x86.ANEGL
- case gc.OMINUS<<16 | gc.TINT64,
- gc.OMINUS<<16 | gc.TUINT64,
- gc.OMINUS<<16 | gc.TPTR64:
+ case OMINUS_ | gc.TINT64,
+ OMINUS_ | gc.TUINT64,
+ OMINUS_ | gc.TPTR64:
a = x86.ANEGQ
- case gc.OAND<<16 | gc.TBOOL,
- gc.OAND<<16 | gc.TINT8,
- gc.OAND<<16 | gc.TUINT8:
+ case OAND_ | gc.TBOOL,
+ OAND_ | gc.TINT8,
+ OAND_ | gc.TUINT8:
a = x86.AANDB
- case gc.OAND<<16 | gc.TINT16,
- gc.OAND<<16 | gc.TUINT16:
+ case OAND_ | gc.TINT16,
+ OAND_ | gc.TUINT16:
a = x86.AANDW
- case gc.OAND<<16 | gc.TINT32,
- gc.OAND<<16 | gc.TUINT32,
- gc.OAND<<16 | gc.TPTR32:
+ case OAND_ | gc.TINT32,
+ OAND_ | gc.TUINT32,
+ OAND_ | gc.TPTR32:
a = x86.AANDL
- case gc.OAND<<16 | gc.TINT64,
- gc.OAND<<16 | gc.TUINT64,
- gc.OAND<<16 | gc.TPTR64:
+ case OAND_ | gc.TINT64,
+ OAND_ | gc.TUINT64,
+ OAND_ | gc.TPTR64:
a = x86.AANDQ
- case gc.OOR<<16 | gc.TBOOL,
- gc.OOR<<16 | gc.TINT8,
- gc.OOR<<16 | gc.TUINT8:
+ case OOR_ | gc.TBOOL,
+ OOR_ | gc.TINT8,
+ OOR_ | gc.TUINT8:
a = x86.AORB
- case gc.OOR<<16 | gc.TINT16,
- gc.OOR<<16 | gc.TUINT16:
+ case OOR_ | gc.TINT16,
+ OOR_ | gc.TUINT16:
a = x86.AORW
- case gc.OOR<<16 | gc.TINT32,
- gc.OOR<<16 | gc.TUINT32,
- gc.OOR<<16 | gc.TPTR32:
+ case OOR_ | gc.TINT32,
+ OOR_ | gc.TUINT32,
+ OOR_ | gc.TPTR32:
a = x86.AORL
- case gc.OOR<<16 | gc.TINT64,
- gc.OOR<<16 | gc.TUINT64,
- gc.OOR<<16 | gc.TPTR64:
+ case OOR_ | gc.TINT64,
+ OOR_ | gc.TUINT64,
+ OOR_ | gc.TPTR64:
a = x86.AORQ
- case gc.OXOR<<16 | gc.TINT8,
- gc.OXOR<<16 | gc.TUINT8:
+ case OXOR_ | gc.TINT8,
+ OXOR_ | gc.TUINT8:
a = x86.AXORB
- case gc.OXOR<<16 | gc.TINT16,
- gc.OXOR<<16 | gc.TUINT16:
+ case OXOR_ | gc.TINT16,
+ OXOR_ | gc.TUINT16:
a = x86.AXORW
- case gc.OXOR<<16 | gc.TINT32,
- gc.OXOR<<16 | gc.TUINT32,
- gc.OXOR<<16 | gc.TPTR32:
+ case OXOR_ | gc.TINT32,
+ OXOR_ | gc.TUINT32,
+ OXOR_ | gc.TPTR32:
a = x86.AXORL
- case gc.OXOR<<16 | gc.TINT64,
- gc.OXOR<<16 | gc.TUINT64,
- gc.OXOR<<16 | gc.TPTR64:
+ case OXOR_ | gc.TINT64,
+ OXOR_ | gc.TUINT64,
+ OXOR_ | gc.TPTR64:
a = x86.AXORQ
- case gc.OLROT<<16 | gc.TINT8,
- gc.OLROT<<16 | gc.TUINT8:
+ case OLROT_ | gc.TINT8,
+ OLROT_ | gc.TUINT8:
a = x86.AROLB
- case gc.OLROT<<16 | gc.TINT16,
- gc.OLROT<<16 | gc.TUINT16:
+ case OLROT_ | gc.TINT16,
+ OLROT_ | gc.TUINT16:
a = x86.AROLW
- case gc.OLROT<<16 | gc.TINT32,
- gc.OLROT<<16 | gc.TUINT32,
- gc.OLROT<<16 | gc.TPTR32:
+ case OLROT_ | gc.TINT32,
+ OLROT_ | gc.TUINT32,
+ OLROT_ | gc.TPTR32:
a = x86.AROLL
- case gc.OLROT<<16 | gc.TINT64,
- gc.OLROT<<16 | gc.TUINT64,
- gc.OLROT<<16 | gc.TPTR64:
+ case OLROT_ | gc.TINT64,
+ OLROT_ | gc.TUINT64,
+ OLROT_ | gc.TPTR64:
a = x86.AROLQ
- case gc.OLSH<<16 | gc.TINT8,
- gc.OLSH<<16 | gc.TUINT8:
+ case OLSH_ | gc.TINT8,
+ OLSH_ | gc.TUINT8:
a = x86.ASHLB
- case gc.OLSH<<16 | gc.TINT16,
- gc.OLSH<<16 | gc.TUINT16:
+ case OLSH_ | gc.TINT16,
+ OLSH_ | gc.TUINT16:
a = x86.ASHLW
- case gc.OLSH<<16 | gc.TINT32,
- gc.OLSH<<16 | gc.TUINT32,
- gc.OLSH<<16 | gc.TPTR32:
+ case OLSH_ | gc.TINT32,
+ OLSH_ | gc.TUINT32,
+ OLSH_ | gc.TPTR32:
a = x86.ASHLL
- case gc.OLSH<<16 | gc.TINT64,
- gc.OLSH<<16 | gc.TUINT64,
- gc.OLSH<<16 | gc.TPTR64:
+ case OLSH_ | gc.TINT64,
+ OLSH_ | gc.TUINT64,
+ OLSH_ | gc.TPTR64:
a = x86.ASHLQ
- case gc.ORSH<<16 | gc.TUINT8:
+ case ORSH_ | gc.TUINT8:
a = x86.ASHRB
- case gc.ORSH<<16 | gc.TUINT16:
+ case ORSH_ | gc.TUINT16:
a = x86.ASHRW
- case gc.ORSH<<16 | gc.TUINT32,
- gc.ORSH<<16 | gc.TPTR32:
+ case ORSH_ | gc.TUINT32,
+ ORSH_ | gc.TPTR32:
a = x86.ASHRL
- case gc.ORSH<<16 | gc.TUINT64,
- gc.ORSH<<16 | gc.TPTR64:
+ case ORSH_ | gc.TUINT64,
+ ORSH_ | gc.TPTR64:
a = x86.ASHRQ
- case gc.ORSH<<16 | gc.TINT8:
+ case ORSH_ | gc.TINT8:
a = x86.ASARB
- case gc.ORSH<<16 | gc.TINT16:
+ case ORSH_ | gc.TINT16:
a = x86.ASARW
- case gc.ORSH<<16 | gc.TINT32:
+ case ORSH_ | gc.TINT32:
a = x86.ASARL
- case gc.ORSH<<16 | gc.TINT64:
+ case ORSH_ | gc.TINT64:
a = x86.ASARQ
- case gc.ORROTC<<16 | gc.TINT8,
- gc.ORROTC<<16 | gc.TUINT8:
+ case ORROTC_ | gc.TINT8,
+ ORROTC_ | gc.TUINT8:
a = x86.ARCRB
- case gc.ORROTC<<16 | gc.TINT16,
- gc.ORROTC<<16 | gc.TUINT16:
+ case ORROTC_ | gc.TINT16,
+ ORROTC_ | gc.TUINT16:
a = x86.ARCRW
- case gc.ORROTC<<16 | gc.TINT32,
- gc.ORROTC<<16 | gc.TUINT32:
+ case ORROTC_ | gc.TINT32,
+ ORROTC_ | gc.TUINT32:
a = x86.ARCRL
- case gc.ORROTC<<16 | gc.TINT64,
- gc.ORROTC<<16 | gc.TUINT64:
+ case ORROTC_ | gc.TINT64,
+ ORROTC_ | gc.TUINT64:
a = x86.ARCRQ
- case gc.OHMUL<<16 | gc.TINT8,
- gc.OMUL<<16 | gc.TINT8,
- gc.OMUL<<16 | gc.TUINT8:
+ case OHMUL_ | gc.TINT8,
+ OMUL_ | gc.TINT8,
+ OMUL_ | gc.TUINT8:
a = x86.AIMULB
- case gc.OHMUL<<16 | gc.TINT16,
- gc.OMUL<<16 | gc.TINT16,
- gc.OMUL<<16 | gc.TUINT16:
+ case OHMUL_ | gc.TINT16,
+ OMUL_ | gc.TINT16,
+ OMUL_ | gc.TUINT16:
a = x86.AIMULW
- case gc.OHMUL<<16 | gc.TINT32,
- gc.OMUL<<16 | gc.TINT32,
- gc.OMUL<<16 | gc.TUINT32,
- gc.OMUL<<16 | gc.TPTR32:
+ case OHMUL_ | gc.TINT32,
+ OMUL_ | gc.TINT32,
+ OMUL_ | gc.TUINT32,
+ OMUL_ | gc.TPTR32:
a = x86.AIMULL
- case gc.OHMUL<<16 | gc.TINT64,
- gc.OMUL<<16 | gc.TINT64,
- gc.OMUL<<16 | gc.TUINT64,
- gc.OMUL<<16 | gc.TPTR64:
+ case OHMUL_ | gc.TINT64,
+ OMUL_ | gc.TINT64,
+ OMUL_ | gc.TUINT64,
+ OMUL_ | gc.TPTR64:
a = x86.AIMULQ
- case gc.OHMUL<<16 | gc.TUINT8:
+ case OHMUL_ | gc.TUINT8:
a = x86.AMULB
- case gc.OHMUL<<16 | gc.TUINT16:
+ case OHMUL_ | gc.TUINT16:
a = x86.AMULW
- case gc.OHMUL<<16 | gc.TUINT32,
- gc.OHMUL<<16 | gc.TPTR32:
+ case OHMUL_ | gc.TUINT32,
+ OHMUL_ | gc.TPTR32:
a = x86.AMULL
- case gc.OHMUL<<16 | gc.TUINT64,
- gc.OHMUL<<16 | gc.TPTR64:
+ case OHMUL_ | gc.TUINT64,
+ OHMUL_ | gc.TPTR64:
a = x86.AMULQ
- case gc.OMUL<<16 | gc.TFLOAT32:
+ case OMUL_ | gc.TFLOAT32:
a = x86.AMULSS
- case gc.OMUL<<16 | gc.TFLOAT64:
+ case OMUL_ | gc.TFLOAT64:
a = x86.AMULSD
- case gc.ODIV<<16 | gc.TINT8,
- gc.OMOD<<16 | gc.TINT8:
+ case ODIV_ | gc.TINT8,
+ OMOD_ | gc.TINT8:
a = x86.AIDIVB
- case gc.ODIV<<16 | gc.TUINT8,
- gc.OMOD<<16 | gc.TUINT8:
+ case ODIV_ | gc.TUINT8,
+ OMOD_ | gc.TUINT8:
a = x86.ADIVB
- case gc.ODIV<<16 | gc.TINT16,
- gc.OMOD<<16 | gc.TINT16:
+ case ODIV_ | gc.TINT16,
+ OMOD_ | gc.TINT16:
a = x86.AIDIVW
- case gc.ODIV<<16 | gc.TUINT16,
- gc.OMOD<<16 | gc.TUINT16:
+ case ODIV_ | gc.TUINT16,
+ OMOD_ | gc.TUINT16:
a = x86.ADIVW
- case gc.ODIV<<16 | gc.TINT32,
- gc.OMOD<<16 | gc.TINT32:
+ case ODIV_ | gc.TINT32,
+ OMOD_ | gc.TINT32:
a = x86.AIDIVL
- case gc.ODIV<<16 | gc.TUINT32,
- gc.ODIV<<16 | gc.TPTR32,
- gc.OMOD<<16 | gc.TUINT32,
- gc.OMOD<<16 | gc.TPTR32:
+ case ODIV_ | gc.TUINT32,
+ ODIV_ | gc.TPTR32,
+ OMOD_ | gc.TUINT32,
+ OMOD_ | gc.TPTR32:
a = x86.ADIVL
- case gc.ODIV<<16 | gc.TINT64,
- gc.OMOD<<16 | gc.TINT64:
+ case ODIV_ | gc.TINT64,
+ OMOD_ | gc.TINT64:
a = x86.AIDIVQ
- case gc.ODIV<<16 | gc.TUINT64,
- gc.ODIV<<16 | gc.TPTR64,
- gc.OMOD<<16 | gc.TUINT64,
- gc.OMOD<<16 | gc.TPTR64:
+ case ODIV_ | gc.TUINT64,
+ ODIV_ | gc.TPTR64,
+ OMOD_ | gc.TUINT64,
+ OMOD_ | gc.TPTR64:
a = x86.ADIVQ
- case gc.OEXTEND<<16 | gc.TINT16:
+ case OEXTEND_ | gc.TINT16:
a = x86.ACWD
- case gc.OEXTEND<<16 | gc.TINT32:
+ case OEXTEND_ | gc.TINT32:
a = x86.ACDQ
- case gc.OEXTEND<<16 | gc.TINT64:
+ case OEXTEND_ | gc.TINT64:
a = x86.ACQO
- case gc.ODIV<<16 | gc.TFLOAT32:
+ case ODIV_ | gc.TFLOAT32:
a = x86.ADIVSS
- case gc.ODIV<<16 | gc.TFLOAT64:
+ case ODIV_ | gc.TFLOAT64:
a = x86.ADIVSD
- case gc.OSQRT<<16 | gc.TFLOAT64:
+ case OSQRT_ | gc.TFLOAT64:
a = x86.ASQRTSD
}
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi1, &ah)
gins(arm.AMOVW, &lo2, &n1)
- gins(optoas(int(n.Op), lo1.Type), &n1, &al)
+ gins(optoas(n.Op, lo1.Type), &n1, &al)
gins(arm.AMOVW, &hi2, &n1)
- gins(optoas(int(n.Op), lo1.Type), &n1, &ah)
+ gins(optoas(n.Op, lo1.Type), &n1, &ah)
gc.Regfree(&n1)
}
* generate comparison of nl, nr, both 64-bit.
* nl is memory; nr is constant or memory.
*/
-func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
+func cmp64(nl *gc.Node, nr *gc.Node, op gc.Op, likely int, to *obj.Prog) {
var lo1 gc.Node
var hi1 gc.Node
var lo2 gc.Node
* res = nl << nr
* res = nl >> nr
*/
-func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Type.Width > 4 {
gc.Fatalf("cgen_shift %v", nl.Type)
}
gc.Regfree(&n2)
}
-func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
op = gc.Brrev(op)
n1, n2 = n2, n1
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op int, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) int {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
+ // avoid constant conversions in switches below
+ const (
+ OMINUS_ = uint32(gc.OMINUS) << 16
+ OLSH_ = uint32(gc.OLSH) << 16
+ ORSH_ = uint32(gc.ORSH) << 16
+ OADD_ = uint32(gc.OADD) << 16
+ OSUB_ = uint32(gc.OSUB) << 16
+ OMUL_ = uint32(gc.OMUL) << 16
+ ODIV_ = uint32(gc.ODIV) << 16
+ OMOD_ = uint32(gc.OMOD) << 16
+ OOR_ = uint32(gc.OOR) << 16
+ OAND_ = uint32(gc.OAND) << 16
+ OXOR_ = uint32(gc.OXOR) << 16
+ OEQ_ = uint32(gc.OEQ) << 16
+ ONE_ = uint32(gc.ONE) << 16
+ OLT_ = uint32(gc.OLT) << 16
+ OLE_ = uint32(gc.OLE) << 16
+ OGE_ = uint32(gc.OGE) << 16
+ OGT_ = uint32(gc.OGT) << 16
+ OCMP_ = uint32(gc.OCMP) << 16
+ OPS_ = uint32(gc.OPS) << 16
+ OAS_ = uint32(gc.OAS) << 16
+ OSQRT_ = uint32(gc.OSQRT) << 16
+ )
+
a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
break;
*/
// TODO(kaib): make sure the conditional branches work on all edge cases
- case gc.OEQ<<16 | gc.TBOOL,
- gc.OEQ<<16 | gc.TINT8,
- gc.OEQ<<16 | gc.TUINT8,
- gc.OEQ<<16 | gc.TINT16,
- gc.OEQ<<16 | gc.TUINT16,
- gc.OEQ<<16 | gc.TINT32,
- gc.OEQ<<16 | gc.TUINT32,
- gc.OEQ<<16 | gc.TINT64,
- gc.OEQ<<16 | gc.TUINT64,
- gc.OEQ<<16 | gc.TPTR32,
- gc.OEQ<<16 | gc.TPTR64,
- gc.OEQ<<16 | gc.TFLOAT32,
- gc.OEQ<<16 | gc.TFLOAT64:
+ case OEQ_ | gc.TBOOL,
+ OEQ_ | gc.TINT8,
+ OEQ_ | gc.TUINT8,
+ OEQ_ | gc.TINT16,
+ OEQ_ | gc.TUINT16,
+ OEQ_ | gc.TINT32,
+ OEQ_ | gc.TUINT32,
+ OEQ_ | gc.TINT64,
+ OEQ_ | gc.TUINT64,
+ OEQ_ | gc.TPTR32,
+ OEQ_ | gc.TPTR64,
+ OEQ_ | gc.TFLOAT32,
+ OEQ_ | gc.TFLOAT64:
a = arm.ABEQ
- case gc.ONE<<16 | gc.TBOOL,
- gc.ONE<<16 | gc.TINT8,
- gc.ONE<<16 | gc.TUINT8,
- gc.ONE<<16 | gc.TINT16,
- gc.ONE<<16 | gc.TUINT16,
- gc.ONE<<16 | gc.TINT32,
- gc.ONE<<16 | gc.TUINT32,
- gc.ONE<<16 | gc.TINT64,
- gc.ONE<<16 | gc.TUINT64,
- gc.ONE<<16 | gc.TPTR32,
- gc.ONE<<16 | gc.TPTR64,
- gc.ONE<<16 | gc.TFLOAT32,
- gc.ONE<<16 | gc.TFLOAT64:
+ case ONE_ | gc.TBOOL,
+ ONE_ | gc.TINT8,
+ ONE_ | gc.TUINT8,
+ ONE_ | gc.TINT16,
+ ONE_ | gc.TUINT16,
+ ONE_ | gc.TINT32,
+ ONE_ | gc.TUINT32,
+ ONE_ | gc.TINT64,
+ ONE_ | gc.TUINT64,
+ ONE_ | gc.TPTR32,
+ ONE_ | gc.TPTR64,
+ ONE_ | gc.TFLOAT32,
+ ONE_ | gc.TFLOAT64:
a = arm.ABNE
- case gc.OLT<<16 | gc.TINT8,
- gc.OLT<<16 | gc.TINT16,
- gc.OLT<<16 | gc.TINT32,
- gc.OLT<<16 | gc.TINT64,
- gc.OLT<<16 | gc.TFLOAT32,
- gc.OLT<<16 | gc.TFLOAT64:
+ case OLT_ | gc.TINT8,
+ OLT_ | gc.TINT16,
+ OLT_ | gc.TINT32,
+ OLT_ | gc.TINT64,
+ OLT_ | gc.TFLOAT32,
+ OLT_ | gc.TFLOAT64:
a = arm.ABLT
- case gc.OLT<<16 | gc.TUINT8,
- gc.OLT<<16 | gc.TUINT16,
- gc.OLT<<16 | gc.TUINT32,
- gc.OLT<<16 | gc.TUINT64:
+ case OLT_ | gc.TUINT8,
+ OLT_ | gc.TUINT16,
+ OLT_ | gc.TUINT32,
+ OLT_ | gc.TUINT64:
a = arm.ABLO
- case gc.OLE<<16 | gc.TINT8,
- gc.OLE<<16 | gc.TINT16,
- gc.OLE<<16 | gc.TINT32,
- gc.OLE<<16 | gc.TINT64,
- gc.OLE<<16 | gc.TFLOAT32,
- gc.OLE<<16 | gc.TFLOAT64:
+ case OLE_ | gc.TINT8,
+ OLE_ | gc.TINT16,
+ OLE_ | gc.TINT32,
+ OLE_ | gc.TINT64,
+ OLE_ | gc.TFLOAT32,
+ OLE_ | gc.TFLOAT64:
a = arm.ABLE
- case gc.OLE<<16 | gc.TUINT8,
- gc.OLE<<16 | gc.TUINT16,
- gc.OLE<<16 | gc.TUINT32,
- gc.OLE<<16 | gc.TUINT64:
+ case OLE_ | gc.TUINT8,
+ OLE_ | gc.TUINT16,
+ OLE_ | gc.TUINT32,
+ OLE_ | gc.TUINT64:
a = arm.ABLS
- case gc.OGT<<16 | gc.TINT8,
- gc.OGT<<16 | gc.TINT16,
- gc.OGT<<16 | gc.TINT32,
- gc.OGT<<16 | gc.TINT64,
- gc.OGT<<16 | gc.TFLOAT32,
- gc.OGT<<16 | gc.TFLOAT64:
+ case OGT_ | gc.TINT8,
+ OGT_ | gc.TINT16,
+ OGT_ | gc.TINT32,
+ OGT_ | gc.TINT64,
+ OGT_ | gc.TFLOAT32,
+ OGT_ | gc.TFLOAT64:
a = arm.ABGT
- case gc.OGT<<16 | gc.TUINT8,
- gc.OGT<<16 | gc.TUINT16,
- gc.OGT<<16 | gc.TUINT32,
- gc.OGT<<16 | gc.TUINT64:
+ case OGT_ | gc.TUINT8,
+ OGT_ | gc.TUINT16,
+ OGT_ | gc.TUINT32,
+ OGT_ | gc.TUINT64:
a = arm.ABHI
- case gc.OGE<<16 | gc.TINT8,
- gc.OGE<<16 | gc.TINT16,
- gc.OGE<<16 | gc.TINT32,
- gc.OGE<<16 | gc.TINT64,
- gc.OGE<<16 | gc.TFLOAT32,
- gc.OGE<<16 | gc.TFLOAT64:
+ case OGE_ | gc.TINT8,
+ OGE_ | gc.TINT16,
+ OGE_ | gc.TINT32,
+ OGE_ | gc.TINT64,
+ OGE_ | gc.TFLOAT32,
+ OGE_ | gc.TFLOAT64:
a = arm.ABGE
- case gc.OGE<<16 | gc.TUINT8,
- gc.OGE<<16 | gc.TUINT16,
- gc.OGE<<16 | gc.TUINT32,
- gc.OGE<<16 | gc.TUINT64:
+ case OGE_ | gc.TUINT8,
+ OGE_ | gc.TUINT16,
+ OGE_ | gc.TUINT32,
+ OGE_ | gc.TUINT64:
a = arm.ABHS
- case gc.OCMP<<16 | gc.TBOOL,
- gc.OCMP<<16 | gc.TINT8,
- gc.OCMP<<16 | gc.TUINT8,
- gc.OCMP<<16 | gc.TINT16,
- gc.OCMP<<16 | gc.TUINT16,
- gc.OCMP<<16 | gc.TINT32,
- gc.OCMP<<16 | gc.TUINT32,
- gc.OCMP<<16 | gc.TPTR32:
+ case OCMP_ | gc.TBOOL,
+ OCMP_ | gc.TINT8,
+ OCMP_ | gc.TUINT8,
+ OCMP_ | gc.TINT16,
+ OCMP_ | gc.TUINT16,
+ OCMP_ | gc.TINT32,
+ OCMP_ | gc.TUINT32,
+ OCMP_ | gc.TPTR32:
a = arm.ACMP
- case gc.OCMP<<16 | gc.TFLOAT32:
+ case OCMP_ | gc.TFLOAT32:
a = arm.ACMPF
- case gc.OCMP<<16 | gc.TFLOAT64:
+ case OCMP_ | gc.TFLOAT64:
a = arm.ACMPD
- case gc.OPS<<16 | gc.TFLOAT32,
- gc.OPS<<16 | gc.TFLOAT64:
+ case OPS_ | gc.TFLOAT32,
+ OPS_ | gc.TFLOAT64:
a = arm.ABVS
- case gc.OAS<<16 | gc.TBOOL:
+ case OAS_ | gc.TBOOL:
a = arm.AMOVB
- case gc.OAS<<16 | gc.TINT8:
+ case OAS_ | gc.TINT8:
a = arm.AMOVBS
- case gc.OAS<<16 | gc.TUINT8:
+ case OAS_ | gc.TUINT8:
a = arm.AMOVBU
- case gc.OAS<<16 | gc.TINT16:
+ case OAS_ | gc.TINT16:
a = arm.AMOVHS
- case gc.OAS<<16 | gc.TUINT16:
+ case OAS_ | gc.TUINT16:
a = arm.AMOVHU
- case gc.OAS<<16 | gc.TINT32,
- gc.OAS<<16 | gc.TUINT32,
- gc.OAS<<16 | gc.TPTR32:
+ case OAS_ | gc.TINT32,
+ OAS_ | gc.TUINT32,
+ OAS_ | gc.TPTR32:
a = arm.AMOVW
- case gc.OAS<<16 | gc.TFLOAT32:
+ case OAS_ | gc.TFLOAT32:
a = arm.AMOVF
- case gc.OAS<<16 | gc.TFLOAT64:
+ case OAS_ | gc.TFLOAT64:
a = arm.AMOVD
- case gc.OADD<<16 | gc.TINT8,
- gc.OADD<<16 | gc.TUINT8,
- gc.OADD<<16 | gc.TINT16,
- gc.OADD<<16 | gc.TUINT16,
- gc.OADD<<16 | gc.TINT32,
- gc.OADD<<16 | gc.TUINT32,
- gc.OADD<<16 | gc.TPTR32:
+ case OADD_ | gc.TINT8,
+ OADD_ | gc.TUINT8,
+ OADD_ | gc.TINT16,
+ OADD_ | gc.TUINT16,
+ OADD_ | gc.TINT32,
+ OADD_ | gc.TUINT32,
+ OADD_ | gc.TPTR32:
a = arm.AADD
- case gc.OADD<<16 | gc.TFLOAT32:
+ case OADD_ | gc.TFLOAT32:
a = arm.AADDF
- case gc.OADD<<16 | gc.TFLOAT64:
+ case OADD_ | gc.TFLOAT64:
a = arm.AADDD
- case gc.OSUB<<16 | gc.TINT8,
- gc.OSUB<<16 | gc.TUINT8,
- gc.OSUB<<16 | gc.TINT16,
- gc.OSUB<<16 | gc.TUINT16,
- gc.OSUB<<16 | gc.TINT32,
- gc.OSUB<<16 | gc.TUINT32,
- gc.OSUB<<16 | gc.TPTR32:
+ case OSUB_ | gc.TINT8,
+ OSUB_ | gc.TUINT8,
+ OSUB_ | gc.TINT16,
+ OSUB_ | gc.TUINT16,
+ OSUB_ | gc.TINT32,
+ OSUB_ | gc.TUINT32,
+ OSUB_ | gc.TPTR32:
a = arm.ASUB
- case gc.OSUB<<16 | gc.TFLOAT32:
+ case OSUB_ | gc.TFLOAT32:
a = arm.ASUBF
- case gc.OSUB<<16 | gc.TFLOAT64:
+ case OSUB_ | gc.TFLOAT64:
a = arm.ASUBD
- case gc.OMINUS<<16 | gc.TINT8,
- gc.OMINUS<<16 | gc.TUINT8,
- gc.OMINUS<<16 | gc.TINT16,
- gc.OMINUS<<16 | gc.TUINT16,
- gc.OMINUS<<16 | gc.TINT32,
- gc.OMINUS<<16 | gc.TUINT32,
- gc.OMINUS<<16 | gc.TPTR32:
+ case OMINUS_ | gc.TINT8,
+ OMINUS_ | gc.TUINT8,
+ OMINUS_ | gc.TINT16,
+ OMINUS_ | gc.TUINT16,
+ OMINUS_ | gc.TINT32,
+ OMINUS_ | gc.TUINT32,
+ OMINUS_ | gc.TPTR32:
a = arm.ARSB
- case gc.OAND<<16 | gc.TINT8,
- gc.OAND<<16 | gc.TUINT8,
- gc.OAND<<16 | gc.TINT16,
- gc.OAND<<16 | gc.TUINT16,
- gc.OAND<<16 | gc.TINT32,
- gc.OAND<<16 | gc.TUINT32,
- gc.OAND<<16 | gc.TPTR32:
+ case OAND_ | gc.TINT8,
+ OAND_ | gc.TUINT8,
+ OAND_ | gc.TINT16,
+ OAND_ | gc.TUINT16,
+ OAND_ | gc.TINT32,
+ OAND_ | gc.TUINT32,
+ OAND_ | gc.TPTR32:
a = arm.AAND
- case gc.OOR<<16 | gc.TINT8,
- gc.OOR<<16 | gc.TUINT8,
- gc.OOR<<16 | gc.TINT16,
- gc.OOR<<16 | gc.TUINT16,
- gc.OOR<<16 | gc.TINT32,
- gc.OOR<<16 | gc.TUINT32,
- gc.OOR<<16 | gc.TPTR32:
+ case OOR_ | gc.TINT8,
+ OOR_ | gc.TUINT8,
+ OOR_ | gc.TINT16,
+ OOR_ | gc.TUINT16,
+ OOR_ | gc.TINT32,
+ OOR_ | gc.TUINT32,
+ OOR_ | gc.TPTR32:
a = arm.AORR
- case gc.OXOR<<16 | gc.TINT8,
- gc.OXOR<<16 | gc.TUINT8,
- gc.OXOR<<16 | gc.TINT16,
- gc.OXOR<<16 | gc.TUINT16,
- gc.OXOR<<16 | gc.TINT32,
- gc.OXOR<<16 | gc.TUINT32,
- gc.OXOR<<16 | gc.TPTR32:
+ case OXOR_ | gc.TINT8,
+ OXOR_ | gc.TUINT8,
+ OXOR_ | gc.TINT16,
+ OXOR_ | gc.TUINT16,
+ OXOR_ | gc.TINT32,
+ OXOR_ | gc.TUINT32,
+ OXOR_ | gc.TPTR32:
a = arm.AEOR
- case gc.OLSH<<16 | gc.TINT8,
- gc.OLSH<<16 | gc.TUINT8,
- gc.OLSH<<16 | gc.TINT16,
- gc.OLSH<<16 | gc.TUINT16,
- gc.OLSH<<16 | gc.TINT32,
- gc.OLSH<<16 | gc.TUINT32,
- gc.OLSH<<16 | gc.TPTR32:
+ case OLSH_ | gc.TINT8,
+ OLSH_ | gc.TUINT8,
+ OLSH_ | gc.TINT16,
+ OLSH_ | gc.TUINT16,
+ OLSH_ | gc.TINT32,
+ OLSH_ | gc.TUINT32,
+ OLSH_ | gc.TPTR32:
a = arm.ASLL
- case gc.ORSH<<16 | gc.TUINT8,
- gc.ORSH<<16 | gc.TUINT16,
- gc.ORSH<<16 | gc.TUINT32,
- gc.ORSH<<16 | gc.TPTR32:
+ case ORSH_ | gc.TUINT8,
+ ORSH_ | gc.TUINT16,
+ ORSH_ | gc.TUINT32,
+ ORSH_ | gc.TPTR32:
a = arm.ASRL
- case gc.ORSH<<16 | gc.TINT8,
- gc.ORSH<<16 | gc.TINT16,
- gc.ORSH<<16 | gc.TINT32:
+ case ORSH_ | gc.TINT8,
+ ORSH_ | gc.TINT16,
+ ORSH_ | gc.TINT32:
a = arm.ASRA
- case gc.OMUL<<16 | gc.TUINT8,
- gc.OMUL<<16 | gc.TUINT16,
- gc.OMUL<<16 | gc.TUINT32,
- gc.OMUL<<16 | gc.TPTR32:
+ case OMUL_ | gc.TUINT8,
+ OMUL_ | gc.TUINT16,
+ OMUL_ | gc.TUINT32,
+ OMUL_ | gc.TPTR32:
a = arm.AMULU
- case gc.OMUL<<16 | gc.TINT8,
- gc.OMUL<<16 | gc.TINT16,
- gc.OMUL<<16 | gc.TINT32:
+ case OMUL_ | gc.TINT8,
+ OMUL_ | gc.TINT16,
+ OMUL_ | gc.TINT32:
a = arm.AMUL
- case gc.OMUL<<16 | gc.TFLOAT32:
+ case OMUL_ | gc.TFLOAT32:
a = arm.AMULF
- case gc.OMUL<<16 | gc.TFLOAT64:
+ case OMUL_ | gc.TFLOAT64:
a = arm.AMULD
- case gc.ODIV<<16 | gc.TUINT8,
- gc.ODIV<<16 | gc.TUINT16,
- gc.ODIV<<16 | gc.TUINT32,
- gc.ODIV<<16 | gc.TPTR32:
+ case ODIV_ | gc.TUINT8,
+ ODIV_ | gc.TUINT16,
+ ODIV_ | gc.TUINT32,
+ ODIV_ | gc.TPTR32:
a = arm.ADIVU
- case gc.ODIV<<16 | gc.TINT8,
- gc.ODIV<<16 | gc.TINT16,
- gc.ODIV<<16 | gc.TINT32:
+ case ODIV_ | gc.TINT8,
+ ODIV_ | gc.TINT16,
+ ODIV_ | gc.TINT32:
a = arm.ADIV
- case gc.OMOD<<16 | gc.TUINT8,
- gc.OMOD<<16 | gc.TUINT16,
- gc.OMOD<<16 | gc.TUINT32,
- gc.OMOD<<16 | gc.TPTR32:
+ case OMOD_ | gc.TUINT8,
+ OMOD_ | gc.TUINT16,
+ OMOD_ | gc.TUINT32,
+ OMOD_ | gc.TPTR32:
a = arm.AMODU
- case gc.OMOD<<16 | gc.TINT8,
- gc.OMOD<<16 | gc.TINT16,
- gc.OMOD<<16 | gc.TINT32:
+ case OMOD_ | gc.TINT8,
+ OMOD_ | gc.TINT16,
+ OMOD_ | gc.TINT32:
a = arm.AMOD
// case CASE(OEXTEND, TINT16):
// a = ACQO;
// break;
- case gc.ODIV<<16 | gc.TFLOAT32:
+ case ODIV_ | gc.TFLOAT32:
a = arm.ADIVF
- case gc.ODIV<<16 | gc.TFLOAT64:
+ case ODIV_ | gc.TFLOAT64:
a = arm.ADIVD
- case gc.OSQRT<<16 | gc.TFLOAT64:
+ case OSQRT_ | gc.TFLOAT64:
a = arm.ASQRTD
}
* res = nl % nr
* according to op.
*/
-func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// Have to be careful about handling
// most negative int divided by -1 correctly.
// The hardware will generate undefined result.
* res = nl << nr
* res = nl >> nr
*/
-func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
a := int(optoas(op, nl.Type))
if nr.Op == gc.OLITERAL {
gc.Regfree(&ntmp)
}
-func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
// Reverse comparison to place constant last.
op = gc.Brrev(op)
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op int, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) int {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
+ // avoid constant conversions in switches below
+ const (
+ OMINUS_ = uint32(gc.OMINUS) << 16
+ OLSH_ = uint32(gc.OLSH) << 16
+ ORSH_ = uint32(gc.ORSH) << 16
+ OADD_ = uint32(gc.OADD) << 16
+ OSUB_ = uint32(gc.OSUB) << 16
+ OMUL_ = uint32(gc.OMUL) << 16
+ ODIV_ = uint32(gc.ODIV) << 16
+ OOR_ = uint32(gc.OOR) << 16
+ OAND_ = uint32(gc.OAND) << 16
+ OXOR_ = uint32(gc.OXOR) << 16
+ OEQ_ = uint32(gc.OEQ) << 16
+ ONE_ = uint32(gc.ONE) << 16
+ OLT_ = uint32(gc.OLT) << 16
+ OLE_ = uint32(gc.OLE) << 16
+ OGE_ = uint32(gc.OGE) << 16
+ OGT_ = uint32(gc.OGT) << 16
+ OCMP_ = uint32(gc.OCMP) << 16
+ OAS_ = uint32(gc.OAS) << 16
+ OHMUL_ = uint32(gc.OHMUL) << 16
+ OSQRT_ = uint32(gc.OSQRT) << 16
+ )
+
a := int(obj.AXXX)
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
- case gc.OEQ<<16 | gc.TBOOL,
- gc.OEQ<<16 | gc.TINT8,
- gc.OEQ<<16 | gc.TUINT8,
- gc.OEQ<<16 | gc.TINT16,
- gc.OEQ<<16 | gc.TUINT16,
- gc.OEQ<<16 | gc.TINT32,
- gc.OEQ<<16 | gc.TUINT32,
- gc.OEQ<<16 | gc.TINT64,
- gc.OEQ<<16 | gc.TUINT64,
- gc.OEQ<<16 | gc.TPTR32,
- gc.OEQ<<16 | gc.TPTR64,
- gc.OEQ<<16 | gc.TFLOAT32,
- gc.OEQ<<16 | gc.TFLOAT64:
+ case OEQ_ | gc.TBOOL,
+ OEQ_ | gc.TINT8,
+ OEQ_ | gc.TUINT8,
+ OEQ_ | gc.TINT16,
+ OEQ_ | gc.TUINT16,
+ OEQ_ | gc.TINT32,
+ OEQ_ | gc.TUINT32,
+ OEQ_ | gc.TINT64,
+ OEQ_ | gc.TUINT64,
+ OEQ_ | gc.TPTR32,
+ OEQ_ | gc.TPTR64,
+ OEQ_ | gc.TFLOAT32,
+ OEQ_ | gc.TFLOAT64:
a = arm64.ABEQ
- case gc.ONE<<16 | gc.TBOOL,
- gc.ONE<<16 | gc.TINT8,
- gc.ONE<<16 | gc.TUINT8,
- gc.ONE<<16 | gc.TINT16,
- gc.ONE<<16 | gc.TUINT16,
- gc.ONE<<16 | gc.TINT32,
- gc.ONE<<16 | gc.TUINT32,
- gc.ONE<<16 | gc.TINT64,
- gc.ONE<<16 | gc.TUINT64,
- gc.ONE<<16 | gc.TPTR32,
- gc.ONE<<16 | gc.TPTR64,
- gc.ONE<<16 | gc.TFLOAT32,
- gc.ONE<<16 | gc.TFLOAT64:
+ case ONE_ | gc.TBOOL,
+ ONE_ | gc.TINT8,
+ ONE_ | gc.TUINT8,
+ ONE_ | gc.TINT16,
+ ONE_ | gc.TUINT16,
+ ONE_ | gc.TINT32,
+ ONE_ | gc.TUINT32,
+ ONE_ | gc.TINT64,
+ ONE_ | gc.TUINT64,
+ ONE_ | gc.TPTR32,
+ ONE_ | gc.TPTR64,
+ ONE_ | gc.TFLOAT32,
+ ONE_ | gc.TFLOAT64:
a = arm64.ABNE
- case gc.OLT<<16 | gc.TINT8,
- gc.OLT<<16 | gc.TINT16,
- gc.OLT<<16 | gc.TINT32,
- gc.OLT<<16 | gc.TINT64:
+ case OLT_ | gc.TINT8,
+ OLT_ | gc.TINT16,
+ OLT_ | gc.TINT32,
+ OLT_ | gc.TINT64:
a = arm64.ABLT
- case gc.OLT<<16 | gc.TUINT8,
- gc.OLT<<16 | gc.TUINT16,
- gc.OLT<<16 | gc.TUINT32,
- gc.OLT<<16 | gc.TUINT64,
- gc.OLT<<16 | gc.TFLOAT32,
- gc.OLT<<16 | gc.TFLOAT64:
+ case OLT_ | gc.TUINT8,
+ OLT_ | gc.TUINT16,
+ OLT_ | gc.TUINT32,
+ OLT_ | gc.TUINT64,
+ OLT_ | gc.TFLOAT32,
+ OLT_ | gc.TFLOAT64:
a = arm64.ABLO
- case gc.OLE<<16 | gc.TINT8,
- gc.OLE<<16 | gc.TINT16,
- gc.OLE<<16 | gc.TINT32,
- gc.OLE<<16 | gc.TINT64:
+ case OLE_ | gc.TINT8,
+ OLE_ | gc.TINT16,
+ OLE_ | gc.TINT32,
+ OLE_ | gc.TINT64:
a = arm64.ABLE
- case gc.OLE<<16 | gc.TUINT8,
- gc.OLE<<16 | gc.TUINT16,
- gc.OLE<<16 | gc.TUINT32,
- gc.OLE<<16 | gc.TUINT64,
- gc.OLE<<16 | gc.TFLOAT32,
- gc.OLE<<16 | gc.TFLOAT64:
+ case OLE_ | gc.TUINT8,
+ OLE_ | gc.TUINT16,
+ OLE_ | gc.TUINT32,
+ OLE_ | gc.TUINT64,
+ OLE_ | gc.TFLOAT32,
+ OLE_ | gc.TFLOAT64:
a = arm64.ABLS
- case gc.OGT<<16 | gc.TINT8,
- gc.OGT<<16 | gc.TINT16,
- gc.OGT<<16 | gc.TINT32,
- gc.OGT<<16 | gc.TINT64,
- gc.OGT<<16 | gc.TFLOAT32,
- gc.OGT<<16 | gc.TFLOAT64:
+ case OGT_ | gc.TINT8,
+ OGT_ | gc.TINT16,
+ OGT_ | gc.TINT32,
+ OGT_ | gc.TINT64,
+ OGT_ | gc.TFLOAT32,
+ OGT_ | gc.TFLOAT64:
a = arm64.ABGT
- case gc.OGT<<16 | gc.TUINT8,
- gc.OGT<<16 | gc.TUINT16,
- gc.OGT<<16 | gc.TUINT32,
- gc.OGT<<16 | gc.TUINT64:
+ case OGT_ | gc.TUINT8,
+ OGT_ | gc.TUINT16,
+ OGT_ | gc.TUINT32,
+ OGT_ | gc.TUINT64:
a = arm64.ABHI
- case gc.OGE<<16 | gc.TINT8,
- gc.OGE<<16 | gc.TINT16,
- gc.OGE<<16 | gc.TINT32,
- gc.OGE<<16 | gc.TINT64,
- gc.OGE<<16 | gc.TFLOAT32,
- gc.OGE<<16 | gc.TFLOAT64:
+ case OGE_ | gc.TINT8,
+ OGE_ | gc.TINT16,
+ OGE_ | gc.TINT32,
+ OGE_ | gc.TINT64,
+ OGE_ | gc.TFLOAT32,
+ OGE_ | gc.TFLOAT64:
a = arm64.ABGE
- case gc.OGE<<16 | gc.TUINT8,
- gc.OGE<<16 | gc.TUINT16,
- gc.OGE<<16 | gc.TUINT32,
- gc.OGE<<16 | gc.TUINT64:
+ case OGE_ | gc.TUINT8,
+ OGE_ | gc.TUINT16,
+ OGE_ | gc.TUINT32,
+ OGE_ | gc.TUINT64:
a = arm64.ABHS
- case gc.OCMP<<16 | gc.TBOOL,
- gc.OCMP<<16 | gc.TINT8,
- gc.OCMP<<16 | gc.TINT16,
- gc.OCMP<<16 | gc.TINT32,
- gc.OCMP<<16 | gc.TPTR32,
- gc.OCMP<<16 | gc.TINT64,
- gc.OCMP<<16 | gc.TUINT8,
- gc.OCMP<<16 | gc.TUINT16,
- gc.OCMP<<16 | gc.TUINT32,
- gc.OCMP<<16 | gc.TUINT64,
- gc.OCMP<<16 | gc.TPTR64:
+ case OCMP_ | gc.TBOOL,
+ OCMP_ | gc.TINT8,
+ OCMP_ | gc.TINT16,
+ OCMP_ | gc.TINT32,
+ OCMP_ | gc.TPTR32,
+ OCMP_ | gc.TINT64,
+ OCMP_ | gc.TUINT8,
+ OCMP_ | gc.TUINT16,
+ OCMP_ | gc.TUINT32,
+ OCMP_ | gc.TUINT64,
+ OCMP_ | gc.TPTR64:
a = arm64.ACMP
- case gc.OCMP<<16 | gc.TFLOAT32:
+ case OCMP_ | gc.TFLOAT32:
a = arm64.AFCMPS
- case gc.OCMP<<16 | gc.TFLOAT64:
+ case OCMP_ | gc.TFLOAT64:
a = arm64.AFCMPD
- case gc.OAS<<16 | gc.TBOOL,
- gc.OAS<<16 | gc.TINT8:
+ case OAS_ | gc.TBOOL,
+ OAS_ | gc.TINT8:
a = arm64.AMOVB
- case gc.OAS<<16 | gc.TUINT8:
+ case OAS_ | gc.TUINT8:
a = arm64.AMOVBU
- case gc.OAS<<16 | gc.TINT16:
+ case OAS_ | gc.TINT16:
a = arm64.AMOVH
- case gc.OAS<<16 | gc.TUINT16:
+ case OAS_ | gc.TUINT16:
a = arm64.AMOVHU
- case gc.OAS<<16 | gc.TINT32:
+ case OAS_ | gc.TINT32:
a = arm64.AMOVW
- case gc.OAS<<16 | gc.TUINT32,
- gc.OAS<<16 | gc.TPTR32:
+ case OAS_ | gc.TUINT32,
+ OAS_ | gc.TPTR32:
a = arm64.AMOVWU
- case gc.OAS<<16 | gc.TINT64,
- gc.OAS<<16 | gc.TUINT64,
- gc.OAS<<16 | gc.TPTR64:
+ case OAS_ | gc.TINT64,
+ OAS_ | gc.TUINT64,
+ OAS_ | gc.TPTR64:
a = arm64.AMOVD
- case gc.OAS<<16 | gc.TFLOAT32:
+ case OAS_ | gc.TFLOAT32:
a = arm64.AFMOVS
- case gc.OAS<<16 | gc.TFLOAT64:
+ case OAS_ | gc.TFLOAT64:
a = arm64.AFMOVD
- case gc.OADD<<16 | gc.TINT8,
- gc.OADD<<16 | gc.TUINT8,
- gc.OADD<<16 | gc.TINT16,
- gc.OADD<<16 | gc.TUINT16,
- gc.OADD<<16 | gc.TINT32,
- gc.OADD<<16 | gc.TUINT32,
- gc.OADD<<16 | gc.TPTR32,
- gc.OADD<<16 | gc.TINT64,
- gc.OADD<<16 | gc.TUINT64,
- gc.OADD<<16 | gc.TPTR64:
+ case OADD_ | gc.TINT8,
+ OADD_ | gc.TUINT8,
+ OADD_ | gc.TINT16,
+ OADD_ | gc.TUINT16,
+ OADD_ | gc.TINT32,
+ OADD_ | gc.TUINT32,
+ OADD_ | gc.TPTR32,
+ OADD_ | gc.TINT64,
+ OADD_ | gc.TUINT64,
+ OADD_ | gc.TPTR64:
a = arm64.AADD
- case gc.OADD<<16 | gc.TFLOAT32:
+ case OADD_ | gc.TFLOAT32:
a = arm64.AFADDS
- case gc.OADD<<16 | gc.TFLOAT64:
+ case OADD_ | gc.TFLOAT64:
a = arm64.AFADDD
- case gc.OSUB<<16 | gc.TINT8,
- gc.OSUB<<16 | gc.TUINT8,
- gc.OSUB<<16 | gc.TINT16,
- gc.OSUB<<16 | gc.TUINT16,
- gc.OSUB<<16 | gc.TINT32,
- gc.OSUB<<16 | gc.TUINT32,
- gc.OSUB<<16 | gc.TPTR32,
- gc.OSUB<<16 | gc.TINT64,
- gc.OSUB<<16 | gc.TUINT64,
- gc.OSUB<<16 | gc.TPTR64:
+ case OSUB_ | gc.TINT8,
+ OSUB_ | gc.TUINT8,
+ OSUB_ | gc.TINT16,
+ OSUB_ | gc.TUINT16,
+ OSUB_ | gc.TINT32,
+ OSUB_ | gc.TUINT32,
+ OSUB_ | gc.TPTR32,
+ OSUB_ | gc.TINT64,
+ OSUB_ | gc.TUINT64,
+ OSUB_ | gc.TPTR64:
a = arm64.ASUB
- case gc.OSUB<<16 | gc.TFLOAT32:
+ case OSUB_ | gc.TFLOAT32:
a = arm64.AFSUBS
- case gc.OSUB<<16 | gc.TFLOAT64:
+ case OSUB_ | gc.TFLOAT64:
a = arm64.AFSUBD
- case gc.OMINUS<<16 | gc.TINT8,
- gc.OMINUS<<16 | gc.TUINT8,
- gc.OMINUS<<16 | gc.TINT16,
- gc.OMINUS<<16 | gc.TUINT16,
- gc.OMINUS<<16 | gc.TINT32,
- gc.OMINUS<<16 | gc.TUINT32,
- gc.OMINUS<<16 | gc.TPTR32,
- gc.OMINUS<<16 | gc.TINT64,
- gc.OMINUS<<16 | gc.TUINT64,
- gc.OMINUS<<16 | gc.TPTR64:
+ case OMINUS_ | gc.TINT8,
+ OMINUS_ | gc.TUINT8,
+ OMINUS_ | gc.TINT16,
+ OMINUS_ | gc.TUINT16,
+ OMINUS_ | gc.TINT32,
+ OMINUS_ | gc.TUINT32,
+ OMINUS_ | gc.TPTR32,
+ OMINUS_ | gc.TINT64,
+ OMINUS_ | gc.TUINT64,
+ OMINUS_ | gc.TPTR64:
a = arm64.ANEG
- case gc.OMINUS<<16 | gc.TFLOAT32:
+ case OMINUS_ | gc.TFLOAT32:
a = arm64.AFNEGS
- case gc.OMINUS<<16 | gc.TFLOAT64:
+ case OMINUS_ | gc.TFLOAT64:
a = arm64.AFNEGD
- case gc.OAND<<16 | gc.TINT8,
- gc.OAND<<16 | gc.TUINT8,
- gc.OAND<<16 | gc.TINT16,
- gc.OAND<<16 | gc.TUINT16,
- gc.OAND<<16 | gc.TINT32,
- gc.OAND<<16 | gc.TUINT32,
- gc.OAND<<16 | gc.TPTR32,
- gc.OAND<<16 | gc.TINT64,
- gc.OAND<<16 | gc.TUINT64,
- gc.OAND<<16 | gc.TPTR64:
+ case OAND_ | gc.TINT8,
+ OAND_ | gc.TUINT8,
+ OAND_ | gc.TINT16,
+ OAND_ | gc.TUINT16,
+ OAND_ | gc.TINT32,
+ OAND_ | gc.TUINT32,
+ OAND_ | gc.TPTR32,
+ OAND_ | gc.TINT64,
+ OAND_ | gc.TUINT64,
+ OAND_ | gc.TPTR64:
a = arm64.AAND
- case gc.OOR<<16 | gc.TINT8,
- gc.OOR<<16 | gc.TUINT8,
- gc.OOR<<16 | gc.TINT16,
- gc.OOR<<16 | gc.TUINT16,
- gc.OOR<<16 | gc.TINT32,
- gc.OOR<<16 | gc.TUINT32,
- gc.OOR<<16 | gc.TPTR32,
- gc.OOR<<16 | gc.TINT64,
- gc.OOR<<16 | gc.TUINT64,
- gc.OOR<<16 | gc.TPTR64:
+ case OOR_ | gc.TINT8,
+ OOR_ | gc.TUINT8,
+ OOR_ | gc.TINT16,
+ OOR_ | gc.TUINT16,
+ OOR_ | gc.TINT32,
+ OOR_ | gc.TUINT32,
+ OOR_ | gc.TPTR32,
+ OOR_ | gc.TINT64,
+ OOR_ | gc.TUINT64,
+ OOR_ | gc.TPTR64:
a = arm64.AORR
- case gc.OXOR<<16 | gc.TINT8,
- gc.OXOR<<16 | gc.TUINT8,
- gc.OXOR<<16 | gc.TINT16,
- gc.OXOR<<16 | gc.TUINT16,
- gc.OXOR<<16 | gc.TINT32,
- gc.OXOR<<16 | gc.TUINT32,
- gc.OXOR<<16 | gc.TPTR32,
- gc.OXOR<<16 | gc.TINT64,
- gc.OXOR<<16 | gc.TUINT64,
- gc.OXOR<<16 | gc.TPTR64:
+ case OXOR_ | gc.TINT8,
+ OXOR_ | gc.TUINT8,
+ OXOR_ | gc.TINT16,
+ OXOR_ | gc.TUINT16,
+ OXOR_ | gc.TINT32,
+ OXOR_ | gc.TUINT32,
+ OXOR_ | gc.TPTR32,
+ OXOR_ | gc.TINT64,
+ OXOR_ | gc.TUINT64,
+ OXOR_ | gc.TPTR64:
a = arm64.AEOR
// TODO(minux): handle rotates
// a = 0//???; RLDC?
// break;
- case gc.OLSH<<16 | gc.TINT8,
- gc.OLSH<<16 | gc.TUINT8,
- gc.OLSH<<16 | gc.TINT16,
- gc.OLSH<<16 | gc.TUINT16,
- gc.OLSH<<16 | gc.TINT32,
- gc.OLSH<<16 | gc.TUINT32,
- gc.OLSH<<16 | gc.TPTR32,
- gc.OLSH<<16 | gc.TINT64,
- gc.OLSH<<16 | gc.TUINT64,
- gc.OLSH<<16 | gc.TPTR64:
+ case OLSH_ | gc.TINT8,
+ OLSH_ | gc.TUINT8,
+ OLSH_ | gc.TINT16,
+ OLSH_ | gc.TUINT16,
+ OLSH_ | gc.TINT32,
+ OLSH_ | gc.TUINT32,
+ OLSH_ | gc.TPTR32,
+ OLSH_ | gc.TINT64,
+ OLSH_ | gc.TUINT64,
+ OLSH_ | gc.TPTR64:
a = arm64.ALSL
- case gc.ORSH<<16 | gc.TUINT8,
- gc.ORSH<<16 | gc.TUINT16,
- gc.ORSH<<16 | gc.TUINT32,
- gc.ORSH<<16 | gc.TPTR32,
- gc.ORSH<<16 | gc.TUINT64,
- gc.ORSH<<16 | gc.TPTR64:
+ case ORSH_ | gc.TUINT8,
+ ORSH_ | gc.TUINT16,
+ ORSH_ | gc.TUINT32,
+ ORSH_ | gc.TPTR32,
+ ORSH_ | gc.TUINT64,
+ ORSH_ | gc.TPTR64:
a = arm64.ALSR
- case gc.ORSH<<16 | gc.TINT8,
- gc.ORSH<<16 | gc.TINT16,
- gc.ORSH<<16 | gc.TINT32,
- gc.ORSH<<16 | gc.TINT64:
+ case ORSH_ | gc.TINT8,
+ ORSH_ | gc.TINT16,
+ ORSH_ | gc.TINT32,
+ ORSH_ | gc.TINT64:
a = arm64.AASR
// TODO(minux): handle rotates
// a = 0//??? RLDC??
// break;
- case gc.OHMUL<<16 | gc.TINT64:
+ case OHMUL_ | gc.TINT64:
a = arm64.ASMULH
- case gc.OHMUL<<16 | gc.TUINT64,
- gc.OHMUL<<16 | gc.TPTR64:
+ case OHMUL_ | gc.TUINT64,
+ OHMUL_ | gc.TPTR64:
a = arm64.AUMULH
- case gc.OMUL<<16 | gc.TINT8,
- gc.OMUL<<16 | gc.TINT16,
- gc.OMUL<<16 | gc.TINT32:
+ case OMUL_ | gc.TINT8,
+ OMUL_ | gc.TINT16,
+ OMUL_ | gc.TINT32:
a = arm64.ASMULL
- case gc.OMUL<<16 | gc.TINT64:
+ case OMUL_ | gc.TINT64:
a = arm64.AMUL
- case gc.OMUL<<16 | gc.TUINT8,
- gc.OMUL<<16 | gc.TUINT16,
- gc.OMUL<<16 | gc.TUINT32,
- gc.OMUL<<16 | gc.TPTR32:
+ case OMUL_ | gc.TUINT8,
+ OMUL_ | gc.TUINT16,
+ OMUL_ | gc.TUINT32,
+ OMUL_ | gc.TPTR32:
// don't use word multiply, the high 32-bit are undefined.
a = arm64.AUMULL
- case gc.OMUL<<16 | gc.TUINT64,
- gc.OMUL<<16 | gc.TPTR64:
+ case OMUL_ | gc.TUINT64,
+ OMUL_ | gc.TPTR64:
a = arm64.AMUL // for 64-bit multiplies, signedness doesn't matter.
- case gc.OMUL<<16 | gc.TFLOAT32:
+ case OMUL_ | gc.TFLOAT32:
a = arm64.AFMULS
- case gc.OMUL<<16 | gc.TFLOAT64:
+ case OMUL_ | gc.TFLOAT64:
a = arm64.AFMULD
- case gc.ODIV<<16 | gc.TINT8,
- gc.ODIV<<16 | gc.TINT16,
- gc.ODIV<<16 | gc.TINT32,
- gc.ODIV<<16 | gc.TINT64:
+ case ODIV_ | gc.TINT8,
+ ODIV_ | gc.TINT16,
+ ODIV_ | gc.TINT32,
+ ODIV_ | gc.TINT64:
a = arm64.ASDIV
- case gc.ODIV<<16 | gc.TUINT8,
- gc.ODIV<<16 | gc.TUINT16,
- gc.ODIV<<16 | gc.TUINT32,
- gc.ODIV<<16 | gc.TPTR32,
- gc.ODIV<<16 | gc.TUINT64,
- gc.ODIV<<16 | gc.TPTR64:
+ case ODIV_ | gc.TUINT8,
+ ODIV_ | gc.TUINT16,
+ ODIV_ | gc.TUINT32,
+ ODIV_ | gc.TPTR32,
+ ODIV_ | gc.TUINT64,
+ ODIV_ | gc.TPTR64:
a = arm64.AUDIV
- case gc.ODIV<<16 | gc.TFLOAT32:
+ case ODIV_ | gc.TFLOAT32:
a = arm64.AFDIVS
- case gc.ODIV<<16 | gc.TFLOAT64:
+ case ODIV_ | gc.TFLOAT64:
a = arm64.AFDIVD
- case gc.OSQRT<<16 | gc.TFLOAT64:
+ case OSQRT_ | gc.TFLOAT64:
a = arm64.AFSQRTD
}
t.Width = -2
t.Align = 0
- et := int32(t.Etype)
+ et := t.Etype
switch et {
case TFUNC, TCHAN, TMAP, TSTRING:
break
// simtype == 0 during bootstrap
default:
if Simtype[t.Etype] != 0 {
- et = int32(Simtype[t.Etype])
+ et = Simtype[t.Etype]
}
}
Fatalf("typeinit before betypeinit")
}
- for i := 0; i < NTYPE; i++ {
- Simtype[i] = uint8(i)
+ for et := EType(0); et < NTYPE; et++ {
+ Simtype[et] = et
}
Types[TPTR32] = typ(TPTR32)
Tptr = TPTR64
}
- for i := TINT8; i <= TUINT64; i++ {
- Isint[i] = true
+ for et := TINT8; et <= TUINT64; et++ {
+ Isint[et] = true
}
Isint[TINT] = true
Isint[TUINT] = true
Issigned[TINT64] = true
// initialize okfor
- for i := 0; i < NTYPE; i++ {
- if Isint[i] || i == TIDEAL {
- okforeq[i] = true
- okforcmp[i] = true
- okforarith[i] = true
- okforadd[i] = true
- okforand[i] = true
- okforconst[i] = true
- issimple[i] = true
- Minintval[i] = new(Mpint)
- Maxintval[i] = new(Mpint)
+ for et := EType(0); et < NTYPE; et++ {
+ if Isint[et] || et == TIDEAL {
+ okforeq[et] = true
+ okforcmp[et] = true
+ okforarith[et] = true
+ okforadd[et] = true
+ okforand[et] = true
+ okforconst[et] = true
+ issimple[et] = true
+ Minintval[et] = new(Mpint)
+ Maxintval[et] = new(Mpint)
}
- if Isfloat[i] {
- okforeq[i] = true
- okforcmp[i] = true
- okforadd[i] = true
- okforarith[i] = true
- okforconst[i] = true
- issimple[i] = true
- minfltval[i] = newMpflt()
- maxfltval[i] = newMpflt()
+ if Isfloat[et] {
+ okforeq[et] = true
+ okforcmp[et] = true
+ okforadd[et] = true
+ okforarith[et] = true
+ okforconst[et] = true
+ issimple[et] = true
+ minfltval[et] = newMpflt()
+ maxfltval[et] = newMpflt()
}
- if Iscomplex[i] {
- okforeq[i] = true
- okforadd[i] = true
- okforarith[i] = true
- okforconst[i] = true
- issimple[i] = true
+ if Iscomplex[et] {
+ okforeq[et] = true
+ okforadd[et] = true
+ okforarith[et] = true
+ okforconst[et] = true
+ issimple[et] = true
}
}
Types[TINTER] = typ(TINTER)
// simple aliases
- Simtype[TMAP] = uint8(Tptr)
+ Simtype[TMAP] = Tptr
- Simtype[TCHAN] = uint8(Tptr)
- Simtype[TFUNC] = uint8(Tptr)
- Simtype[TUNSAFEPTR] = uint8(Tptr)
+ Simtype[TCHAN] = Tptr
+ Simtype[TFUNC] = Tptr
+ Simtype[TUNSAFEPTR] = Tptr
// pick up the backend thearch.typedefs
- var s1 *Sym
- var etype int
- var sameas int
- var s *Sym
for i = range Thearch.Typedefs {
- s = Lookup(Thearch.Typedefs[i].Name)
- s1 = Pkglookup(Thearch.Typedefs[i].Name, builtinpkg)
+ s := Lookup(Thearch.Typedefs[i].Name)
+ s1 := Pkglookup(Thearch.Typedefs[i].Name, builtinpkg)
- etype = Thearch.Typedefs[i].Etype
- if etype < 0 || etype >= len(Types) {
+ etype := Thearch.Typedefs[i].Etype
+ if int(etype) >= len(Types) {
Fatalf("typeinit: %s bad etype", s.Name)
}
- sameas = Thearch.Typedefs[i].Sameas
- if sameas < 0 || sameas >= len(Types) {
+ sameas := Thearch.Typedefs[i].Sameas
+ if int(sameas) >= len(Types) {
Fatalf("typeinit: %s bad sameas", s.Name)
}
- Simtype[etype] = uint8(sameas)
+ Simtype[etype] = sameas
minfltval[etype] = minfltval[sameas]
maxfltval[etype] = maxfltval[sameas]
Minintval[etype] = Minintval[sameas]
return importpkg.Lookup(name)
}
-func (p *importer) newtyp(etype int) *Type {
+func (p *importer) newtyp(etype EType) *Type {
t := typ(etype)
p.typList = append(p.typList, t)
return t
}
if wb {
- if int(Simtype[res.Type.Etype]) != Tptr {
+ if Simtype[res.Type.Etype] != Tptr {
Fatalf("cgen_wb of type %v", res.Type)
}
if n.Ullman >= UINF {
goto sbop
}
- a := Thearch.Optoas(int(n.Op), nl.Type)
+ a := Thearch.Optoas(n.Op, nl.Type)
// unary
var n1 Node
Regalloc(&n1, nl.Type, res)
OXOR,
OADD,
OMUL:
- if n.Op == OMUL && Thearch.Cgen_bmul != nil && Thearch.Cgen_bmul(int(n.Op), nl, nr, res) {
+ if n.Op == OMUL && Thearch.Cgen_bmul != nil && Thearch.Cgen_bmul(n.Op, nl, nr, res) {
break
}
- a = Thearch.Optoas(int(n.Op), nl.Type)
+ a = Thearch.Optoas(n.Op, nl.Type)
goto sbop
// asymmetric binary
case OSUB:
- a = Thearch.Optoas(int(n.Op), nl.Type)
+ a = Thearch.Optoas(n.Op, nl.Type)
goto abop
case OHMUL:
case OMOD, ODIV:
if Isfloat[n.Type.Etype] || Thearch.Dodiv == nil {
- a = Thearch.Optoas(int(n.Op), nl.Type)
+ a = Thearch.Optoas(n.Op, nl.Type)
goto abop
}
var n1 Node
Regalloc(&n1, nl.Type, res)
Cgen(nl, &n1)
- cgen_div(int(n.Op), &n1, nr, res)
+ cgen_div(n.Op, &n1, nr, res)
Regfree(&n1)
} else {
var n2 Node
n2 = *nr
}
- cgen_div(int(n.Op), nl, &n2, res)
+ cgen_div(n.Op, nl, &n2, res)
if n2.Op != OLITERAL {
Regfree(&n2)
}
}
case OLSH, ORSH, OLROT:
- Thearch.Cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
+ Thearch.Cgen_shift(n.Op, n.Bounded, nl, nr, res)
}
return
// n.Op is one of OEQ, ONE, OLT, OGT, OLE, OGE
nl := n.Left
nr := n.Right
- a := int(n.Op)
+ op := n.Op
if !wantTrue {
if Isfloat[nr.Type.Etype] {
return
}
- a = Brcom(a)
+ op = Brcom(op)
}
wantTrue = true
// make simplest on right
if nl.Op == OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < UINF) {
- a = Brrev(a)
+ op = Brrev(op)
nl, nr = nr, nl
}
if Isslice(nl.Type) || Isinter(nl.Type) {
// front end should only leave cmp to literal nil
- if (a != OEQ && a != ONE) || nr.Op != OLITERAL {
+ if (op != OEQ && op != ONE) || nr.Op != OLITERAL {
if Isslice(nl.Type) {
Yyerror("illegal slice comparison")
} else {
Regalloc(&tmp, ptr.Type, &ptr)
Cgen(&ptr, &tmp)
Regfree(&ptr)
- bgenNonZero(&tmp, res, a == OEQ != wantTrue, likely, to)
+ bgenNonZero(&tmp, res, op == OEQ != wantTrue, likely, to)
Regfree(&tmp)
return
}
if Iscomplex[nl.Type.Etype] {
- complexbool(a, nl, nr, res, wantTrue, likely, to)
+ complexbool(op, nl, nr, res, wantTrue, likely, to)
return
}
if !nr.Addable {
nr = CgenTemp(nr)
}
- Thearch.Cmp64(nl, nr, a, likely, to)
+ Thearch.Cmp64(nl, nr, op, likely, to)
return
}
if Smallintconst(nr) && Ctxt.Arch.Thechar != '9' {
Thearch.Gins(Thearch.Optoas(OCMP, nr.Type), nl, nr)
- bins(nr.Type, res, a, likely, to)
+ bins(nr.Type, res, op, likely, to)
return
}
l, r := nl, nr
// On x86, only < and <= work right with NaN; reverse if needed
- if Ctxt.Arch.Thechar == '6' && Isfloat[nl.Type.Etype] && (a == OGT || a == OGE) {
+ if Ctxt.Arch.Thechar == '6' && Isfloat[nl.Type.Etype] && (op == OGT || op == OGE) {
l, r = r, l
- a = Brrev(a)
+ op = Brrev(op)
}
// Do the comparison.
switch n.Op {
case ONE:
Patch(Gbranch(Thearch.Optoas(OPS, nr.Type), nr.Type, likely), to)
- Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
+ Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
default:
p := Gbranch(Thearch.Optoas(OPS, nr.Type), nr.Type, -likely)
- Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
+ Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
Patch(p, Pc)
}
return
// On arm64 and ppc64, <= and >= mishandle NaN. Must decompose into < or > and =.
// TODO(josh): Convert a <= b to b > a instead?
case OLE, OGE:
- if a == OLE {
- a = OLT
+ if op == OLE {
+ op = OLT
} else {
- a = OGT
+ op = OGT
}
- Patch(Gbranch(Thearch.Optoas(a, nr.Type), nr.Type, likely), to)
+ Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
Patch(Gbranch(Thearch.Optoas(OEQ, nr.Type), nr.Type, likely), to)
return
}
}
// Not a special case. Insert the conditional jump or value gen.
- bins(nr.Type, res, a, likely, to)
+ bins(nr.Type, res, op, likely, to)
}
func bgenNonZero(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
// TODO: Optimize on systems that can compare to zero easily.
- a := ONE
+ var op Op = ONE
if !wantTrue {
- a = OEQ
+ op = OEQ
}
var zero Node
Nodconst(&zero, n.Type, 0)
Thearch.Gins(Thearch.Optoas(OCMP, n.Type), n, &zero)
- bins(n.Type, res, a, likely, to)
+ bins(n.Type, res, op, likely, to)
}
// bins inserts an instruction to handle the result of a compare.
// If res is non-nil, it inserts appropriate value generation instructions.
// If res is nil, it inserts a branch to to.
-func bins(typ *Type, res *Node, a, likely int, to *obj.Prog) {
- a = Thearch.Optoas(a, typ)
+func bins(typ *Type, res *Node, op Op, likely int, to *obj.Prog) {
+ a := Thearch.Optoas(op, typ)
if res != nil {
// value gen
Thearch.Ginsboolval(a, res)
// generate division according to op, one of:
// res = nl / nr
// res = nl % nr
-func cgen_div(op int, nl *Node, nr *Node, res *Node) {
+func cgen_div(op Op, nl *Node, nr *Node, res *Node) {
var w int
// TODO(rsc): arm64 needs to support the relevant instructions
// avoid constant conversions in switches below
const (
- CTINT_ = uint32(CTINT)
- CTRUNE_ = uint32(CTRUNE)
- CTFLT_ = uint32(CTFLT)
- CTCPLX_ = uint32(CTCPLX)
- CTSTR_ = uint32(CTSTR)
- CTBOOL_ = uint32(CTBOOL)
- CTNIL_ = uint32(CTNIL)
+ CTINT_ = uint32(CTINT)
+ CTRUNE_ = uint32(CTRUNE)
+ CTFLT_ = uint32(CTFLT)
+ CTCPLX_ = uint32(CTCPLX)
+ CTSTR_ = uint32(CTSTR)
+ CTBOOL_ = uint32(CTBOOL)
+ CTNIL_ = uint32(CTNIL)
+ OCONV_ = uint32(OCONV) << 16
+ OARRAYBYTESTR_ = uint32(OARRAYBYTESTR) << 16
+ OPLUS_ = uint32(OPLUS) << 16
+ OMINUS_ = uint32(OMINUS) << 16
+ OCOM_ = uint32(OCOM) << 16
+ ONOT_ = uint32(ONOT) << 16
+ OLSH_ = uint32(OLSH) << 16
+ ORSH_ = uint32(ORSH) << 16
+ OADD_ = uint32(OADD) << 16
+ OSUB_ = uint32(OSUB) << 16
+ OMUL_ = uint32(OMUL) << 16
+ ODIV_ = uint32(ODIV) << 16
+ OMOD_ = uint32(OMOD) << 16
+ OOR_ = uint32(OOR) << 16
+ OAND_ = uint32(OAND) << 16
+ OANDNOT_ = uint32(OANDNOT) << 16
+ OXOR_ = uint32(OXOR) << 16
+ OEQ_ = uint32(OEQ) << 16
+ ONE_ = uint32(ONE) << 16
+ OLT_ = uint32(OLT) << 16
+ OLE_ = uint32(OLE) << 16
+ OGE_ = uint32(OGE) << 16
+ OGT_ = uint32(OGT) << 16
+ OOROR_ = uint32(OOROR) << 16
+ OANDAND_ = uint32(OANDAND) << 16
)
nr := n.Right
}
return
- case OCONV<<16 | CTNIL_,
- OARRAYBYTESTR<<16 | CTNIL_:
+ case OCONV_ | CTNIL_,
+ OARRAYBYTESTR_ | CTNIL_:
if n.Type.Etype == TSTRING {
v = tostr(v)
nl.Type = n.Type
fallthrough
// fall through
- case OCONV<<16 | CTINT_,
- OCONV<<16 | CTRUNE_,
- OCONV<<16 | CTFLT_,
- OCONV<<16 | CTSTR_:
+ case OCONV_ | CTINT_,
+ OCONV_ | CTRUNE_,
+ OCONV_ | CTFLT_,
+ OCONV_ | CTSTR_:
convlit1(&nl, n.Type, true)
v = nl.Val()
- case OPLUS<<16 | CTINT_,
- OPLUS<<16 | CTRUNE_:
+ case OPLUS_ | CTINT_,
+ OPLUS_ | CTRUNE_:
break
- case OMINUS<<16 | CTINT_,
- OMINUS<<16 | CTRUNE_:
+ case OMINUS_ | CTINT_,
+ OMINUS_ | CTRUNE_:
mpnegfix(v.U.(*Mpint))
- case OCOM<<16 | CTINT_,
- OCOM<<16 | CTRUNE_:
+ case OCOM_ | CTINT_,
+ OCOM_ | CTRUNE_:
et := Txxx
if nl.Type != nil {
et = int(nl.Type.Etype)
mpxorfixfix(v.U.(*Mpint), &b)
- case OPLUS<<16 | CTFLT_:
+ case OPLUS_ | CTFLT_:
break
- case OMINUS<<16 | CTFLT_:
+ case OMINUS_ | CTFLT_:
mpnegflt(v.U.(*Mpflt))
- case OPLUS<<16 | CTCPLX_:
+ case OPLUS_ | CTCPLX_:
break
- case OMINUS<<16 | CTCPLX_:
+ case OMINUS_ | CTCPLX_:
mpnegflt(&v.U.(*Mpcplx).Real)
mpnegflt(&v.U.(*Mpcplx).Imag)
- case ONOT<<16 | CTBOOL_:
+ case ONOT_ | CTBOOL_:
if !v.U.(bool) {
goto settrue
}
default:
goto illegal
- case OADD<<16 | CTINT_,
- OADD<<16 | CTRUNE_:
+ case OADD_ | CTINT_,
+ OADD_ | CTRUNE_:
mpaddfixfix(v.U.(*Mpint), rv.U.(*Mpint), 0)
- case OSUB<<16 | CTINT_,
- OSUB<<16 | CTRUNE_:
+ case OSUB_ | CTINT_,
+ OSUB_ | CTRUNE_:
mpsubfixfix(v.U.(*Mpint), rv.U.(*Mpint))
- case OMUL<<16 | CTINT_,
- OMUL<<16 | CTRUNE_:
+ case OMUL_ | CTINT_,
+ OMUL_ | CTRUNE_:
mpmulfixfix(v.U.(*Mpint), rv.U.(*Mpint))
- case ODIV<<16 | CTINT_,
- ODIV<<16 | CTRUNE_:
+ case ODIV_ | CTINT_,
+ ODIV_ | CTRUNE_:
if mpcmpfixc(rv.U.(*Mpint), 0) == 0 {
Yyerror("division by zero")
mpsetovf(v.U.(*Mpint))
mpdivfixfix(v.U.(*Mpint), rv.U.(*Mpint))
- case OMOD<<16 | CTINT_,
- OMOD<<16 | CTRUNE_:
+ case OMOD_ | CTINT_,
+ OMOD_ | CTRUNE_:
if mpcmpfixc(rv.U.(*Mpint), 0) == 0 {
Yyerror("division by zero")
mpsetovf(v.U.(*Mpint))
mpmodfixfix(v.U.(*Mpint), rv.U.(*Mpint))
- case OLSH<<16 | CTINT_,
- OLSH<<16 | CTRUNE_:
+ case OLSH_ | CTINT_,
+ OLSH_ | CTRUNE_:
mplshfixfix(v.U.(*Mpint), rv.U.(*Mpint))
- case ORSH<<16 | CTINT_,
- ORSH<<16 | CTRUNE_:
+ case ORSH_ | CTINT_,
+ ORSH_ | CTRUNE_:
mprshfixfix(v.U.(*Mpint), rv.U.(*Mpint))
- case OOR<<16 | CTINT_,
- OOR<<16 | CTRUNE_:
+ case OOR_ | CTINT_,
+ OOR_ | CTRUNE_:
mporfixfix(v.U.(*Mpint), rv.U.(*Mpint))
- case OAND<<16 | CTINT_,
- OAND<<16 | CTRUNE_:
+ case OAND_ | CTINT_,
+ OAND_ | CTRUNE_:
mpandfixfix(v.U.(*Mpint), rv.U.(*Mpint))
- case OANDNOT<<16 | CTINT_,
- OANDNOT<<16 | CTRUNE_:
+ case OANDNOT_ | CTINT_,
+ OANDNOT_ | CTRUNE_:
mpandnotfixfix(v.U.(*Mpint), rv.U.(*Mpint))
- case OXOR<<16 | CTINT_,
- OXOR<<16 | CTRUNE_:
+ case OXOR_ | CTINT_,
+ OXOR_ | CTRUNE_:
mpxorfixfix(v.U.(*Mpint), rv.U.(*Mpint))
- case OADD<<16 | CTFLT_:
+ case OADD_ | CTFLT_:
mpaddfltflt(v.U.(*Mpflt), rv.U.(*Mpflt))
- case OSUB<<16 | CTFLT_:
+ case OSUB_ | CTFLT_:
mpsubfltflt(v.U.(*Mpflt), rv.U.(*Mpflt))
- case OMUL<<16 | CTFLT_:
+ case OMUL_ | CTFLT_:
mpmulfltflt(v.U.(*Mpflt), rv.U.(*Mpflt))
- case ODIV<<16 | CTFLT_:
+ case ODIV_ | CTFLT_:
if mpcmpfltc(rv.U.(*Mpflt), 0) == 0 {
Yyerror("division by zero")
Mpmovecflt(v.U.(*Mpflt), 1.0)
// The default case above would print 'ideal % ideal',
// which is not quite an ideal error.
- case OMOD<<16 | CTFLT_:
+ case OMOD_ | CTFLT_:
if n.Diag == 0 {
Yyerror("illegal constant expression: floating-point %% operation")
n.Diag = 1
return
- case OADD<<16 | CTCPLX_:
+ case OADD_ | CTCPLX_:
mpaddfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real)
mpaddfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag)
- case OSUB<<16 | CTCPLX_:
+ case OSUB_ | CTCPLX_:
mpsubfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real)
mpsubfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag)
- case OMUL<<16 | CTCPLX_:
+ case OMUL_ | CTCPLX_:
cmplxmpy(v.U.(*Mpcplx), rv.U.(*Mpcplx))
- case ODIV<<16 | CTCPLX_:
+ case ODIV_ | CTCPLX_:
if mpcmpfltc(&rv.U.(*Mpcplx).Real, 0) == 0 && mpcmpfltc(&rv.U.(*Mpcplx).Imag, 0) == 0 {
Yyerror("complex division by zero")
Mpmovecflt(&rv.U.(*Mpcplx).Real, 1.0)
cmplxdiv(v.U.(*Mpcplx), rv.U.(*Mpcplx))
- case OEQ<<16 | CTNIL_:
+ case OEQ_ | CTNIL_:
goto settrue
- case ONE<<16 | CTNIL_:
+ case ONE_ | CTNIL_:
goto setfalse
- case OEQ<<16 | CTINT_,
- OEQ<<16 | CTRUNE_:
+ case OEQ_ | CTINT_,
+ OEQ_ | CTRUNE_:
if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) == 0 {
goto settrue
}
goto setfalse
- case ONE<<16 | CTINT_,
- ONE<<16 | CTRUNE_:
+ case ONE_ | CTINT_,
+ ONE_ | CTRUNE_:
if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) != 0 {
goto settrue
}
goto setfalse
- case OLT<<16 | CTINT_,
- OLT<<16 | CTRUNE_:
+ case OLT_ | CTINT_,
+ OLT_ | CTRUNE_:
if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) < 0 {
goto settrue
}
goto setfalse
- case OLE<<16 | CTINT_,
- OLE<<16 | CTRUNE_:
+ case OLE_ | CTINT_,
+ OLE_ | CTRUNE_:
if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) <= 0 {
goto settrue
}
goto setfalse
- case OGE<<16 | CTINT_,
- OGE<<16 | CTRUNE_:
+ case OGE_ | CTINT_,
+ OGE_ | CTRUNE_:
if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) >= 0 {
goto settrue
}
goto setfalse
- case OGT<<16 | CTINT_,
- OGT<<16 | CTRUNE_:
+ case OGT_ | CTINT_,
+ OGT_ | CTRUNE_:
if Mpcmpfixfix(v.U.(*Mpint), rv.U.(*Mpint)) > 0 {
goto settrue
}
goto setfalse
- case OEQ<<16 | CTFLT_:
+ case OEQ_ | CTFLT_:
if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) == 0 {
goto settrue
}
goto setfalse
- case ONE<<16 | CTFLT_:
+ case ONE_ | CTFLT_:
if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) != 0 {
goto settrue
}
goto setfalse
- case OLT<<16 | CTFLT_:
+ case OLT_ | CTFLT_:
if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) < 0 {
goto settrue
}
goto setfalse
- case OLE<<16 | CTFLT_:
+ case OLE_ | CTFLT_:
if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) <= 0 {
goto settrue
}
goto setfalse
- case OGE<<16 | CTFLT_:
+ case OGE_ | CTFLT_:
if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) >= 0 {
goto settrue
}
goto setfalse
- case OGT<<16 | CTFLT_:
+ case OGT_ | CTFLT_:
if mpcmpfltflt(v.U.(*Mpflt), rv.U.(*Mpflt)) > 0 {
goto settrue
}
goto setfalse
- case OEQ<<16 | CTCPLX_:
+ case OEQ_ | CTCPLX_:
if mpcmpfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real) == 0 && mpcmpfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag) == 0 {
goto settrue
}
goto setfalse
- case ONE<<16 | CTCPLX_:
+ case ONE_ | CTCPLX_:
if mpcmpfltflt(&v.U.(*Mpcplx).Real, &rv.U.(*Mpcplx).Real) != 0 || mpcmpfltflt(&v.U.(*Mpcplx).Imag, &rv.U.(*Mpcplx).Imag) != 0 {
goto settrue
}
goto setfalse
- case OEQ<<16 | CTSTR_:
+ case OEQ_ | CTSTR_:
if strlit(nl) == strlit(nr) {
goto settrue
}
goto setfalse
- case ONE<<16 | CTSTR_:
+ case ONE_ | CTSTR_:
if strlit(nl) != strlit(nr) {
goto settrue
}
goto setfalse
- case OLT<<16 | CTSTR_:
+ case OLT_ | CTSTR_:
if strlit(nl) < strlit(nr) {
goto settrue
}
goto setfalse
- case OLE<<16 | CTSTR_:
+ case OLE_ | CTSTR_:
if strlit(nl) <= strlit(nr) {
goto settrue
}
goto setfalse
- case OGE<<16 | CTSTR_:
+ case OGE_ | CTSTR_:
if strlit(nl) >= strlit(nr) {
goto settrue
}
goto setfalse
- case OGT<<16 | CTSTR_:
+ case OGT_ | CTSTR_:
if strlit(nl) > strlit(nr) {
goto settrue
}
goto setfalse
- case OOROR<<16 | CTBOOL_:
+ case OOROR_ | CTBOOL_:
if v.U.(bool) || rv.U.(bool) {
goto settrue
}
goto setfalse
- case OANDAND<<16 | CTBOOL_:
+ case OANDAND_ | CTBOOL_:
if v.U.(bool) && rv.U.(bool) {
goto settrue
}
goto setfalse
- case OEQ<<16 | CTBOOL_:
+ case OEQ_ | CTBOOL_:
if v.U.(bool) == rv.U.(bool) {
goto settrue
}
goto setfalse
- case ONE<<16 | CTBOOL_:
+ case ONE_ | CTBOOL_:
if v.U.(bool) != rv.U.(bool) {
goto settrue
}
// convert x to type et and back to int64
// for sign extension and truncation.
-func iconv(x int64, et int) int64 {
+func iconv(x int64, et EType) int64 {
switch et {
case TINT8:
x = int64(int8(x))
return f.Op == OINDREG && t.Op == OINDREG && f.Xoffset+f.Type.Width >= t.Xoffset && t.Xoffset+t.Type.Width >= f.Xoffset
}
-func complexbool(op int, nl, nr, res *Node, wantTrue bool, likely int, to *obj.Prog) {
+func complexbool(op Op, nl, nr, res *Node, wantTrue bool, likely int, to *obj.Prog) {
// make both sides addable in ullman order
if nr != nil {
if nl.Ullman > nr.Ullman && !nl.Addable {
// build and execute tree
// real(res) = real(nl) op real(nr)
// imag(res) = imag(nl) op imag(nr)
-func complexadd(op int, nl *Node, nr *Node, res *Node) {
+func complexadd(op Op, nl *Node, nr *Node, res *Node) {
var n1 Node
var n2 Node
var n3 Node
subnode(&n5, &n6, res)
var ra Node
- ra.Op = uint8(op)
+ ra.Op = op
ra.Left = &n1
ra.Right = &n3
ra.Type = n1.Type
Cgen(&ra, &n5)
ra = Node{}
- ra.Op = uint8(op)
+ ra.Op = op
ra.Left = &n2
ra.Right = &n4
ra.Type = n2.Type
ft := Simsimtype(f.Type)
tt := Simsimtype(t.Type)
- switch uint32(ft)<<16 | uint32(tt) {
- default:
- Fatalf("complexmove: unknown conversion: %v -> %v\n", f.Type, t.Type)
-
- // complex to complex move/convert.
+ // complex to complex move/convert.
// make f addable.
// also use temporary if possible stack overlap.
- case TCOMPLEX64<<16 | TCOMPLEX64,
- TCOMPLEX64<<16 | TCOMPLEX128,
- TCOMPLEX128<<16 | TCOMPLEX64,
- TCOMPLEX128<<16 | TCOMPLEX128:
+ if (ft == TCOMPLEX64 || ft == TCOMPLEX128) && (tt == TCOMPLEX64 || tt == TCOMPLEX128) {
if !f.Addable || overlap_cplx(f, t) {
var tmp Node
Tempname(&tmp, f.Type)
Cgen(&n1, &n3)
Cgen(&n2, &n4)
+ } else {
+ Fatalf("complexmove: unknown conversion: %v -> %v\n", f.Type, t.Type)
}
}
complexminus(nl, res)
case OADD, OSUB:
- complexadd(int(n.Op), nl, nr, res)
+ complexadd(n.Op, nl, nr, res)
case OMUL:
complexmul(nl, nr, res)
// import
// return the sym for ss, which should match lexical
-func importsym(s *Sym, op int) *Sym {
- if s.Def != nil && int(s.Def.Op) != op {
+func importsym(s *Sym, op Op) *Sym {
+ if s.Def != nil && s.Def.Op != op {
pkgstr := fmt.Sprintf("during import %q", importpkg.Path)
redeclare(s, pkgstr)
}
}
// Fmt "%E": etype
-func Econv(et int, flag int) string {
- if et >= 0 && et < len(etnames) && etnames[et] != "" {
+func Econv(et EType) string {
+ if int(et) < len(etnames) && etnames[et] != "" {
return etnames[et]
}
return fmt.Sprintf("E-%d", et)
if fmtmode == FDbg {
fmtmode = 0
- str := Econv(int(t.Etype), 0) + "-" + typefmt(t, flag)
+ str := Econv(t.Etype) + "-" + typefmt(t, flag)
fmtmode = FDbg
return str
}
}
if fmtmode == FExp {
- Fatalf("missing %v case during export", Econv(int(t.Etype), 0))
+ Fatalf("missing %v case during export", Econv(t.Etype))
}
// Don't know how to handle - fall back to detailed prints.
- return fmt.Sprintf("%v <%v> %v", Econv(int(t.Etype), 0), t.Sym, t.Type)
+ return fmt.Sprintf("%v <%v> %v", Econv(t.Etype), t.Sym, t.Type)
}
// Statements which may be rendered with a simplestmt as init.
-func stmtwithinit(op int) bool {
+func stmtwithinit(op Op) bool {
switch op {
case OIF, OFOR, OSWITCH:
return true
// block starting with the init statements.
// if we can just say "for" n->ninit; ... then do so
- simpleinit := n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(int(n.Op))
+ simpleinit := n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(n.Op)
// otherwise, print the inits as separate statements
complexinit := n.Ninit != nil && !simpleinit && (fmtmode != FErr)
// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
- extrablock := complexinit && stmtwithinit(int(n.Op))
+ extrablock := complexinit && stmtwithinit(n.Op)
if extrablock {
f += "{"
case OASOP:
if n.Implicit {
- if n.Etype == OADD {
+ if Op(n.Etype) == OADD {
f += fmt.Sprintf("%v++", n.Left)
} else {
f += fmt.Sprintf("%v--", n.Left)
case OCMPSTR, OCMPIFACE:
var f string
f += exprfmt(n.Left, nprec)
+ // TODO(marvin): Fix Node.EType type union.
f += fmt.Sprintf(" %v ", Oconv(int(n.Etype), obj.FmtSharp))
f += exprfmt(n.Right, nprec+1)
return f
numPtr := 0
visitComponents(nl.Type, 0, func(t *Type, offset int64) bool {
n++
- if int(Simtype[t.Etype]) == Tptr && t != itable {
+ if Simtype[t.Etype] == Tptr && t != itable {
numPtr++
}
return n <= maxMoves && (!wb || numPtr <= 1)
ptrOffset int64
)
visitComponents(nl.Type, 0, func(t *Type, offset int64) bool {
- if wb && int(Simtype[t.Etype]) == Tptr && t != itable {
+ if wb && Simtype[t.Etype] == Tptr && t != itable {
if ptrType != nil {
Fatalf("componentgen_wb %v", Tconv(nl.Type, 0))
}
}
type Type struct {
- Etype uint8
+ Etype EType
Nointerface bool
Noalg bool
Chan uint8
T *Type
}
+type EType uint8
+
const (
Txxx = iota
type Typedef struct {
Name string
- Etype int
- Sameas int
+ Etype EType
+ Sameas EType
}
type Sig struct {
var trackpkg *Pkg // fake package for field tracking
-var Tptr int // either TPTR32 or TPTR64
+var Tptr EType // either TPTR32 or TPTR64
var myimportpath string
var errortype *Type
-var Simtype [NTYPE]uint8
+var Simtype [NTYPE]EType
var (
Isptr [NTYPE]bool
Bgen_float func(*Node, bool, int, *obj.Prog) // optional
Cgen64 func(*Node, *Node) // only on 32-bit systems
Cgenindex func(*Node, *Node, bool) *obj.Prog
- Cgen_bmul func(int, *Node, *Node, *Node) bool
+ Cgen_bmul func(Op, *Node, *Node, *Node) bool
Cgen_float func(*Node, *Node) // optional
Cgen_hmul func(*Node, *Node, *Node)
- Cgen_shift func(int, bool, *Node, *Node, *Node)
+ Cgen_shift func(Op, bool, *Node, *Node, *Node)
Clearfat func(*Node)
- Cmp64 func(*Node, *Node, int, int, *obj.Prog) // only on 32-bit systems
+ Cmp64 func(*Node, *Node, Op, int, *obj.Prog) // only on 32-bit systems
Defframe func(*obj.Prog)
- Dodiv func(int, *Node, *Node, *Node)
+ Dodiv func(Op, *Node, *Node, *Node)
Excise func(*Flow)
Expandchecks func(*obj.Prog)
Getg func(*Node)
// function calls needed during the evaluation, and on 32-bit systems
// the values are guaranteed not to be 64-bit values, so no in-memory
// temporaries are necessary.
- Ginscmp func(op int, t *Type, n1, n2 *Node, likely int) *obj.Prog
+ Ginscmp func(op Op, t *Type, n1, n2 *Node, likely int) *obj.Prog
// Ginsboolval inserts instructions to convert the result
// of a just-completed comparison to a boolean value.
FtoB func(int) uint64
BtoR func(uint64) int
BtoF func(uint64) int
- Optoas func(int, *Type) int
+ Optoas func(Op, *Type) int
Doregbits func(int) uint64
Regnames func(*int) []string
Use387 bool // should 8g use 387 FP instructions instead of sse2.
| expr LASOP expr
{
$$ = Nod(OASOP, $1, $3);
- $$.Etype = uint8($2); // rathole to pass opcode
+ $$.Etype = EType($2); // rathole to pass opcode
}
| expr_list '=' expr_list
{
{
$$ = Nod(OASOP, $1, Nodintconst(1));
$$.Implicit = true;
- $$.Etype = OADD;
+ // TODO(marvin): Fix Node.EType type union.
+ $$.Etype = EType(OADD);
}
| expr LDEC
{
$$ = Nod(OASOP, $1, Nodintconst(1));
$$.Implicit = true;
- $$.Etype = OSUB;
+ // TODO(marvin): Fix Node.EType type union.
+ $$.Etype = EType(OSUB);
}
case:
// n->left is PHEAP ONAME for stack parameter.
// compute address of actual parameter on stack.
case OPARAM:
- a.Etype = Simtype[n.Left.Type.Etype]
+ a.Etype = uint8(Simtype[n.Left.Type.Etype])
a.Width = n.Left.Type.Width
a.Offset = n.Xoffset
case ONAME:
a.Etype = 0
if n.Type != nil {
- a.Etype = Simtype[n.Type.Etype]
+ a.Etype = uint8(Simtype[n.Type.Etype])
}
a.Offset = n.Xoffset
s := n.Sym
if a.Type == obj.TYPE_CONST && a.Offset == 0 {
break // ptr(nil)
}
- a.Etype = Simtype[Tptr]
+ a.Etype = uint8(Simtype[Tptr])
a.Offset += int64(Array_array)
a.Width = int64(Widthptr)
if a.Type == obj.TYPE_CONST && a.Offset == 0 {
break // len(nil)
}
- a.Etype = Simtype[TUINT]
+ a.Etype = uint8(Simtype[TUINT])
a.Offset += int64(Array_nel)
if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
a.Width = int64(Widthint)
if a.Type == obj.TYPE_CONST && a.Offset == 0 {
break // cap(nil)
}
- a.Etype = Simtype[TUINT]
+ a.Etype = uint8(Simtype[TUINT])
a.Offset += int64(Array_cap)
if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
a.Width = int64(Widthint)
if t == nil {
Fatalf("regalloc: t nil")
}
- et := int(Simtype[t.Etype])
+ et := Simtype[t.Etype]
if Ctxt.Arch.Regsize == 4 && (et == TINT64 || et == TUINT64) {
Fatalf("regalloc 64bit")
}
case ODEFER, OPROC:
switch n.Left.Op {
case OCALLFUNC, OCALLMETH:
- n.Left.Etype = n.Op
+ // TODO(marvin): Fix Node.EType type union.
+ n.Left.Etype = EType(n.Op)
}
fallthrough
// switch at the top of this function.
switch n.Op {
case OCALLFUNC, OCALLMETH:
- if n.Etype == OPROC || n.Etype == ODEFER {
+ // TODO(marvin): Fix Node.EType type union.
+ if n.Etype == EType(OPROC) || n.Etype == EType(ODEFER) {
return
}
}
}
if c1 == '=' {
- c = ODIV
+ c = int(ODIV)
goto asop
}
case ':':
c1 = getc()
if c1 == '=' {
- c = LCOLAS
+ c = int(LCOLAS)
yylval.i = int(lexlineno)
goto lx
}
case '*':
c1 = getc()
if c1 == '=' {
- c = OMUL
+ c = int(OMUL)
goto asop
}
case '%':
c1 = getc()
if c1 == '=' {
- c = OMOD
+ c = int(OMOD)
goto asop
}
case '+':
c1 = getc()
if c1 == '+' {
- c = LINC
+ c = int(LINC)
goto lx
}
if c1 == '=' {
- c = OADD
+ c = int(OADD)
goto asop
}
case '-':
c1 = getc()
if c1 == '-' {
- c = LDEC
+ c = int(LDEC)
goto lx
}
if c1 == '=' {
- c = OSUB
+ c = int(OSUB)
goto asop
}
case '>':
c1 = getc()
if c1 == '>' {
- c = LRSH
+ c = int(LRSH)
c1 = getc()
if c1 == '=' {
- c = ORSH
+ c = int(ORSH)
goto asop
}
}
if c1 == '=' {
- c = LGE
+ c = int(LGE)
goto lx
}
- c = LGT
+ c = int(LGT)
case '<':
c1 = getc()
if c1 == '<' {
- c = LLSH
+ c = int(LLSH)
c1 = getc()
if c1 == '=' {
- c = OLSH
+ c = int(OLSH)
goto asop
}
}
if c1 == '=' {
- c = LLE
+ c = int(LLE)
goto lx
}
if c1 == '-' {
- c = LCOMM
+ c = int(LCOMM)
goto lx
}
- c = LLT
+ c = int(LLT)
case '=':
c1 = getc()
if c1 == '=' {
- c = LEQ
+ c = int(LEQ)
goto lx
}
case '!':
c1 = getc()
if c1 == '=' {
- c = LNE
+ c = int(LNE)
goto lx
}
case '&':
c1 = getc()
if c1 == '&' {
- c = LANDAND
+ c = int(LANDAND)
goto lx
}
if c1 == '^' {
- c = LANDNOT
+ c = int(LANDNOT)
c1 = getc()
if c1 == '=' {
- c = OANDNOT
+ c = int(OANDNOT)
goto asop
}
}
if c1 == '=' {
- c = OAND
+ c = int(OAND)
goto asop
}
case '|':
c1 = getc()
if c1 == '|' {
- c = LOROR
+ c = int(LOROR)
goto lx
}
if c1 == '=' {
- c = OOR
+ c = int(OOR)
goto asop
}
case '^':
c1 = getc()
if c1 == '=' {
- c = OXOR
+ c = int(OXOR)
goto asop
}
var syms = []struct {
name string
lexical int
- etype int
- op int
+ etype EType
+ op Op
}{
// basic types
{"int8", LNAME, TINT8, OXXX},
s1.Lexical = uint16(lex)
if etype := s.etype; etype != Txxx {
- if etype < 0 || etype >= len(Types) {
+ if int(etype) >= len(Types) {
Fatalf("lexinit: %s bad etype", s.name)
}
s2 := Pkglookup(s.name, builtinpkg)
continue
}
+ // TODO(marvin): Fix Node.EType type union.
if etype := s.op; etype != OXXX {
s2 := Pkglookup(s.name, builtinpkg)
s2.Lexical = LNAME
s2.Def = Nod(ONAME, nil, nil)
s2.Def.Sym = s2
- s2.Def.Etype = uint8(etype)
+ s2.Def.Etype = EType(etype)
}
}
}
func lexfini() {
- var s *Sym
- var lex int
- var etype int
- var i int
-
- for i = 0; i < len(syms); i++ {
- lex = syms[i].lexical
+ for i := range syms {
+ lex := syms[i].lexical
if lex != LNAME {
continue
}
- s = Lookup(syms[i].name)
+ s := Lookup(syms[i].name)
s.Lexical = uint16(lex)
- etype = syms[i].etype
+ etype := syms[i].etype
if etype != Txxx && (etype != TANY || Debug['A'] != 0) && s.Def == nil {
s.Def = typenod(Types[etype])
s.Def.Name = new(Name)
s.Origpkg = builtinpkg
}
- etype = syms[i].op
- if etype != OXXX && s.Def == nil {
+ // TODO(marvin): Fix Node.EType type union.
+ etype = EType(syms[i].op)
+ if etype != EType(OXXX) && s.Def == nil {
s.Def = Nod(ONAME, nil, nil)
s.Def.Sym = s
- s.Def.Etype = uint8(etype)
+ s.Def.Etype = etype
s.Origpkg = builtinpkg
}
}
// backend-specific builtin types (e.g. int).
- for i = range Thearch.Typedefs {
- s = Lookup(Thearch.Typedefs[i].Name)
+ for i := range Thearch.Typedefs {
+ s := Lookup(Thearch.Typedefs[i].Name)
if s.Def == nil {
s.Def = typenod(Types[Thearch.Typedefs[i].Etype])
s.Def.Name = new(Name)
// there's only so much table-driven we can handle.
// these are special cases.
- s = Lookup("byte")
-
- if s.Def == nil {
+ if s := Lookup("byte"); s.Def == nil {
s.Def = typenod(bytetype)
s.Def.Name = new(Name)
s.Origpkg = builtinpkg
}
- s = Lookup("error")
- if s.Def == nil {
+ if s := Lookup("error"); s.Def == nil {
s.Def = typenod(errortype)
s.Def.Name = new(Name)
s.Origpkg = builtinpkg
}
- s = Lookup("rune")
- if s.Def == nil {
+ if s := Lookup("rune"); s.Def == nil {
s.Def = typenod(runetype)
s.Def.Name = new(Name)
s.Origpkg = builtinpkg
}
- s = Lookup("nil")
- if s.Def == nil {
+ if s := Lookup("nil"); s.Def == nil {
var v Val
v.U = new(NilVal)
s.Def = nodlit(v)
s.Origpkg = builtinpkg
}
- s = Lookup("iota")
- if s.Def == nil {
+ if s := Lookup("iota"); s.Def == nil {
s.Def = Nod(OIOTA, nil, nil)
s.Def.Sym = s
s.Origpkg = builtinpkg
}
- s = Lookup("true")
- if s.Def == nil {
+ if s := Lookup("true"); s.Def == nil {
s.Def = Nodbool(true)
s.Def.Sym = s
s.Def.Name = new(Name)
s.Origpkg = builtinpkg
}
- s = Lookup("false")
- if s.Def == nil {
+ if s := Lookup("false"); s.Def == nil {
s.Def = Nodbool(false)
s.Def.Sym = s
s.Def.Name = new(Name)
a.Sym = Linksym(symdata)
a.Node = symdata.Def
a.Offset = 0
- a.Etype = Simtype[TINT]
+ a.Etype = uint8(Simtype[TINT])
}
func datagostring(sval string, a *obj.Addr) {
a.Sym = Linksym(symhdr)
a.Node = symhdr.Def
a.Offset = 0
- a.Etype = TSTRING
+ a.Etype = uint8(TSTRING)
}
func dgostringptr(s *Sym, off int, str string) int {
p.From3.Offset = int64(Widthptr)
datagostring(*lit, &p.To)
p.To.Type = obj.TYPE_ADDR
- p.To.Etype = Simtype[TINT]
+ p.To.Etype = uint8(Simtype[TINT])
off += Widthptr
return off
}
func gdatacomplex(nam *Node, cval *Mpcplx) {
- w := cplxsubtype(int(nam.Type.Etype))
- w = int(Types[w].Width)
+ cst := cplxsubtype(nam.Type.Etype)
+ w := int(Types[cst].Width)
p := Thearch.Gins(obj.ADATA, nam, nil)
p.From3 = new(obj.Addr)
tmp1.Etype = 0 // now an rvalue not an lvalue
}
tmp1 = ordercopyexpr(tmp1, n.Left.Type, order, 0)
- n.Right = Nod(int(n.Etype), tmp1, n.Right)
+ // TODO(marvin): Fix Node.EType type union.
+ n.Right = Nod(Op(n.Etype), tmp1, n.Right)
typecheck(&n.Right, Erv)
orderexpr(&n.Right, order, nil)
n.Etype = 0
// another possible choice would be package main,
// but using runtime means fewer copies in .6 files.
if compiling_runtime != 0 {
- for i := 1; i <= TBOOL; i++ {
+ for i := EType(1); i <= TBOOL; i++ {
dtypesym(Ptrto(Types[i]))
}
dtypesym(Ptrto(Types[TSTRING]))
width int
id int // index in vars
name int8
- etype int8
+ etype EType
addr int8
}
if node.Sym == nil || node.Sym.Name[0] == '.' {
return zbits
}
- et := int(a.Etype)
+ et := EType(a.Etype)
o := a.Offset
w := a.Width
if w < 0 {
v = &vars[i]
if v.node == node && int(v.name) == n {
if v.offset == o {
- if int(v.etype) == et {
+ if v.etype == et {
if int64(v.width) == w {
// TODO(rsc): Remove special case for arm here.
if flag == 0 || Thearch.Thechar != '5' {
v.id = i
v.offset = o
v.name = int8(n)
- v.etype = int8(et)
+ v.etype = et
v.width = int(w)
v.addr = int8(flag) // funny punning
v.node = node
}
if Debug['R'] != 0 {
- fmt.Printf("bit=%2d et=%v w=%d+%d %v %v flag=%d\n", i, Econv(int(et), 0), o, w, Nconv(node, obj.FmtSharp), Ctxt.Dconv(a), v.addr)
+ fmt.Printf("bit=%2d et=%v w=%d+%d %v %v flag=%d\n", i, Econv(et), o, w, Nconv(node, obj.FmtSharp), Ctxt.Dconv(a), v.addr)
}
Ostats.Nvar++
r.regno = 0
switch v.etype {
default:
- Fatalf("unknown etype %d/%v", Bitno(b), Econv(int(v.etype), 0))
+ Fatalf("unknown etype %d/%v", Bitno(b), Econv(v.etype))
case TINT8,
TUINT8,
}
if Debug['R'] != 0 && Debug['v'] != 0 {
- fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, Econv(int(v.etype), 0), v.width, v.node, v.offset)
+ fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, Econv(v.etype), v.width, v.node, v.offset)
}
}
if rgp.regno != 0 {
if Debug['R'] != 0 && Debug['v'] != 0 {
v := &vars[rgp.varno]
- fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", v.node, v.offset, rgp.varno, Econv(int(v.etype), 0), obj.Rconv(int(rgp.regno)), usedreg, vreg)
+ fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", v.node, v.offset, rgp.varno, Econv(v.etype), obj.Rconv(int(rgp.regno)), usedreg, vreg)
}
paint3(rgp.enter, int(rgp.varno), vreg, int(rgp.regno))
}
}
-func Nod(op int, nleft *Node, nright *Node) *Node {
+func Nod(op Op, nleft *Node, nright *Node) *Node {
n := new(Node)
- n.Op = uint8(op)
+ n.Op = op
n.Left = nleft
n.Right = nright
n.Lineno = int32(parserline())
if n.Orig != nil {
return
}
- norig := Nod(int(n.Op), nil, nil)
+ norig := Nod(n.Op, nil, nil)
*norig = *n
n.Orig = norig
}
if key != nil {
var bad *Type
atype := algtype1(key, &bad)
- var mtype int
+ var mtype EType
if bad == nil {
- mtype = int(key.Etype)
+ mtype = key.Etype
} else {
- mtype = int(bad.Etype)
+ mtype = bad.Etype
}
switch mtype {
default:
return t
}
-func typ(et int) *Type {
+func typ(et EType) *Type {
t := new(Type)
- t.Etype = uint8(et)
+ t.Etype = et
t.Width = BADWIDTH
t.Lineno = int(lineno)
t.Orig = t
return true
}
-func isptrto(t *Type, et int) bool {
+func isptrto(t *Type, et EType) bool {
if t == nil {
return false
}
if t == nil {
return false
}
- if int(t.Etype) != et {
+ if t.Etype != et {
return false
}
return true
}
-func Istype(t *Type, et int) bool {
- return t != nil && int(t.Etype) == et
+func Istype(t *Type, et EType) bool {
+ return t != nil && t.Etype == et
}
func Isfixedarray(t *Type) bool {
return t
}
-func cplxsubtype(et int) int {
+func cplxsubtype(et EType) EType {
switch et {
case TCOMPLEX64:
return TFLOAT32
return TFLOAT64
}
- Fatalf("cplxsubtype: %v\n", Econv(int(et), 0))
+ Fatalf("cplxsubtype: %v\n", Econv(et))
return 0
}
// Is type src assignment compatible to type dst?
// If so, return op code to use in conversion.
// If not, return 0.
-func assignop(src *Type, dst *Type, why *string) int {
+func assignop(src *Type, dst *Type, why *string) Op {
if why != nil {
*why = ""
}
// Can we convert a value of type src to a value of type dst?
// If so, return op code to use in conversion (maybe OCONVNOP).
// If not, return 0.
-func convertop(src *Type, dst *Type, why *string) int {
+func convertop(src *Type, dst *Type, why *string) Op {
if why != nil {
*why = ""
}
// Is a conversion between t1 and t2 a no-op?
func Noconv(t1 *Type, t2 *Type) bool {
- e1 := int(Simtype[t1.Etype])
- e2 := int(Simtype[t2.Etype])
+ e1 := Simtype[t1.Etype]
+ e2 := Simtype[t2.Etype]
switch e1 {
case TINT8, TUINT8:
n.Ullman = uint8(ul)
}
-func badtype(o int, tl *Type, tr *Type) {
+func badtype(op Op, tl *Type, tr *Type) {
fmt_ := ""
if tl != nil {
fmt_ += fmt.Sprintf("\n\t%v", tl)
}
s := fmt_
- Yyerror("illegal types for operand: %v%s", Oconv(int(o), 0), s)
+ Yyerror("illegal types for operand: %v%s", Oconv(int(op), 0), s)
}
// iterator to walk a structure declaration
// Brcom returns !(op).
// For example, Brcom(==) is !=.
-func Brcom(a int) int {
- switch a {
+func Brcom(op Op) Op {
+ switch op {
case OEQ:
return ONE
case ONE:
case OGE:
return OLT
}
- Fatalf("brcom: no com for %v\n", Oconv(a, 0))
- return a
+ Fatalf("brcom: no com for %v\n", Oconv(int(op), 0))
+ return op
}
// Brrev returns reverse(op).
// For example, Brrev(<) is >.
-func Brrev(a int) int {
- switch a {
+func Brrev(op Op) Op {
+ switch op {
case OEQ:
return OEQ
case ONE:
case OGE:
return OLE
}
- Fatalf("brrev: no rev for %v\n", Oconv(a, 0))
- return a
+ Fatalf("brrev: no rev for %v\n", Oconv(int(op), 0))
+ return op
}
// return side effect-free n, appending side effects to init.
// even simpler simtype; get rid of ptr, bool.
// assuming that the front end has rejected
// all the invalid conversions (like ptr -> bool)
-func Simsimtype(t *Type) int {
+func Simsimtype(t *Type) EType {
if t == nil {
return 0
}
- et := int(Simtype[t.Etype])
+ et := Simtype[t.Etype]
switch et {
case TPTR32:
et = TUINT32
Esc uint16 // EscXXX
- Op uint8
+ Op Op
Nointerface bool
Ullman uint8 // sethi/ullman number
Addable bool // addressable
- Etype uint8 // op for OASOP, etype for OTYPE, exclam for export, 6g saved reg
+ Etype EType // op for OASOP, etype for OTYPE, exclam for export, 6g saved reg
Bounded bool // bounds check unnecessary
Class Class // PPARAM, PAUTO, PEXTERN, etc
Embedded uint8 // ODCLFIELD embedded type
Systemstack bool // must run on system stack
}
+type Op uint8
+
// Node ops.
const (
- OXXX = iota
+ OXXX = Op(iota)
// names
ONAME // var, const or func name
if Isslice(t) {
return "slice"
}
- et := int(t.Etype)
- if 0 <= et && et < len(_typekind) {
+ et := t.Etype
+ if int(et) < len(_typekind) {
s := _typekind[et]
if s != "" {
return s
}
t := typ(TCHAN)
t.Type = l.Type
- t.Chan = n.Etype
+ // TODO(marvin): Fix Node.EType type union.
+ t.Chan = uint8(n.Etype)
n.Op = OTYPE
n.Type = t
n.Left = nil
OSUB,
OXOR:
var l *Node
- var op int
+ var op Op
var r *Node
if n.Op == OASOP {
ok |= Etop
n.Type = nil
return
}
- op = int(n.Etype)
+ // TODO(marvin): Fix Node.EType type union.
+ op = Op(n.Etype)
} else {
ok |= Erv
l = typecheck(&n.Left, Erv|top&Eiota)
n.Type = nil
return
}
- op = int(n.Op)
+ op = n.Op
}
if op == OLSH || op == ORSH {
defaultlit(&r, Types[TUINT])
if t.Etype == TIDEAL {
t = r.Type
}
- et := int(t.Etype)
+ et := t.Etype
if et == TIDEAL {
et = TINT
}
- aop := 0
+ var aop Op = OXXX
if iscmp[n.Op] && t.Etype != TIDEAL && !Eqtype(l.Type, r.Type) {
// comparison is okay as long as one side is
// assignable to the other. convert so they have
}
converted:
- et = int(t.Etype)
+ et = t.Etype
}
if t.Etype != TIDEAL && !Eqtype(l.Type, r.Type) {
if et == TSTRING {
if iscmp[n.Op] {
- n.Etype = n.Op
+ // TODO(marvin): Fix Node.EType type union.
+ n.Etype = EType(n.Op)
n.Op = OCMPSTR
} else if n.Op == OADD {
// create OADDSTR node with list of strings in x + y + z + (w + v) + ...
} else if r.Op == OLITERAL && r.Val().Ctype() == CTNIL {
} else // leave alone for back end
if Isinter(r.Type) == Isinter(l.Type) {
- n.Etype = n.Op
+ // TODO(marvin): Fix Node.EType type union.
+ n.Etype = EType(n.Op)
n.Op = OCMPIFACE
}
}
n.Diag |= n.Left.Diag
l = n.Left
if l.Op == ONAME && l.Etype != 0 {
- if n.Isddd && l.Etype != OAPPEND {
+ // TODO(marvin): Fix Node.EType type union.
+ if n.Isddd && Op(l.Etype) != OAPPEND {
Yyerror("invalid use of ... with builtin %v", l)
}
// builtin: OLEN, OCAP, etc.
- n.Op = l.Etype
+ // TODO(marvin): Fix Node.EType type union.
+ n.Op = Op(l.Etype)
n.Left = n.Right
n.Right = nil
n.Orig = r
}
- n.Type = Types[cplxsubtype(int(t.Etype))]
+ n.Type = Types[cplxsubtype(t.Etype)]
break OpSwitch
}
return
}
var why string
- n.Op = uint8(convertop(t, n.Type, &why))
- if (n.Op) == 0 {
+ n.Op = convertop(t, n.Type, &why)
+ if n.Op == 0 {
if n.Diag == 0 && !n.Type.Broke {
Yyerror("cannot convert %v to type %v%s", Nconv(n.Left, obj.FmtLong), n.Type, why)
n.Diag = 1
}
func derefall(t *Type) *Type {
- for t != nil && int(t.Etype) == Tptr {
+ for t != nil && t.Etype == Tptr {
t = t.Type
}
return t
dowidth(tt)
rcvr := getthisx(f2.Type).Type.Type
if !Eqtype(rcvr, tt) {
- if int(rcvr.Etype) == Tptr && Eqtype(rcvr.Type, tt) {
+ if rcvr.Etype == Tptr && Eqtype(rcvr.Type, tt) {
checklvalue(n.Left, "call pointer method on")
n.Left = Nod(OADDR, n.Left, nil)
n.Left.Implicit = true
typecheck(&n.Left, Etype|Erv)
- } else if int(tt.Etype) == Tptr && int(rcvr.Etype) != Tptr && Eqtype(tt.Type, rcvr) {
+ } else if tt.Etype == Tptr && rcvr.Etype != Tptr && Eqtype(tt.Type, rcvr) {
n.Left = Nod(OIND, n.Left, nil)
n.Left.Implicit = true
typecheck(&n.Left, Etype|Erv)
- } else if int(tt.Etype) == Tptr && int(tt.Type.Etype) == Tptr && Eqtype(derefall(tt), derefall(rcvr)) {
+ } else if tt.Etype == Tptr && tt.Type.Etype == Tptr && Eqtype(derefall(tt), derefall(rcvr)) {
Yyerror("calling method %v with receiver %v requires explicit dereference", n.Right, Nconv(n.Left, obj.FmtLong))
- for int(tt.Etype) == Tptr {
+ for tt.Etype == Tptr {
// Stop one level early for method with pointer receiver.
- if int(rcvr.Etype) == Tptr && int(tt.Type.Etype) != Tptr {
+ if rcvr.Etype == Tptr && tt.Type.Etype != Tptr {
break
}
n.Left = Nod(OIND, n.Left, nil)
}
// typecheck assignment: type list = expression list
-func typecheckaste(op int, call *Node, isddd bool, tstruct *Type, nl *NodeList, desc func() string) {
+func typecheckaste(op Op, call *Node, isddd bool, tstruct *Type, nl *NodeList, desc func() string) {
var t *Type
var n *Node
var n1 int
}
// Save original node (including n->right)
- norig := Nod(int(n.Op), nil, nil)
+ norig := Nod(n.Op, nil, nil)
*norig = *n
if f.Op != OCALLFUNC && f.Op != OCALLMETH && f.Op != OCALLINTER {
Fatalf("expected return of call, have %v", f)
}
- n.List = concat(list1(f), ascompatet(int(n.Op), rl, &f.Type, 0, &n.Ninit))
+ n.List = concat(list1(f), ascompatet(n.Op, rl, &f.Type, 0, &n.Ninit))
break
}
// move function calls out, to make reorder3's job easier.
walkexprlistsafe(n.List, &n.Ninit)
- ll := ascompatee(int(n.Op), rl, n.List, &n.Ninit)
+ ll := ascompatee(n.Op, rl, n.List, &n.Ninit)
n.List = reorder3(ll)
break
}
- ll := ascompatte(int(n.Op), nil, false, Getoutarg(Curfn.Type), n.List, 1, &n.Ninit)
+ ll := ascompatte(n.Op, nil, false, Getoutarg(Curfn.Type), n.List, 1, &n.Ninit)
n.List = ll
case ORETJMP:
}
walkexpr(&n.Left, init)
walkexprlist(n.List, init)
- ll := ascompatte(int(n.Op), n, n.Isddd, getinarg(t), n.List, 0, init)
+ ll := ascompatte(n.Op, n, n.Isddd, getinarg(t), n.List, 0, init)
n.List = reorder1(ll)
case OCALLFUNC:
}
}
- ll := ascompatte(int(n.Op), n, n.Isddd, getinarg(t), n.List, 0, init)
+ ll := ascompatte(n.Op, n, n.Isddd, getinarg(t), n.List, 0, init)
n.List = reorder1(ll)
case OCALLMETH:
}
walkexpr(&n.Left, init)
walkexprlist(n.List, init)
- ll := ascompatte(int(n.Op), n, false, getthis(t), list1(n.Left.Left), 0, init)
- lr := ascompatte(int(n.Op), n, n.Isddd, getinarg(t), n.List, 0, init)
+ ll := ascompatte(n.Op, n, false, getthis(t), list1(n.Left.Left), 0, init)
+ lr := ascompatte(n.Op, n, n.Isddd, getinarg(t), n.List, 0, init)
ll = concat(ll, lr)
n.Left.Left = nil
ullmancalc(n.Left)
walkexprlistsafe(n.List, init)
walkexpr(&r, init)
- ll := ascompatet(int(n.Op), n.List, &r.Type, 0, init)
+ ll := ascompatet(n.Op, n.List, &r.Type, 0, init)
for lr := ll; lr != nil; lr = lr.Next {
lr.N = applywritebarrier(lr.N, init)
}
walkexpr(&n.Right, init)
// rewrite complex div into function call.
- et := int(n.Left.Type.Etype)
+ et := n.Left.Type.Etype
if Iscomplex[et] && n.Op == ODIV {
t := n.Type
// without the function call.
case OCMPSTR:
if (Isconst(n.Left, CTSTR) && len(n.Left.Val().U.(string)) == 0) || (Isconst(n.Right, CTSTR) && len(n.Right.Val().U.(string)) == 0) {
- r := Nod(int(n.Etype), Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil))
+ // TODO(marvin): Fix Node.EType type union.
+ r := Nod(Op(n.Etype), Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil))
typecheck(&r, Erv)
walkexpr(&r, init)
r.Type = n.Type
}
// s + "badgerbadgerbadger" == "badgerbadgerbadger"
- if (n.Etype == OEQ || n.Etype == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && strlit(n.Right) == strlit(n.Left.List.Next.N) {
- r := Nod(int(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
+ if (Op(n.Etype) == OEQ || Op(n.Etype) == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && strlit(n.Right) == strlit(n.Left.List.Next.N) {
+ // TODO(marvin): Fix Node.EType type union.
+ r := Nod(Op(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
typecheck(&r, Erv)
walkexpr(&r, init)
r.Type = n.Type
}
var r *Node
- if n.Etype == OEQ || n.Etype == ONE {
+ // TODO(marvin): Fix Node.EType type union.
+ if Op(n.Etype) == OEQ || Op(n.Etype) == ONE {
// prepare for rewrite below
n.Left = cheapexpr(n.Left, init)
// quick check of len before full compare for == or !=
// eqstring assumes that the lengths are equal
- if n.Etype == OEQ {
+ // TODO(marvin): Fix Node.EType type union.
+ if Op(n.Etype) == OEQ {
// len(left) == len(right) && eqstring(left, right)
r = Nod(OANDAND, Nod(OEQ, Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil)), r)
} else {
// sys_cmpstring(s1, s2) :: 0
r = mkcall("cmpstring", Types[TINT], init, conv(n.Left, Types[TSTRING]), conv(n.Right, Types[TSTRING]))
- r = Nod(int(n.Etype), r, Nodintconst(0))
+ // TODO(marvin): Fix Node.EType type union.
+ r = Nod(Op(n.Etype), r, Nodintconst(0))
}
typecheck(&r, Erv)
n.Left = cheapexpr(n.Left, init)
substArgTypes(fn, n.Right.Type, n.Left.Type)
r := mkcall1(fn, n.Type, init, n.Left, n.Right)
- if n.Etype == ONE {
+ // TODO(marvin): Fix Node.EType type union.
+ if Op(n.Etype) == ONE {
r = Nod(ONOT, r, nil)
}
// check itable/type before full compare.
- if n.Etype == OEQ {
+ // TODO(marvin): Fix Node.EType type union.
+ if Op(n.Etype) == OEQ {
r = Nod(OANDAND, Nod(OEQ, Nod(OITAB, n.Left, nil), Nod(OITAB, n.Right, nil)), r)
} else {
r = Nod(OOROR, Nod(ONE, Nod(OITAB, n.Left, nil), Nod(OITAB, n.Right, nil)), r)
return n
}
-func ascompatee1(op int, l *Node, r *Node, init **NodeList) *Node {
+func ascompatee1(op Op, l *Node, r *Node, init **NodeList) *Node {
// convas will turn map assigns into function calls,
// making it impossible for reorder3 to work.
n := Nod(OAS, l, r)
return convas(n, init)
}
-func ascompatee(op int, nl *NodeList, nr *NodeList, init **NodeList) *NodeList {
+func ascompatee(op Op, nl *NodeList, nr *NodeList, init **NodeList) *NodeList {
// check assign expression list to
// a expression list. called in
// expr-list = expr-list
return true
}
-func ascompatet(op int, nl *NodeList, nr **Type, fp int, init **NodeList) *NodeList {
+func ascompatet(op Op, nl *NodeList, nr **Type, fp int, init **NodeList) *NodeList {
var l *Node
var tmp *Node
var a *Node
// a type list. called in
// return expr-list
// func(expr-list)
-func ascompatte(op int, call *Node, isddd bool, nl **Type, lr *NodeList, fp int, init **NodeList) *NodeList {
+func ascompatte(op Op, call *Node, isddd bool, nl **Type, lr *NodeList, fp int, init **NodeList) *NodeList {
var savel Iter
lr0 := lr
var n *Node
var on *Node
var t *Type
- var et int
+ var et EType
- op := int(nn.Op)
+ op := nn.Op
all := nn.List
var calls *NodeList
notfirst := false
}
t = n.Type
- et = int(n.Type.Etype)
+ et = n.Type.Etype
if Isinter(n.Type) {
if isnilinter(n.Type) {
on = syslook("printeface", 1)
typecheck(&a, Etop)
*init = list(*init, a)
- andor := OANDAND
+ var andor Op = OANDAND
if n.Op == ONE {
andor = OOROR
}
for i := 0; int64(i) < t.Bound; i++ {
li = Nod(OINDEX, l, Nodintconst(int64(i)))
ri = Nod(OINDEX, r, Nodintconst(int64(i)))
- a = Nod(int(n.Op), li, ri)
+ a = Nod(n.Op, li, ri)
if expr == nil {
expr = a
} else {
}
li = Nod(OXDOT, l, newname(t1.Sym))
ri = Nod(OXDOT, r, newname(t1.Sym))
- a = Nod(int(n.Op), li, ri)
+ a = Nod(n.Op, li, ri)
if expr == nil {
expr = a
} else {
Curfn = nil
funchdr(fn)
- a = Nod(int(n.Op), nil, nil)
+ a = Nod(n.Op, nil, nil)
a.List = printargs
typecheck(&a, Etop)
walkstmt(&a)
//line go.y:489
{
yyVAL.node = Nod(OASOP, yyDollar[1].node, yyDollar[3].node)
- yyVAL.node.Etype = uint8(yyDollar[2].i) // rathole to pass opcode
+ yyVAL.node.Etype = EType(yyDollar[2].i) // rathole to pass opcode
}
case 51:
yyDollar = yyS[yypt-3 : yypt+1]
{
yyVAL.node = Nod(OASOP, yyDollar[1].node, Nodintconst(1))
yyVAL.node.Implicit = true
- yyVAL.node.Etype = OADD
+ yyVAL.node.Etype = EType(OADD)
}
case 54:
yyDollar = yyS[yypt-2 : yypt+1]
{
yyVAL.node = Nod(OASOP, yyDollar[1].node, Nodintconst(1))
yyVAL.node.Implicit = true
- yyVAL.node.Etype = OSUB
+ yyVAL.node.Etype = EType(OSUB)
}
case 55:
yyDollar = yyS[yypt-3 : yypt+1]
* res = nl % nr
* according to op.
*/
-func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// Have to be careful about handling
// most negative int divided by -1 correctly.
// The hardware will generate undefined result.
* res = nl << nr
* res = nl >> nr
*/
-func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
a := int(optoas(op, nl.Type))
if nr.Op == gc.OLITERAL {
gc.Regfree(&ntmp)
}
-func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
// Reverse comparison to place constant last.
op = gc.Brrev(op)
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op int, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) int {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
+ // avoid constant conversions in switches below
+ const (
+ OMINUS_ = uint32(gc.OMINUS) << 16
+ OLSH_ = uint32(gc.OLSH) << 16
+ ORSH_ = uint32(gc.ORSH) << 16
+ OADD_ = uint32(gc.OADD) << 16
+ OSUB_ = uint32(gc.OSUB) << 16
+ OMUL_ = uint32(gc.OMUL) << 16
+ ODIV_ = uint32(gc.ODIV) << 16
+ OOR_ = uint32(gc.OOR) << 16
+ OAND_ = uint32(gc.OAND) << 16
+ OXOR_ = uint32(gc.OXOR) << 16
+ OEQ_ = uint32(gc.OEQ) << 16
+ ONE_ = uint32(gc.ONE) << 16
+ OLT_ = uint32(gc.OLT) << 16
+ OLE_ = uint32(gc.OLE) << 16
+ OGE_ = uint32(gc.OGE) << 16
+ OGT_ = uint32(gc.OGT) << 16
+ OCMP_ = uint32(gc.OCMP) << 16
+ OAS_ = uint32(gc.OAS) << 16
+ OHMUL_ = uint32(gc.OHMUL) << 16
+ )
+
a := int(obj.AXXX)
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
- case gc.OEQ<<16 | gc.TBOOL,
- gc.OEQ<<16 | gc.TINT8,
- gc.OEQ<<16 | gc.TUINT8,
- gc.OEQ<<16 | gc.TINT16,
- gc.OEQ<<16 | gc.TUINT16,
- gc.OEQ<<16 | gc.TINT32,
- gc.OEQ<<16 | gc.TUINT32,
- gc.OEQ<<16 | gc.TINT64,
- gc.OEQ<<16 | gc.TUINT64,
- gc.OEQ<<16 | gc.TPTR32,
- gc.OEQ<<16 | gc.TPTR64,
- gc.OEQ<<16 | gc.TFLOAT32,
- gc.OEQ<<16 | gc.TFLOAT64:
+ case OEQ_ | gc.TBOOL,
+ OEQ_ | gc.TINT8,
+ OEQ_ | gc.TUINT8,
+ OEQ_ | gc.TINT16,
+ OEQ_ | gc.TUINT16,
+ OEQ_ | gc.TINT32,
+ OEQ_ | gc.TUINT32,
+ OEQ_ | gc.TINT64,
+ OEQ_ | gc.TUINT64,
+ OEQ_ | gc.TPTR32,
+ OEQ_ | gc.TPTR64,
+ OEQ_ | gc.TFLOAT32,
+ OEQ_ | gc.TFLOAT64:
a = ppc64.ABEQ
- case gc.ONE<<16 | gc.TBOOL,
- gc.ONE<<16 | gc.TINT8,
- gc.ONE<<16 | gc.TUINT8,
- gc.ONE<<16 | gc.TINT16,
- gc.ONE<<16 | gc.TUINT16,
- gc.ONE<<16 | gc.TINT32,
- gc.ONE<<16 | gc.TUINT32,
- gc.ONE<<16 | gc.TINT64,
- gc.ONE<<16 | gc.TUINT64,
- gc.ONE<<16 | gc.TPTR32,
- gc.ONE<<16 | gc.TPTR64,
- gc.ONE<<16 | gc.TFLOAT32,
- gc.ONE<<16 | gc.TFLOAT64:
+ case ONE_ | gc.TBOOL,
+ ONE_ | gc.TINT8,
+ ONE_ | gc.TUINT8,
+ ONE_ | gc.TINT16,
+ ONE_ | gc.TUINT16,
+ ONE_ | gc.TINT32,
+ ONE_ | gc.TUINT32,
+ ONE_ | gc.TINT64,
+ ONE_ | gc.TUINT64,
+ ONE_ | gc.TPTR32,
+ ONE_ | gc.TPTR64,
+ ONE_ | gc.TFLOAT32,
+ ONE_ | gc.TFLOAT64:
a = ppc64.ABNE
- case gc.OLT<<16 | gc.TINT8, // ACMP
- gc.OLT<<16 | gc.TINT16,
- gc.OLT<<16 | gc.TINT32,
- gc.OLT<<16 | gc.TINT64,
- gc.OLT<<16 | gc.TUINT8,
+ case OLT_ | gc.TINT8, // ACMP
+ OLT_ | gc.TINT16,
+ OLT_ | gc.TINT32,
+ OLT_ | gc.TINT64,
+ OLT_ | gc.TUINT8,
// ACMPU
- gc.OLT<<16 | gc.TUINT16,
- gc.OLT<<16 | gc.TUINT32,
- gc.OLT<<16 | gc.TUINT64,
- gc.OLT<<16 | gc.TFLOAT32,
+ OLT_ | gc.TUINT16,
+ OLT_ | gc.TUINT32,
+ OLT_ | gc.TUINT64,
+ OLT_ | gc.TFLOAT32,
// AFCMPU
- gc.OLT<<16 | gc.TFLOAT64:
+ OLT_ | gc.TFLOAT64:
a = ppc64.ABLT
- case gc.OLE<<16 | gc.TINT8, // ACMP
- gc.OLE<<16 | gc.TINT16,
- gc.OLE<<16 | gc.TINT32,
- gc.OLE<<16 | gc.TINT64,
- gc.OLE<<16 | gc.TUINT8,
+ case OLE_ | gc.TINT8, // ACMP
+ OLE_ | gc.TINT16,
+ OLE_ | gc.TINT32,
+ OLE_ | gc.TINT64,
+ OLE_ | gc.TUINT8,
// ACMPU
- gc.OLE<<16 | gc.TUINT16,
- gc.OLE<<16 | gc.TUINT32,
- gc.OLE<<16 | gc.TUINT64:
+ OLE_ | gc.TUINT16,
+ OLE_ | gc.TUINT32,
+ OLE_ | gc.TUINT64:
// No OLE for floats, because it mishandles NaN.
// Front end must reverse comparison or use OLT and OEQ together.
a = ppc64.ABLE
- case gc.OGT<<16 | gc.TINT8,
- gc.OGT<<16 | gc.TINT16,
- gc.OGT<<16 | gc.TINT32,
- gc.OGT<<16 | gc.TINT64,
- gc.OGT<<16 | gc.TUINT8,
- gc.OGT<<16 | gc.TUINT16,
- gc.OGT<<16 | gc.TUINT32,
- gc.OGT<<16 | gc.TUINT64,
- gc.OGT<<16 | gc.TFLOAT32,
- gc.OGT<<16 | gc.TFLOAT64:
+ case OGT_ | gc.TINT8,
+ OGT_ | gc.TINT16,
+ OGT_ | gc.TINT32,
+ OGT_ | gc.TINT64,
+ OGT_ | gc.TUINT8,
+ OGT_ | gc.TUINT16,
+ OGT_ | gc.TUINT32,
+ OGT_ | gc.TUINT64,
+ OGT_ | gc.TFLOAT32,
+ OGT_ | gc.TFLOAT64:
a = ppc64.ABGT
- case gc.OGE<<16 | gc.TINT8,
- gc.OGE<<16 | gc.TINT16,
- gc.OGE<<16 | gc.TINT32,
- gc.OGE<<16 | gc.TINT64,
- gc.OGE<<16 | gc.TUINT8,
- gc.OGE<<16 | gc.TUINT16,
- gc.OGE<<16 | gc.TUINT32,
- gc.OGE<<16 | gc.TUINT64:
+ case OGE_ | gc.TINT8,
+ OGE_ | gc.TINT16,
+ OGE_ | gc.TINT32,
+ OGE_ | gc.TINT64,
+ OGE_ | gc.TUINT8,
+ OGE_ | gc.TUINT16,
+ OGE_ | gc.TUINT32,
+ OGE_ | gc.TUINT64:
// No OGE for floats, because it mishandles NaN.
// Front end must reverse comparison or use OLT and OEQ together.
a = ppc64.ABGE
- case gc.OCMP<<16 | gc.TBOOL,
- gc.OCMP<<16 | gc.TINT8,
- gc.OCMP<<16 | gc.TINT16,
- gc.OCMP<<16 | gc.TINT32,
- gc.OCMP<<16 | gc.TPTR32,
- gc.OCMP<<16 | gc.TINT64:
+ case OCMP_ | gc.TBOOL,
+ OCMP_ | gc.TINT8,
+ OCMP_ | gc.TINT16,
+ OCMP_ | gc.TINT32,
+ OCMP_ | gc.TPTR32,
+ OCMP_ | gc.TINT64:
a = ppc64.ACMP
- case gc.OCMP<<16 | gc.TUINT8,
- gc.OCMP<<16 | gc.TUINT16,
- gc.OCMP<<16 | gc.TUINT32,
- gc.OCMP<<16 | gc.TUINT64,
- gc.OCMP<<16 | gc.TPTR64:
+ case OCMP_ | gc.TUINT8,
+ OCMP_ | gc.TUINT16,
+ OCMP_ | gc.TUINT32,
+ OCMP_ | gc.TUINT64,
+ OCMP_ | gc.TPTR64:
a = ppc64.ACMPU
- case gc.OCMP<<16 | gc.TFLOAT32,
- gc.OCMP<<16 | gc.TFLOAT64:
+ case OCMP_ | gc.TFLOAT32,
+ OCMP_ | gc.TFLOAT64:
a = ppc64.AFCMPU
- case gc.OAS<<16 | gc.TBOOL,
- gc.OAS<<16 | gc.TINT8:
+ case OAS_ | gc.TBOOL,
+ OAS_ | gc.TINT8:
a = ppc64.AMOVB
- case gc.OAS<<16 | gc.TUINT8:
+ case OAS_ | gc.TUINT8:
a = ppc64.AMOVBZ
- case gc.OAS<<16 | gc.TINT16:
+ case OAS_ | gc.TINT16:
a = ppc64.AMOVH
- case gc.OAS<<16 | gc.TUINT16:
+ case OAS_ | gc.TUINT16:
a = ppc64.AMOVHZ
- case gc.OAS<<16 | gc.TINT32:
+ case OAS_ | gc.TINT32:
a = ppc64.AMOVW
- case gc.OAS<<16 | gc.TUINT32,
- gc.OAS<<16 | gc.TPTR32:
+ case OAS_ | gc.TUINT32,
+ OAS_ | gc.TPTR32:
a = ppc64.AMOVWZ
- case gc.OAS<<16 | gc.TINT64,
- gc.OAS<<16 | gc.TUINT64,
- gc.OAS<<16 | gc.TPTR64:
+ case OAS_ | gc.TINT64,
+ OAS_ | gc.TUINT64,
+ OAS_ | gc.TPTR64:
a = ppc64.AMOVD
- case gc.OAS<<16 | gc.TFLOAT32:
+ case OAS_ | gc.TFLOAT32:
a = ppc64.AFMOVS
- case gc.OAS<<16 | gc.TFLOAT64:
+ case OAS_ | gc.TFLOAT64:
a = ppc64.AFMOVD
- case gc.OADD<<16 | gc.TINT8,
- gc.OADD<<16 | gc.TUINT8,
- gc.OADD<<16 | gc.TINT16,
- gc.OADD<<16 | gc.TUINT16,
- gc.OADD<<16 | gc.TINT32,
- gc.OADD<<16 | gc.TUINT32,
- gc.OADD<<16 | gc.TPTR32,
- gc.OADD<<16 | gc.TINT64,
- gc.OADD<<16 | gc.TUINT64,
- gc.OADD<<16 | gc.TPTR64:
+ case OADD_ | gc.TINT8,
+ OADD_ | gc.TUINT8,
+ OADD_ | gc.TINT16,
+ OADD_ | gc.TUINT16,
+ OADD_ | gc.TINT32,
+ OADD_ | gc.TUINT32,
+ OADD_ | gc.TPTR32,
+ OADD_ | gc.TINT64,
+ OADD_ | gc.TUINT64,
+ OADD_ | gc.TPTR64:
a = ppc64.AADD
- case gc.OADD<<16 | gc.TFLOAT32:
+ case OADD_ | gc.TFLOAT32:
a = ppc64.AFADDS
- case gc.OADD<<16 | gc.TFLOAT64:
+ case OADD_ | gc.TFLOAT64:
a = ppc64.AFADD
- case gc.OSUB<<16 | gc.TINT8,
- gc.OSUB<<16 | gc.TUINT8,
- gc.OSUB<<16 | gc.TINT16,
- gc.OSUB<<16 | gc.TUINT16,
- gc.OSUB<<16 | gc.TINT32,
- gc.OSUB<<16 | gc.TUINT32,
- gc.OSUB<<16 | gc.TPTR32,
- gc.OSUB<<16 | gc.TINT64,
- gc.OSUB<<16 | gc.TUINT64,
- gc.OSUB<<16 | gc.TPTR64:
+ case OSUB_ | gc.TINT8,
+ OSUB_ | gc.TUINT8,
+ OSUB_ | gc.TINT16,
+ OSUB_ | gc.TUINT16,
+ OSUB_ | gc.TINT32,
+ OSUB_ | gc.TUINT32,
+ OSUB_ | gc.TPTR32,
+ OSUB_ | gc.TINT64,
+ OSUB_ | gc.TUINT64,
+ OSUB_ | gc.TPTR64:
a = ppc64.ASUB
- case gc.OSUB<<16 | gc.TFLOAT32:
+ case OSUB_ | gc.TFLOAT32:
a = ppc64.AFSUBS
- case gc.OSUB<<16 | gc.TFLOAT64:
+ case OSUB_ | gc.TFLOAT64:
a = ppc64.AFSUB
- case gc.OMINUS<<16 | gc.TINT8,
- gc.OMINUS<<16 | gc.TUINT8,
- gc.OMINUS<<16 | gc.TINT16,
- gc.OMINUS<<16 | gc.TUINT16,
- gc.OMINUS<<16 | gc.TINT32,
- gc.OMINUS<<16 | gc.TUINT32,
- gc.OMINUS<<16 | gc.TPTR32,
- gc.OMINUS<<16 | gc.TINT64,
- gc.OMINUS<<16 | gc.TUINT64,
- gc.OMINUS<<16 | gc.TPTR64:
+ case OMINUS_ | gc.TINT8,
+ OMINUS_ | gc.TUINT8,
+ OMINUS_ | gc.TINT16,
+ OMINUS_ | gc.TUINT16,
+ OMINUS_ | gc.TINT32,
+ OMINUS_ | gc.TUINT32,
+ OMINUS_ | gc.TPTR32,
+ OMINUS_ | gc.TINT64,
+ OMINUS_ | gc.TUINT64,
+ OMINUS_ | gc.TPTR64:
a = ppc64.ANEG
- case gc.OAND<<16 | gc.TINT8,
- gc.OAND<<16 | gc.TUINT8,
- gc.OAND<<16 | gc.TINT16,
- gc.OAND<<16 | gc.TUINT16,
- gc.OAND<<16 | gc.TINT32,
- gc.OAND<<16 | gc.TUINT32,
- gc.OAND<<16 | gc.TPTR32,
- gc.OAND<<16 | gc.TINT64,
- gc.OAND<<16 | gc.TUINT64,
- gc.OAND<<16 | gc.TPTR64:
+ case OAND_ | gc.TINT8,
+ OAND_ | gc.TUINT8,
+ OAND_ | gc.TINT16,
+ OAND_ | gc.TUINT16,
+ OAND_ | gc.TINT32,
+ OAND_ | gc.TUINT32,
+ OAND_ | gc.TPTR32,
+ OAND_ | gc.TINT64,
+ OAND_ | gc.TUINT64,
+ OAND_ | gc.TPTR64:
a = ppc64.AAND
- case gc.OOR<<16 | gc.TINT8,
- gc.OOR<<16 | gc.TUINT8,
- gc.OOR<<16 | gc.TINT16,
- gc.OOR<<16 | gc.TUINT16,
- gc.OOR<<16 | gc.TINT32,
- gc.OOR<<16 | gc.TUINT32,
- gc.OOR<<16 | gc.TPTR32,
- gc.OOR<<16 | gc.TINT64,
- gc.OOR<<16 | gc.TUINT64,
- gc.OOR<<16 | gc.TPTR64:
+ case OOR_ | gc.TINT8,
+ OOR_ | gc.TUINT8,
+ OOR_ | gc.TINT16,
+ OOR_ | gc.TUINT16,
+ OOR_ | gc.TINT32,
+ OOR_ | gc.TUINT32,
+ OOR_ | gc.TPTR32,
+ OOR_ | gc.TINT64,
+ OOR_ | gc.TUINT64,
+ OOR_ | gc.TPTR64:
a = ppc64.AOR
- case gc.OXOR<<16 | gc.TINT8,
- gc.OXOR<<16 | gc.TUINT8,
- gc.OXOR<<16 | gc.TINT16,
- gc.OXOR<<16 | gc.TUINT16,
- gc.OXOR<<16 | gc.TINT32,
- gc.OXOR<<16 | gc.TUINT32,
- gc.OXOR<<16 | gc.TPTR32,
- gc.OXOR<<16 | gc.TINT64,
- gc.OXOR<<16 | gc.TUINT64,
- gc.OXOR<<16 | gc.TPTR64:
+ case OXOR_ | gc.TINT8,
+ OXOR_ | gc.TUINT8,
+ OXOR_ | gc.TINT16,
+ OXOR_ | gc.TUINT16,
+ OXOR_ | gc.TINT32,
+ OXOR_ | gc.TUINT32,
+ OXOR_ | gc.TPTR32,
+ OXOR_ | gc.TINT64,
+ OXOR_ | gc.TUINT64,
+ OXOR_ | gc.TPTR64:
a = ppc64.AXOR
// TODO(minux): handle rotates
// a = 0//???; RLDC?
// break;
- case gc.OLSH<<16 | gc.TINT8,
- gc.OLSH<<16 | gc.TUINT8,
- gc.OLSH<<16 | gc.TINT16,
- gc.OLSH<<16 | gc.TUINT16,
- gc.OLSH<<16 | gc.TINT32,
- gc.OLSH<<16 | gc.TUINT32,
- gc.OLSH<<16 | gc.TPTR32,
- gc.OLSH<<16 | gc.TINT64,
- gc.OLSH<<16 | gc.TUINT64,
- gc.OLSH<<16 | gc.TPTR64:
+ case OLSH_ | gc.TINT8,
+ OLSH_ | gc.TUINT8,
+ OLSH_ | gc.TINT16,
+ OLSH_ | gc.TUINT16,
+ OLSH_ | gc.TINT32,
+ OLSH_ | gc.TUINT32,
+ OLSH_ | gc.TPTR32,
+ OLSH_ | gc.TINT64,
+ OLSH_ | gc.TUINT64,
+ OLSH_ | gc.TPTR64:
a = ppc64.ASLD
- case gc.ORSH<<16 | gc.TUINT8,
- gc.ORSH<<16 | gc.TUINT16,
- gc.ORSH<<16 | gc.TUINT32,
- gc.ORSH<<16 | gc.TPTR32,
- gc.ORSH<<16 | gc.TUINT64,
- gc.ORSH<<16 | gc.TPTR64:
+ case ORSH_ | gc.TUINT8,
+ ORSH_ | gc.TUINT16,
+ ORSH_ | gc.TUINT32,
+ ORSH_ | gc.TPTR32,
+ ORSH_ | gc.TUINT64,
+ ORSH_ | gc.TPTR64:
a = ppc64.ASRD
- case gc.ORSH<<16 | gc.TINT8,
- gc.ORSH<<16 | gc.TINT16,
- gc.ORSH<<16 | gc.TINT32,
- gc.ORSH<<16 | gc.TINT64:
+ case ORSH_ | gc.TINT8,
+ ORSH_ | gc.TINT16,
+ ORSH_ | gc.TINT32,
+ ORSH_ | gc.TINT64:
a = ppc64.ASRAD
// TODO(minux): handle rotates
// a = 0//??? RLDC??
// break;
- case gc.OHMUL<<16 | gc.TINT64:
+ case OHMUL_ | gc.TINT64:
a = ppc64.AMULHD
- case gc.OHMUL<<16 | gc.TUINT64,
- gc.OHMUL<<16 | gc.TPTR64:
+ case OHMUL_ | gc.TUINT64,
+ OHMUL_ | gc.TPTR64:
a = ppc64.AMULHDU
- case gc.OMUL<<16 | gc.TINT8,
- gc.OMUL<<16 | gc.TINT16,
- gc.OMUL<<16 | gc.TINT32,
- gc.OMUL<<16 | gc.TINT64:
+ case OMUL_ | gc.TINT8,
+ OMUL_ | gc.TINT16,
+ OMUL_ | gc.TINT32,
+ OMUL_ | gc.TINT64:
a = ppc64.AMULLD
- case gc.OMUL<<16 | gc.TUINT8,
- gc.OMUL<<16 | gc.TUINT16,
- gc.OMUL<<16 | gc.TUINT32,
- gc.OMUL<<16 | gc.TPTR32,
+ case OMUL_ | gc.TUINT8,
+ OMUL_ | gc.TUINT16,
+ OMUL_ | gc.TUINT32,
+ OMUL_ | gc.TPTR32,
// don't use word multiply, the high 32-bit are undefined.
- gc.OMUL<<16 | gc.TUINT64,
- gc.OMUL<<16 | gc.TPTR64:
+ OMUL_ | gc.TUINT64,
+ OMUL_ | gc.TPTR64:
// for 64-bit multiplies, signedness doesn't matter.
a = ppc64.AMULLD
- case gc.OMUL<<16 | gc.TFLOAT32:
+ case OMUL_ | gc.TFLOAT32:
a = ppc64.AFMULS
- case gc.OMUL<<16 | gc.TFLOAT64:
+ case OMUL_ | gc.TFLOAT64:
a = ppc64.AFMUL
- case gc.ODIV<<16 | gc.TINT8,
- gc.ODIV<<16 | gc.TINT16,
- gc.ODIV<<16 | gc.TINT32,
- gc.ODIV<<16 | gc.TINT64:
+ case ODIV_ | gc.TINT8,
+ ODIV_ | gc.TINT16,
+ ODIV_ | gc.TINT32,
+ ODIV_ | gc.TINT64:
a = ppc64.ADIVD
- case gc.ODIV<<16 | gc.TUINT8,
- gc.ODIV<<16 | gc.TUINT16,
- gc.ODIV<<16 | gc.TUINT32,
- gc.ODIV<<16 | gc.TPTR32,
- gc.ODIV<<16 | gc.TUINT64,
- gc.ODIV<<16 | gc.TPTR64:
+ case ODIV_ | gc.TUINT8,
+ ODIV_ | gc.TUINT16,
+ ODIV_ | gc.TUINT32,
+ ODIV_ | gc.TPTR32,
+ ODIV_ | gc.TUINT64,
+ ODIV_ | gc.TPTR64:
a = ppc64.ADIVDU
- case gc.ODIV<<16 | gc.TFLOAT32:
+ case ODIV_ | gc.TFLOAT32:
a = ppc64.AFDIVS
- case gc.ODIV<<16 | gc.TFLOAT64:
+ case ODIV_ | gc.TFLOAT64:
a = ppc64.AFDIV
}
* res = nl % nr
* according to op.
*/
-func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// Have to be careful about handling
// most negative int divided by -1 correctly.
// The hardware will generate undefined result.
* res = nl << nr
* res = nl >> nr
*/
-func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
a := int(optoas(op, nl.Type))
if nr.Op == gc.OLITERAL {
gc.Regfree(&ntmp)
}
-func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
// Reverse comparison to place constant last.
op = gc.Brrev(op)
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op int, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) int {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
+ // avoid constant conversions in switches below
+ const (
+ OMINUS_ = uint32(gc.OMINUS) << 16
+ OLSH_ = uint32(gc.OLSH) << 16
+ ORSH_ = uint32(gc.ORSH) << 16
+ OADD_ = uint32(gc.OADD) << 16
+ OSUB_ = uint32(gc.OSUB) << 16
+ OMUL_ = uint32(gc.OMUL) << 16
+ ODIV_ = uint32(gc.ODIV) << 16
+ OOR_ = uint32(gc.OOR) << 16
+ OAND_ = uint32(gc.OAND) << 16
+ OXOR_ = uint32(gc.OXOR) << 16
+ OEQ_ = uint32(gc.OEQ) << 16
+ ONE_ = uint32(gc.ONE) << 16
+ OLT_ = uint32(gc.OLT) << 16
+ OLE_ = uint32(gc.OLE) << 16
+ OGE_ = uint32(gc.OGE) << 16
+ OGT_ = uint32(gc.OGT) << 16
+ OCMP_ = uint32(gc.OCMP) << 16
+ OAS_ = uint32(gc.OAS) << 16
+ OHMUL_ = uint32(gc.OHMUL) << 16
+ )
+
a := int(obj.AXXX)
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
- case gc.OEQ<<16 | gc.TBOOL,
- gc.OEQ<<16 | gc.TINT8,
- gc.OEQ<<16 | gc.TUINT8,
- gc.OEQ<<16 | gc.TINT16,
- gc.OEQ<<16 | gc.TUINT16,
- gc.OEQ<<16 | gc.TINT32,
- gc.OEQ<<16 | gc.TUINT32,
- gc.OEQ<<16 | gc.TINT64,
- gc.OEQ<<16 | gc.TUINT64,
- gc.OEQ<<16 | gc.TPTR32,
- gc.OEQ<<16 | gc.TPTR64,
- gc.OEQ<<16 | gc.TFLOAT32,
- gc.OEQ<<16 | gc.TFLOAT64:
+ case OEQ_ | gc.TBOOL,
+ OEQ_ | gc.TINT8,
+ OEQ_ | gc.TUINT8,
+ OEQ_ | gc.TINT16,
+ OEQ_ | gc.TUINT16,
+ OEQ_ | gc.TINT32,
+ OEQ_ | gc.TUINT32,
+ OEQ_ | gc.TINT64,
+ OEQ_ | gc.TUINT64,
+ OEQ_ | gc.TPTR32,
+ OEQ_ | gc.TPTR64,
+ OEQ_ | gc.TFLOAT32,
+ OEQ_ | gc.TFLOAT64:
a = ppc64.ABEQ
- case gc.ONE<<16 | gc.TBOOL,
- gc.ONE<<16 | gc.TINT8,
- gc.ONE<<16 | gc.TUINT8,
- gc.ONE<<16 | gc.TINT16,
- gc.ONE<<16 | gc.TUINT16,
- gc.ONE<<16 | gc.TINT32,
- gc.ONE<<16 | gc.TUINT32,
- gc.ONE<<16 | gc.TINT64,
- gc.ONE<<16 | gc.TUINT64,
- gc.ONE<<16 | gc.TPTR32,
- gc.ONE<<16 | gc.TPTR64,
- gc.ONE<<16 | gc.TFLOAT32,
- gc.ONE<<16 | gc.TFLOAT64:
+ case ONE_ | gc.TBOOL,
+ ONE_ | gc.TINT8,
+ ONE_ | gc.TUINT8,
+ ONE_ | gc.TINT16,
+ ONE_ | gc.TUINT16,
+ ONE_ | gc.TINT32,
+ ONE_ | gc.TUINT32,
+ ONE_ | gc.TINT64,
+ ONE_ | gc.TUINT64,
+ ONE_ | gc.TPTR32,
+ ONE_ | gc.TPTR64,
+ ONE_ | gc.TFLOAT32,
+ ONE_ | gc.TFLOAT64:
a = ppc64.ABNE
- case gc.OLT<<16 | gc.TINT8, // ACMP
- gc.OLT<<16 | gc.TINT16,
- gc.OLT<<16 | gc.TINT32,
- gc.OLT<<16 | gc.TINT64,
- gc.OLT<<16 | gc.TUINT8,
+ case OLT_ | gc.TINT8, // ACMP
+ OLT_ | gc.TINT16,
+ OLT_ | gc.TINT32,
+ OLT_ | gc.TINT64,
+ OLT_ | gc.TUINT8,
// ACMPU
- gc.OLT<<16 | gc.TUINT16,
- gc.OLT<<16 | gc.TUINT32,
- gc.OLT<<16 | gc.TUINT64,
- gc.OLT<<16 | gc.TFLOAT32,
+ OLT_ | gc.TUINT16,
+ OLT_ | gc.TUINT32,
+ OLT_ | gc.TUINT64,
+ OLT_ | gc.TFLOAT32,
// AFCMPU
- gc.OLT<<16 | gc.TFLOAT64:
+ OLT_ | gc.TFLOAT64:
a = ppc64.ABLT
- case gc.OLE<<16 | gc.TINT8, // ACMP
- gc.OLE<<16 | gc.TINT16,
- gc.OLE<<16 | gc.TINT32,
- gc.OLE<<16 | gc.TINT64,
- gc.OLE<<16 | gc.TUINT8,
+ case OLE_ | gc.TINT8, // ACMP
+ OLE_ | gc.TINT16,
+ OLE_ | gc.TINT32,
+ OLE_ | gc.TINT64,
+ OLE_ | gc.TUINT8,
// ACMPU
- gc.OLE<<16 | gc.TUINT16,
- gc.OLE<<16 | gc.TUINT32,
- gc.OLE<<16 | gc.TUINT64:
+ OLE_ | gc.TUINT16,
+ OLE_ | gc.TUINT32,
+ OLE_ | gc.TUINT64:
// No OLE for floats, because it mishandles NaN.
// Front end must reverse comparison or use OLT and OEQ together.
a = ppc64.ABLE
- case gc.OGT<<16 | gc.TINT8,
- gc.OGT<<16 | gc.TINT16,
- gc.OGT<<16 | gc.TINT32,
- gc.OGT<<16 | gc.TINT64,
- gc.OGT<<16 | gc.TUINT8,
- gc.OGT<<16 | gc.TUINT16,
- gc.OGT<<16 | gc.TUINT32,
- gc.OGT<<16 | gc.TUINT64,
- gc.OGT<<16 | gc.TFLOAT32,
- gc.OGT<<16 | gc.TFLOAT64:
+ case OGT_ | gc.TINT8,
+ OGT_ | gc.TINT16,
+ OGT_ | gc.TINT32,
+ OGT_ | gc.TINT64,
+ OGT_ | gc.TUINT8,
+ OGT_ | gc.TUINT16,
+ OGT_ | gc.TUINT32,
+ OGT_ | gc.TUINT64,
+ OGT_ | gc.TFLOAT32,
+ OGT_ | gc.TFLOAT64:
a = ppc64.ABGT
- case gc.OGE<<16 | gc.TINT8,
- gc.OGE<<16 | gc.TINT16,
- gc.OGE<<16 | gc.TINT32,
- gc.OGE<<16 | gc.TINT64,
- gc.OGE<<16 | gc.TUINT8,
- gc.OGE<<16 | gc.TUINT16,
- gc.OGE<<16 | gc.TUINT32,
- gc.OGE<<16 | gc.TUINT64:
+ case OGE_ | gc.TINT8,
+ OGE_ | gc.TINT16,
+ OGE_ | gc.TINT32,
+ OGE_ | gc.TINT64,
+ OGE_ | gc.TUINT8,
+ OGE_ | gc.TUINT16,
+ OGE_ | gc.TUINT32,
+ OGE_ | gc.TUINT64:
// No OGE for floats, because it mishandles NaN.
// Front end must reverse comparison or use OLT and OEQ together.
a = ppc64.ABGE
- case gc.OCMP<<16 | gc.TBOOL,
- gc.OCMP<<16 | gc.TINT8,
- gc.OCMP<<16 | gc.TINT16,
- gc.OCMP<<16 | gc.TINT32,
- gc.OCMP<<16 | gc.TPTR32,
- gc.OCMP<<16 | gc.TINT64:
+ case OCMP_ | gc.TBOOL,
+ OCMP_ | gc.TINT8,
+ OCMP_ | gc.TINT16,
+ OCMP_ | gc.TINT32,
+ OCMP_ | gc.TPTR32,
+ OCMP_ | gc.TINT64:
a = ppc64.ACMP
- case gc.OCMP<<16 | gc.TUINT8,
- gc.OCMP<<16 | gc.TUINT16,
- gc.OCMP<<16 | gc.TUINT32,
- gc.OCMP<<16 | gc.TUINT64,
- gc.OCMP<<16 | gc.TPTR64:
+ case OCMP_ | gc.TUINT8,
+ OCMP_ | gc.TUINT16,
+ OCMP_ | gc.TUINT32,
+ OCMP_ | gc.TUINT64,
+ OCMP_ | gc.TPTR64:
a = ppc64.ACMPU
- case gc.OCMP<<16 | gc.TFLOAT32,
- gc.OCMP<<16 | gc.TFLOAT64:
+ case OCMP_ | gc.TFLOAT32,
+ OCMP_ | gc.TFLOAT64:
a = ppc64.AFCMPU
- case gc.OAS<<16 | gc.TBOOL,
- gc.OAS<<16 | gc.TINT8:
+ case OAS_ | gc.TBOOL,
+ OAS_ | gc.TINT8:
a = ppc64.AMOVB
- case gc.OAS<<16 | gc.TUINT8:
+ case OAS_ | gc.TUINT8:
a = ppc64.AMOVBZ
- case gc.OAS<<16 | gc.TINT16:
+ case OAS_ | gc.TINT16:
a = ppc64.AMOVH
- case gc.OAS<<16 | gc.TUINT16:
+ case OAS_ | gc.TUINT16:
a = ppc64.AMOVHZ
- case gc.OAS<<16 | gc.TINT32:
+ case OAS_ | gc.TINT32:
a = ppc64.AMOVW
- case gc.OAS<<16 | gc.TUINT32,
- gc.OAS<<16 | gc.TPTR32:
+ case OAS_ | gc.TUINT32,
+ OAS_ | gc.TPTR32:
a = ppc64.AMOVWZ
- case gc.OAS<<16 | gc.TINT64,
- gc.OAS<<16 | gc.TUINT64,
- gc.OAS<<16 | gc.TPTR64:
+ case OAS_ | gc.TINT64,
+ OAS_ | gc.TUINT64,
+ OAS_ | gc.TPTR64:
a = ppc64.AMOVD
- case gc.OAS<<16 | gc.TFLOAT32:
+ case OAS_ | gc.TFLOAT32:
a = ppc64.AFMOVS
- case gc.OAS<<16 | gc.TFLOAT64:
+ case OAS_ | gc.TFLOAT64:
a = ppc64.AFMOVD
- case gc.OADD<<16 | gc.TINT8,
- gc.OADD<<16 | gc.TUINT8,
- gc.OADD<<16 | gc.TINT16,
- gc.OADD<<16 | gc.TUINT16,
- gc.OADD<<16 | gc.TINT32,
- gc.OADD<<16 | gc.TUINT32,
- gc.OADD<<16 | gc.TPTR32,
- gc.OADD<<16 | gc.TINT64,
- gc.OADD<<16 | gc.TUINT64,
- gc.OADD<<16 | gc.TPTR64:
+ case OADD_ | gc.TINT8,
+ OADD_ | gc.TUINT8,
+ OADD_ | gc.TINT16,
+ OADD_ | gc.TUINT16,
+ OADD_ | gc.TINT32,
+ OADD_ | gc.TUINT32,
+ OADD_ | gc.TPTR32,
+ OADD_ | gc.TINT64,
+ OADD_ | gc.TUINT64,
+ OADD_ | gc.TPTR64:
a = ppc64.AADD
- case gc.OADD<<16 | gc.TFLOAT32:
+ case OADD_ | gc.TFLOAT32:
a = ppc64.AFADDS
- case gc.OADD<<16 | gc.TFLOAT64:
+ case OADD_ | gc.TFLOAT64:
a = ppc64.AFADD
- case gc.OSUB<<16 | gc.TINT8,
- gc.OSUB<<16 | gc.TUINT8,
- gc.OSUB<<16 | gc.TINT16,
- gc.OSUB<<16 | gc.TUINT16,
- gc.OSUB<<16 | gc.TINT32,
- gc.OSUB<<16 | gc.TUINT32,
- gc.OSUB<<16 | gc.TPTR32,
- gc.OSUB<<16 | gc.TINT64,
- gc.OSUB<<16 | gc.TUINT64,
- gc.OSUB<<16 | gc.TPTR64:
+ case OSUB_ | gc.TINT8,
+ OSUB_ | gc.TUINT8,
+ OSUB_ | gc.TINT16,
+ OSUB_ | gc.TUINT16,
+ OSUB_ | gc.TINT32,
+ OSUB_ | gc.TUINT32,
+ OSUB_ | gc.TPTR32,
+ OSUB_ | gc.TINT64,
+ OSUB_ | gc.TUINT64,
+ OSUB_ | gc.TPTR64:
a = ppc64.ASUB
- case gc.OSUB<<16 | gc.TFLOAT32:
+ case OSUB_ | gc.TFLOAT32:
a = ppc64.AFSUBS
- case gc.OSUB<<16 | gc.TFLOAT64:
+ case OSUB_ | gc.TFLOAT64:
a = ppc64.AFSUB
- case gc.OMINUS<<16 | gc.TINT8,
- gc.OMINUS<<16 | gc.TUINT8,
- gc.OMINUS<<16 | gc.TINT16,
- gc.OMINUS<<16 | gc.TUINT16,
- gc.OMINUS<<16 | gc.TINT32,
- gc.OMINUS<<16 | gc.TUINT32,
- gc.OMINUS<<16 | gc.TPTR32,
- gc.OMINUS<<16 | gc.TINT64,
- gc.OMINUS<<16 | gc.TUINT64,
- gc.OMINUS<<16 | gc.TPTR64:
+ case OMINUS_ | gc.TINT8,
+ OMINUS_ | gc.TUINT8,
+ OMINUS_ | gc.TINT16,
+ OMINUS_ | gc.TUINT16,
+ OMINUS_ | gc.TINT32,
+ OMINUS_ | gc.TUINT32,
+ OMINUS_ | gc.TPTR32,
+ OMINUS_ | gc.TINT64,
+ OMINUS_ | gc.TUINT64,
+ OMINUS_ | gc.TPTR64:
a = ppc64.ANEG
- case gc.OAND<<16 | gc.TINT8,
- gc.OAND<<16 | gc.TUINT8,
- gc.OAND<<16 | gc.TINT16,
- gc.OAND<<16 | gc.TUINT16,
- gc.OAND<<16 | gc.TINT32,
- gc.OAND<<16 | gc.TUINT32,
- gc.OAND<<16 | gc.TPTR32,
- gc.OAND<<16 | gc.TINT64,
- gc.OAND<<16 | gc.TUINT64,
- gc.OAND<<16 | gc.TPTR64:
+ case OAND_ | gc.TINT8,
+ OAND_ | gc.TUINT8,
+ OAND_ | gc.TINT16,
+ OAND_ | gc.TUINT16,
+ OAND_ | gc.TINT32,
+ OAND_ | gc.TUINT32,
+ OAND_ | gc.TPTR32,
+ OAND_ | gc.TINT64,
+ OAND_ | gc.TUINT64,
+ OAND_ | gc.TPTR64:
a = ppc64.AAND
- case gc.OOR<<16 | gc.TINT8,
- gc.OOR<<16 | gc.TUINT8,
- gc.OOR<<16 | gc.TINT16,
- gc.OOR<<16 | gc.TUINT16,
- gc.OOR<<16 | gc.TINT32,
- gc.OOR<<16 | gc.TUINT32,
- gc.OOR<<16 | gc.TPTR32,
- gc.OOR<<16 | gc.TINT64,
- gc.OOR<<16 | gc.TUINT64,
- gc.OOR<<16 | gc.TPTR64:
+ case OOR_ | gc.TINT8,
+ OOR_ | gc.TUINT8,
+ OOR_ | gc.TINT16,
+ OOR_ | gc.TUINT16,
+ OOR_ | gc.TINT32,
+ OOR_ | gc.TUINT32,
+ OOR_ | gc.TPTR32,
+ OOR_ | gc.TINT64,
+ OOR_ | gc.TUINT64,
+ OOR_ | gc.TPTR64:
a = ppc64.AOR
- case gc.OXOR<<16 | gc.TINT8,
- gc.OXOR<<16 | gc.TUINT8,
- gc.OXOR<<16 | gc.TINT16,
- gc.OXOR<<16 | gc.TUINT16,
- gc.OXOR<<16 | gc.TINT32,
- gc.OXOR<<16 | gc.TUINT32,
- gc.OXOR<<16 | gc.TPTR32,
- gc.OXOR<<16 | gc.TINT64,
- gc.OXOR<<16 | gc.TUINT64,
- gc.OXOR<<16 | gc.TPTR64:
+ case OXOR_ | gc.TINT8,
+ OXOR_ | gc.TUINT8,
+ OXOR_ | gc.TINT16,
+ OXOR_ | gc.TUINT16,
+ OXOR_ | gc.TINT32,
+ OXOR_ | gc.TUINT32,
+ OXOR_ | gc.TPTR32,
+ OXOR_ | gc.TINT64,
+ OXOR_ | gc.TUINT64,
+ OXOR_ | gc.TPTR64:
a = ppc64.AXOR
// TODO(minux): handle rotates
// a = 0//???; RLDC?
// break;
- case gc.OLSH<<16 | gc.TINT8,
- gc.OLSH<<16 | gc.TUINT8,
- gc.OLSH<<16 | gc.TINT16,
- gc.OLSH<<16 | gc.TUINT16,
- gc.OLSH<<16 | gc.TINT32,
- gc.OLSH<<16 | gc.TUINT32,
- gc.OLSH<<16 | gc.TPTR32,
- gc.OLSH<<16 | gc.TINT64,
- gc.OLSH<<16 | gc.TUINT64,
- gc.OLSH<<16 | gc.TPTR64:
+ case OLSH_ | gc.TINT8,
+ OLSH_ | gc.TUINT8,
+ OLSH_ | gc.TINT16,
+ OLSH_ | gc.TUINT16,
+ OLSH_ | gc.TINT32,
+ OLSH_ | gc.TUINT32,
+ OLSH_ | gc.TPTR32,
+ OLSH_ | gc.TINT64,
+ OLSH_ | gc.TUINT64,
+ OLSH_ | gc.TPTR64:
a = ppc64.ASLD
- case gc.ORSH<<16 | gc.TUINT8,
- gc.ORSH<<16 | gc.TUINT16,
- gc.ORSH<<16 | gc.TUINT32,
- gc.ORSH<<16 | gc.TPTR32,
- gc.ORSH<<16 | gc.TUINT64,
- gc.ORSH<<16 | gc.TPTR64:
+ case ORSH_ | gc.TUINT8,
+ ORSH_ | gc.TUINT16,
+ ORSH_ | gc.TUINT32,
+ ORSH_ | gc.TPTR32,
+ ORSH_ | gc.TUINT64,
+ ORSH_ | gc.TPTR64:
a = ppc64.ASRD
- case gc.ORSH<<16 | gc.TINT8,
- gc.ORSH<<16 | gc.TINT16,
- gc.ORSH<<16 | gc.TINT32,
- gc.ORSH<<16 | gc.TINT64:
+ case ORSH_ | gc.TINT8,
+ ORSH_ | gc.TINT16,
+ ORSH_ | gc.TINT32,
+ ORSH_ | gc.TINT64:
a = ppc64.ASRAD
// TODO(minux): handle rotates
// a = 0//??? RLDC??
// break;
- case gc.OHMUL<<16 | gc.TINT64:
+ case OHMUL_ | gc.TINT64:
a = ppc64.AMULHD
- case gc.OHMUL<<16 | gc.TUINT64,
- gc.OHMUL<<16 | gc.TPTR64:
+ case OHMUL_ | gc.TUINT64,
+ OHMUL_ | gc.TPTR64:
a = ppc64.AMULHDU
- case gc.OMUL<<16 | gc.TINT8,
- gc.OMUL<<16 | gc.TINT16,
- gc.OMUL<<16 | gc.TINT32,
- gc.OMUL<<16 | gc.TINT64:
+ case OMUL_ | gc.TINT8,
+ OMUL_ | gc.TINT16,
+ OMUL_ | gc.TINT32,
+ OMUL_ | gc.TINT64:
a = ppc64.AMULLD
- case gc.OMUL<<16 | gc.TUINT8,
- gc.OMUL<<16 | gc.TUINT16,
- gc.OMUL<<16 | gc.TUINT32,
- gc.OMUL<<16 | gc.TPTR32,
+ case OMUL_ | gc.TUINT8,
+ OMUL_ | gc.TUINT16,
+ OMUL_ | gc.TUINT32,
+ OMUL_ | gc.TPTR32,
// don't use word multiply, the high 32-bit are undefined.
- gc.OMUL<<16 | gc.TUINT64,
- gc.OMUL<<16 | gc.TPTR64:
+ OMUL_ | gc.TUINT64,
+ OMUL_ | gc.TPTR64:
// for 64-bit multiplies, signedness doesn't matter.
a = ppc64.AMULLD
- case gc.OMUL<<16 | gc.TFLOAT32:
+ case OMUL_ | gc.TFLOAT32:
a = ppc64.AFMULS
- case gc.OMUL<<16 | gc.TFLOAT64:
+ case OMUL_ | gc.TFLOAT64:
a = ppc64.AFMUL
- case gc.ODIV<<16 | gc.TINT8,
- gc.ODIV<<16 | gc.TINT16,
- gc.ODIV<<16 | gc.TINT32,
- gc.ODIV<<16 | gc.TINT64:
+ case ODIV_ | gc.TINT8,
+ ODIV_ | gc.TINT16,
+ ODIV_ | gc.TINT32,
+ ODIV_ | gc.TINT64:
a = ppc64.ADIVD
- case gc.ODIV<<16 | gc.TUINT8,
- gc.ODIV<<16 | gc.TUINT16,
- gc.ODIV<<16 | gc.TUINT32,
- gc.ODIV<<16 | gc.TPTR32,
- gc.ODIV<<16 | gc.TUINT64,
- gc.ODIV<<16 | gc.TPTR64:
+ case ODIV_ | gc.TUINT8,
+ ODIV_ | gc.TUINT16,
+ ODIV_ | gc.TUINT32,
+ ODIV_ | gc.TPTR32,
+ ODIV_ | gc.TUINT64,
+ ODIV_ | gc.TPTR64:
a = ppc64.ADIVDU
- case gc.ODIV<<16 | gc.TFLOAT32:
+ case ODIV_ | gc.TFLOAT32:
a = ppc64.AFDIVS
- case gc.ODIV<<16 | gc.TFLOAT64:
+ case ODIV_ | gc.TFLOAT64:
a = ppc64.AFDIV
}
gins(x86.AMOVL, &lo1, &ax)
gins(x86.AMOVL, &hi1, &dx)
- gins(optoas(int(n.Op), lo1.Type), &lo2, &ax)
- gins(optoas(int(n.Op), lo1.Type), &hi2, &dx)
+ gins(optoas(n.Op, lo1.Type), &lo2, &ax)
+ gins(optoas(n.Op, lo1.Type), &hi2, &dx)
}
if gc.Is64(r.Type) {
* generate comparison of nl, nr, both 64-bit.
* nl is memory; nr is constant or memory.
*/
-func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
+func cmp64(nl *gc.Node, nr *gc.Node, op gc.Op, likely int, to *obj.Prog) {
var lo1 gc.Node
var hi1 gc.Node
var lo2 gc.Node
* res = nl % nr
* according to op.
*/
-func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) {
+func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.Node) {
// Have to be careful about handling
// most negative int divided by -1 correctly.
// The hardware will trap.
* res = nl / nr
* res = nl % nr
*/
-func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_div(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if gc.Is64(nl.Type) {
gc.Fatalf("cgen_div %v", nl.Type)
}
* res = nl << nr
* res = nl >> nr
*/
-func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nl.Type.Width > 4 {
gc.Fatalf("cgen_shift %v", nl.Type)
}
* there is no 2-operand byte multiply instruction so
* we do a full-width multiplication and truncate afterwards.
*/
-func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
+func cgen_bmul(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
if optoas(op, nl.Type) != x86.AIMULB {
return false
}
if nl.Ullman >= nr.Ullman {
gc.Cgen(nl, &f0)
if nr.Addable {
- gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
+ gins(foptoas(n.Op, n.Type, 0), nr, &f0)
} else {
gc.Cgen(nr, &f0)
- gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
+ gins(foptoas(n.Op, n.Type, Fpop), &f0, &f1)
}
} else {
gc.Cgen(nr, &f0)
if nl.Addable {
- gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
+ gins(foptoas(n.Op, n.Type, Frev), nl, &f0)
} else {
gc.Cgen(nl, &f0)
- gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
+ gins(foptoas(n.Op, n.Type, Frev|Fpop), &f0, &f1)
}
}
gc.Cgen(nl, &f0)
if n.Op != gc.OCONV && n.Op != gc.OPLUS {
- gins(foptoas(int(n.Op), n.Type, 0), nil, nil)
+ gins(foptoas(n.Op, n.Type, 0), nil, nil)
}
gmove(&f0, res)
return
// symmetric binary
case gc.OADD,
gc.OMUL:
- a = foptoas(int(n.Op), nl.Type, 0)
+ a = foptoas(n.Op, nl.Type, 0)
goto sbop
case gc.OSUB,
gc.OMOD,
gc.ODIV:
- a = foptoas(int(n.Op), nl.Type, 0)
+ a = foptoas(n.Op, nl.Type, 0)
goto abop
}
func bgen_float(n *gc.Node, wantTrue bool, likely int, to *obj.Prog) {
nl := n.Left
nr := n.Right
- a := int(n.Op)
+ op := n.Op
if !wantTrue {
// brcom is not valid on floats when NaN is involved.
p1 := gc.Gbranch(obj.AJMP, nil, 0)
}
if gc.Thearch.Use387 {
- a = gc.Brrev(a) // because the args are stacked
- if a == gc.OGE || a == gc.OGT {
+ op = gc.Brrev(op) // because the args are stacked
+ if op == gc.OGE || op == gc.OGT {
// only < and <= work right with NaN; reverse if needed
nl, nr = nr, nl
- a = gc.Brrev(a)
+ op = gc.Brrev(op)
}
var ax, n2, tmp gc.Node
nl = &n3
}
- if a == gc.OGE || a == gc.OGT {
- // only < and <= work right with NaN; reverse if needed
+ if op == gc.OGE || op == gc.OGT {
 + // only < and <= work right with NaN; reverse if needed
nl, nr = nr, nl
- a = gc.Brrev(a)
+ op = gc.Brrev(op)
}
gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
gc.Regfree(nr)
}
- switch a {
+ switch op {
case gc.OEQ:
// neither NE nor P
p1 := gc.Gbranch(x86.AJNE, nil, -likely)
gc.Patch(gc.Gbranch(x86.AJNE, nil, likely), to)
gc.Patch(gc.Gbranch(x86.AJPS, nil, likely), to)
default:
- gc.Patch(gc.Gbranch(optoas(a, nr.Type), nil, likely), to)
+ gc.Patch(gc.Gbranch(optoas(op, nr.Type), nil, likely), to)
}
}
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op int, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) int {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
+ // avoid constant conversions in switches below
+ const (
+ OMINUS_ = uint32(gc.OMINUS) << 16
+ OLSH_ = uint32(gc.OLSH) << 16
+ ORSH_ = uint32(gc.ORSH) << 16
+ OADD_ = uint32(gc.OADD) << 16
+ OSUB_ = uint32(gc.OSUB) << 16
+ OMUL_ = uint32(gc.OMUL) << 16
+ ODIV_ = uint32(gc.ODIV) << 16
+ OMOD_ = uint32(gc.OMOD) << 16
+ OOR_ = uint32(gc.OOR) << 16
+ OAND_ = uint32(gc.OAND) << 16
+ OXOR_ = uint32(gc.OXOR) << 16
+ OEQ_ = uint32(gc.OEQ) << 16
+ ONE_ = uint32(gc.ONE) << 16
+ OLT_ = uint32(gc.OLT) << 16
+ OLE_ = uint32(gc.OLE) << 16
+ OGE_ = uint32(gc.OGE) << 16
+ OGT_ = uint32(gc.OGT) << 16
+ OCMP_ = uint32(gc.OCMP) << 16
+ OAS_ = uint32(gc.OAS) << 16
+ OHMUL_ = uint32(gc.OHMUL) << 16
+ OADDR_ = uint32(gc.OADDR) << 16
+ OINC_ = uint32(gc.OINC) << 16
+ ODEC_ = uint32(gc.ODEC) << 16
+ OLROT_ = uint32(gc.OLROT) << 16
+ OEXTEND_ = uint32(gc.OEXTEND) << 16
+ OCOM_ = uint32(gc.OCOM) << 16
+ )
+
a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)
- case gc.OADDR<<16 | gc.TPTR32:
+ case OADDR_ | gc.TPTR32:
a = x86.ALEAL
- case gc.OEQ<<16 | gc.TBOOL,
- gc.OEQ<<16 | gc.TINT8,
- gc.OEQ<<16 | gc.TUINT8,
- gc.OEQ<<16 | gc.TINT16,
- gc.OEQ<<16 | gc.TUINT16,
- gc.OEQ<<16 | gc.TINT32,
- gc.OEQ<<16 | gc.TUINT32,
- gc.OEQ<<16 | gc.TINT64,
- gc.OEQ<<16 | gc.TUINT64,
- gc.OEQ<<16 | gc.TPTR32,
- gc.OEQ<<16 | gc.TPTR64,
- gc.OEQ<<16 | gc.TFLOAT32,
- gc.OEQ<<16 | gc.TFLOAT64:
+ case OEQ_ | gc.TBOOL,
+ OEQ_ | gc.TINT8,
+ OEQ_ | gc.TUINT8,
+ OEQ_ | gc.TINT16,
+ OEQ_ | gc.TUINT16,
+ OEQ_ | gc.TINT32,
+ OEQ_ | gc.TUINT32,
+ OEQ_ | gc.TINT64,
+ OEQ_ | gc.TUINT64,
+ OEQ_ | gc.TPTR32,
+ OEQ_ | gc.TPTR64,
+ OEQ_ | gc.TFLOAT32,
+ OEQ_ | gc.TFLOAT64:
a = x86.AJEQ
- case gc.ONE<<16 | gc.TBOOL,
- gc.ONE<<16 | gc.TINT8,
- gc.ONE<<16 | gc.TUINT8,
- gc.ONE<<16 | gc.TINT16,
- gc.ONE<<16 | gc.TUINT16,
- gc.ONE<<16 | gc.TINT32,
- gc.ONE<<16 | gc.TUINT32,
- gc.ONE<<16 | gc.TINT64,
- gc.ONE<<16 | gc.TUINT64,
- gc.ONE<<16 | gc.TPTR32,
- gc.ONE<<16 | gc.TPTR64,
- gc.ONE<<16 | gc.TFLOAT32,
- gc.ONE<<16 | gc.TFLOAT64:
+ case ONE_ | gc.TBOOL,
+ ONE_ | gc.TINT8,
+ ONE_ | gc.TUINT8,
+ ONE_ | gc.TINT16,
+ ONE_ | gc.TUINT16,
+ ONE_ | gc.TINT32,
+ ONE_ | gc.TUINT32,
+ ONE_ | gc.TINT64,
+ ONE_ | gc.TUINT64,
+ ONE_ | gc.TPTR32,
+ ONE_ | gc.TPTR64,
+ ONE_ | gc.TFLOAT32,
+ ONE_ | gc.TFLOAT64:
a = x86.AJNE
- case gc.OLT<<16 | gc.TINT8,
- gc.OLT<<16 | gc.TINT16,
- gc.OLT<<16 | gc.TINT32,
- gc.OLT<<16 | gc.TINT64:
+ case OLT_ | gc.TINT8,
+ OLT_ | gc.TINT16,
+ OLT_ | gc.TINT32,
+ OLT_ | gc.TINT64:
a = x86.AJLT
- case gc.OLT<<16 | gc.TUINT8,
- gc.OLT<<16 | gc.TUINT16,
- gc.OLT<<16 | gc.TUINT32,
- gc.OLT<<16 | gc.TUINT64:
+ case OLT_ | gc.TUINT8,
+ OLT_ | gc.TUINT16,
+ OLT_ | gc.TUINT32,
+ OLT_ | gc.TUINT64:
a = x86.AJCS
- case gc.OLE<<16 | gc.TINT8,
- gc.OLE<<16 | gc.TINT16,
- gc.OLE<<16 | gc.TINT32,
- gc.OLE<<16 | gc.TINT64:
+ case OLE_ | gc.TINT8,
+ OLE_ | gc.TINT16,
+ OLE_ | gc.TINT32,
+ OLE_ | gc.TINT64:
a = x86.AJLE
- case gc.OLE<<16 | gc.TUINT8,
- gc.OLE<<16 | gc.TUINT16,
- gc.OLE<<16 | gc.TUINT32,
- gc.OLE<<16 | gc.TUINT64:
+ case OLE_ | gc.TUINT8,
+ OLE_ | gc.TUINT16,
+ OLE_ | gc.TUINT32,
+ OLE_ | gc.TUINT64:
a = x86.AJLS
- case gc.OGT<<16 | gc.TINT8,
- gc.OGT<<16 | gc.TINT16,
- gc.OGT<<16 | gc.TINT32,
- gc.OGT<<16 | gc.TINT64:
+ case OGT_ | gc.TINT8,
+ OGT_ | gc.TINT16,
+ OGT_ | gc.TINT32,
+ OGT_ | gc.TINT64:
a = x86.AJGT
- case gc.OGT<<16 | gc.TUINT8,
- gc.OGT<<16 | gc.TUINT16,
- gc.OGT<<16 | gc.TUINT32,
- gc.OGT<<16 | gc.TUINT64,
- gc.OLT<<16 | gc.TFLOAT32,
- gc.OLT<<16 | gc.TFLOAT64:
+ case OGT_ | gc.TUINT8,
+ OGT_ | gc.TUINT16,
+ OGT_ | gc.TUINT32,
+ OGT_ | gc.TUINT64,
+ OLT_ | gc.TFLOAT32,
+ OLT_ | gc.TFLOAT64:
a = x86.AJHI
- case gc.OGE<<16 | gc.TINT8,
- gc.OGE<<16 | gc.TINT16,
- gc.OGE<<16 | gc.TINT32,
- gc.OGE<<16 | gc.TINT64:
+ case OGE_ | gc.TINT8,
+ OGE_ | gc.TINT16,
+ OGE_ | gc.TINT32,
+ OGE_ | gc.TINT64:
a = x86.AJGE
- case gc.OGE<<16 | gc.TUINT8,
- gc.OGE<<16 | gc.TUINT16,
- gc.OGE<<16 | gc.TUINT32,
- gc.OGE<<16 | gc.TUINT64,
- gc.OLE<<16 | gc.TFLOAT32,
- gc.OLE<<16 | gc.TFLOAT64:
+ case OGE_ | gc.TUINT8,
+ OGE_ | gc.TUINT16,
+ OGE_ | gc.TUINT32,
+ OGE_ | gc.TUINT64,
+ OLE_ | gc.TFLOAT32,
+ OLE_ | gc.TFLOAT64:
a = x86.AJCC
- case gc.OCMP<<16 | gc.TBOOL,
- gc.OCMP<<16 | gc.TINT8,
- gc.OCMP<<16 | gc.TUINT8:
+ case OCMP_ | gc.TBOOL,
+ OCMP_ | gc.TINT8,
+ OCMP_ | gc.TUINT8:
a = x86.ACMPB
- case gc.OCMP<<16 | gc.TINT16,
- gc.OCMP<<16 | gc.TUINT16:
+ case OCMP_ | gc.TINT16,
+ OCMP_ | gc.TUINT16:
a = x86.ACMPW
- case gc.OCMP<<16 | gc.TINT32,
- gc.OCMP<<16 | gc.TUINT32,
- gc.OCMP<<16 | gc.TPTR32:
+ case OCMP_ | gc.TINT32,
+ OCMP_ | gc.TUINT32,
+ OCMP_ | gc.TPTR32:
a = x86.ACMPL
- case gc.OAS<<16 | gc.TBOOL,
- gc.OAS<<16 | gc.TINT8,
- gc.OAS<<16 | gc.TUINT8:
+ case OAS_ | gc.TBOOL,
+ OAS_ | gc.TINT8,
+ OAS_ | gc.TUINT8:
a = x86.AMOVB
- case gc.OAS<<16 | gc.TINT16,
- gc.OAS<<16 | gc.TUINT16:
+ case OAS_ | gc.TINT16,
+ OAS_ | gc.TUINT16:
a = x86.AMOVW
- case gc.OAS<<16 | gc.TINT32,
- gc.OAS<<16 | gc.TUINT32,
- gc.OAS<<16 | gc.TPTR32:
+ case OAS_ | gc.TINT32,
+ OAS_ | gc.TUINT32,
+ OAS_ | gc.TPTR32:
a = x86.AMOVL
- case gc.OAS<<16 | gc.TFLOAT32:
+ case OAS_ | gc.TFLOAT32:
a = x86.AMOVSS
- case gc.OAS<<16 | gc.TFLOAT64:
+ case OAS_ | gc.TFLOAT64:
a = x86.AMOVSD
- case gc.OADD<<16 | gc.TINT8,
- gc.OADD<<16 | gc.TUINT8:
+ case OADD_ | gc.TINT8,
+ OADD_ | gc.TUINT8:
a = x86.AADDB
- case gc.OADD<<16 | gc.TINT16,
- gc.OADD<<16 | gc.TUINT16:
+ case OADD_ | gc.TINT16,
+ OADD_ | gc.TUINT16:
a = x86.AADDW
- case gc.OADD<<16 | gc.TINT32,
- gc.OADD<<16 | gc.TUINT32,
- gc.OADD<<16 | gc.TPTR32:
+ case OADD_ | gc.TINT32,
+ OADD_ | gc.TUINT32,
+ OADD_ | gc.TPTR32:
a = x86.AADDL
- case gc.OSUB<<16 | gc.TINT8,
- gc.OSUB<<16 | gc.TUINT8:
+ case OSUB_ | gc.TINT8,
+ OSUB_ | gc.TUINT8:
a = x86.ASUBB
- case gc.OSUB<<16 | gc.TINT16,
- gc.OSUB<<16 | gc.TUINT16:
+ case OSUB_ | gc.TINT16,
+ OSUB_ | gc.TUINT16:
a = x86.ASUBW
- case gc.OSUB<<16 | gc.TINT32,
- gc.OSUB<<16 | gc.TUINT32,
- gc.OSUB<<16 | gc.TPTR32:
+ case OSUB_ | gc.TINT32,
+ OSUB_ | gc.TUINT32,
+ OSUB_ | gc.TPTR32:
a = x86.ASUBL
- case gc.OINC<<16 | gc.TINT8,
- gc.OINC<<16 | gc.TUINT8:
+ case OINC_ | gc.TINT8,
+ OINC_ | gc.TUINT8:
a = x86.AINCB
- case gc.OINC<<16 | gc.TINT16,
- gc.OINC<<16 | gc.TUINT16:
+ case OINC_ | gc.TINT16,
+ OINC_ | gc.TUINT16:
a = x86.AINCW
- case gc.OINC<<16 | gc.TINT32,
- gc.OINC<<16 | gc.TUINT32,
- gc.OINC<<16 | gc.TPTR32:
+ case OINC_ | gc.TINT32,
+ OINC_ | gc.TUINT32,
+ OINC_ | gc.TPTR32:
a = x86.AINCL
- case gc.ODEC<<16 | gc.TINT8,
- gc.ODEC<<16 | gc.TUINT8:
+ case ODEC_ | gc.TINT8,
+ ODEC_ | gc.TUINT8:
a = x86.ADECB
- case gc.ODEC<<16 | gc.TINT16,
- gc.ODEC<<16 | gc.TUINT16:
+ case ODEC_ | gc.TINT16,
+ ODEC_ | gc.TUINT16:
a = x86.ADECW
- case gc.ODEC<<16 | gc.TINT32,
- gc.ODEC<<16 | gc.TUINT32,
- gc.ODEC<<16 | gc.TPTR32:
+ case ODEC_ | gc.TINT32,
+ ODEC_ | gc.TUINT32,
+ ODEC_ | gc.TPTR32:
a = x86.ADECL
- case gc.OCOM<<16 | gc.TINT8,
- gc.OCOM<<16 | gc.TUINT8:
+ case OCOM_ | gc.TINT8,
+ OCOM_ | gc.TUINT8:
a = x86.ANOTB
- case gc.OCOM<<16 | gc.TINT16,
- gc.OCOM<<16 | gc.TUINT16:
+ case OCOM_ | gc.TINT16,
+ OCOM_ | gc.TUINT16:
a = x86.ANOTW
- case gc.OCOM<<16 | gc.TINT32,
- gc.OCOM<<16 | gc.TUINT32,
- gc.OCOM<<16 | gc.TPTR32:
+ case OCOM_ | gc.TINT32,
+ OCOM_ | gc.TUINT32,
+ OCOM_ | gc.TPTR32:
a = x86.ANOTL
- case gc.OMINUS<<16 | gc.TINT8,
- gc.OMINUS<<16 | gc.TUINT8:
+ case OMINUS_ | gc.TINT8,
+ OMINUS_ | gc.TUINT8:
a = x86.ANEGB
- case gc.OMINUS<<16 | gc.TINT16,
- gc.OMINUS<<16 | gc.TUINT16:
+ case OMINUS_ | gc.TINT16,
+ OMINUS_ | gc.TUINT16:
a = x86.ANEGW
- case gc.OMINUS<<16 | gc.TINT32,
- gc.OMINUS<<16 | gc.TUINT32,
- gc.OMINUS<<16 | gc.TPTR32:
+ case OMINUS_ | gc.TINT32,
+ OMINUS_ | gc.TUINT32,
+ OMINUS_ | gc.TPTR32:
a = x86.ANEGL
- case gc.OAND<<16 | gc.TINT8,
- gc.OAND<<16 | gc.TUINT8:
+ case OAND_ | gc.TINT8,
+ OAND_ | gc.TUINT8:
a = x86.AANDB
- case gc.OAND<<16 | gc.TINT16,
- gc.OAND<<16 | gc.TUINT16:
+ case OAND_ | gc.TINT16,
+ OAND_ | gc.TUINT16:
a = x86.AANDW
- case gc.OAND<<16 | gc.TINT32,
- gc.OAND<<16 | gc.TUINT32,
- gc.OAND<<16 | gc.TPTR32:
+ case OAND_ | gc.TINT32,
+ OAND_ | gc.TUINT32,
+ OAND_ | gc.TPTR32:
a = x86.AANDL
- case gc.OOR<<16 | gc.TINT8,
- gc.OOR<<16 | gc.TUINT8:
+ case OOR_ | gc.TINT8,
+ OOR_ | gc.TUINT8:
a = x86.AORB
- case gc.OOR<<16 | gc.TINT16,
- gc.OOR<<16 | gc.TUINT16:
+ case OOR_ | gc.TINT16,
+ OOR_ | gc.TUINT16:
a = x86.AORW
- case gc.OOR<<16 | gc.TINT32,
- gc.OOR<<16 | gc.TUINT32,
- gc.OOR<<16 | gc.TPTR32:
+ case OOR_ | gc.TINT32,
+ OOR_ | gc.TUINT32,
+ OOR_ | gc.TPTR32:
a = x86.AORL
- case gc.OXOR<<16 | gc.TINT8,
- gc.OXOR<<16 | gc.TUINT8:
+ case OXOR_ | gc.TINT8,
+ OXOR_ | gc.TUINT8:
a = x86.AXORB
- case gc.OXOR<<16 | gc.TINT16,
- gc.OXOR<<16 | gc.TUINT16:
+ case OXOR_ | gc.TINT16,
+ OXOR_ | gc.TUINT16:
a = x86.AXORW
- case gc.OXOR<<16 | gc.TINT32,
- gc.OXOR<<16 | gc.TUINT32,
- gc.OXOR<<16 | gc.TPTR32:
+ case OXOR_ | gc.TINT32,
+ OXOR_ | gc.TUINT32,
+ OXOR_ | gc.TPTR32:
a = x86.AXORL
- case gc.OLROT<<16 | gc.TINT8,
- gc.OLROT<<16 | gc.TUINT8:
+ case OLROT_ | gc.TINT8,
+ OLROT_ | gc.TUINT8:
a = x86.AROLB
- case gc.OLROT<<16 | gc.TINT16,
- gc.OLROT<<16 | gc.TUINT16:
+ case OLROT_ | gc.TINT16,
+ OLROT_ | gc.TUINT16:
a = x86.AROLW
- case gc.OLROT<<16 | gc.TINT32,
- gc.OLROT<<16 | gc.TUINT32,
- gc.OLROT<<16 | gc.TPTR32:
+ case OLROT_ | gc.TINT32,
+ OLROT_ | gc.TUINT32,
+ OLROT_ | gc.TPTR32:
a = x86.AROLL
- case gc.OLSH<<16 | gc.TINT8,
- gc.OLSH<<16 | gc.TUINT8:
+ case OLSH_ | gc.TINT8,
+ OLSH_ | gc.TUINT8:
a = x86.ASHLB
- case gc.OLSH<<16 | gc.TINT16,
- gc.OLSH<<16 | gc.TUINT16:
+ case OLSH_ | gc.TINT16,
+ OLSH_ | gc.TUINT16:
a = x86.ASHLW
- case gc.OLSH<<16 | gc.TINT32,
- gc.OLSH<<16 | gc.TUINT32,
- gc.OLSH<<16 | gc.TPTR32:
+ case OLSH_ | gc.TINT32,
+ OLSH_ | gc.TUINT32,
+ OLSH_ | gc.TPTR32:
a = x86.ASHLL
- case gc.ORSH<<16 | gc.TUINT8:
+ case ORSH_ | gc.TUINT8:
a = x86.ASHRB
- case gc.ORSH<<16 | gc.TUINT16:
+ case ORSH_ | gc.TUINT16:
a = x86.ASHRW
- case gc.ORSH<<16 | gc.TUINT32,
- gc.ORSH<<16 | gc.TPTR32:
+ case ORSH_ | gc.TUINT32,
+ ORSH_ | gc.TPTR32:
a = x86.ASHRL
- case gc.ORSH<<16 | gc.TINT8:
+ case ORSH_ | gc.TINT8:
a = x86.ASARB
- case gc.ORSH<<16 | gc.TINT16:
+ case ORSH_ | gc.TINT16:
a = x86.ASARW
- case gc.ORSH<<16 | gc.TINT32:
+ case ORSH_ | gc.TINT32:
a = x86.ASARL
- case gc.OHMUL<<16 | gc.TINT8,
- gc.OMUL<<16 | gc.TINT8,
- gc.OMUL<<16 | gc.TUINT8:
+ case OHMUL_ | gc.TINT8,
+ OMUL_ | gc.TINT8,
+ OMUL_ | gc.TUINT8:
a = x86.AIMULB
- case gc.OHMUL<<16 | gc.TINT16,
- gc.OMUL<<16 | gc.TINT16,
- gc.OMUL<<16 | gc.TUINT16:
+ case OHMUL_ | gc.TINT16,
+ OMUL_ | gc.TINT16,
+ OMUL_ | gc.TUINT16:
a = x86.AIMULW
- case gc.OHMUL<<16 | gc.TINT32,
- gc.OMUL<<16 | gc.TINT32,
- gc.OMUL<<16 | gc.TUINT32,
- gc.OMUL<<16 | gc.TPTR32:
+ case OHMUL_ | gc.TINT32,
+ OMUL_ | gc.TINT32,
+ OMUL_ | gc.TUINT32,
+ OMUL_ | gc.TPTR32:
a = x86.AIMULL
- case gc.OHMUL<<16 | gc.TUINT8:
+ case OHMUL_ | gc.TUINT8:
a = x86.AMULB
- case gc.OHMUL<<16 | gc.TUINT16:
+ case OHMUL_ | gc.TUINT16:
a = x86.AMULW
- case gc.OHMUL<<16 | gc.TUINT32,
- gc.OHMUL<<16 | gc.TPTR32:
+ case OHMUL_ | gc.TUINT32,
+ OHMUL_ | gc.TPTR32:
a = x86.AMULL
- case gc.ODIV<<16 | gc.TINT8,
- gc.OMOD<<16 | gc.TINT8:
+ case ODIV_ | gc.TINT8,
+ OMOD_ | gc.TINT8:
a = x86.AIDIVB
- case gc.ODIV<<16 | gc.TUINT8,
- gc.OMOD<<16 | gc.TUINT8:
+ case ODIV_ | gc.TUINT8,
+ OMOD_ | gc.TUINT8:
a = x86.ADIVB
- case gc.ODIV<<16 | gc.TINT16,
- gc.OMOD<<16 | gc.TINT16:
+ case ODIV_ | gc.TINT16,
+ OMOD_ | gc.TINT16:
a = x86.AIDIVW
- case gc.ODIV<<16 | gc.TUINT16,
- gc.OMOD<<16 | gc.TUINT16:
+ case ODIV_ | gc.TUINT16,
+ OMOD_ | gc.TUINT16:
a = x86.ADIVW
- case gc.ODIV<<16 | gc.TINT32,
- gc.OMOD<<16 | gc.TINT32:
+ case ODIV_ | gc.TINT32,
+ OMOD_ | gc.TINT32:
a = x86.AIDIVL
- case gc.ODIV<<16 | gc.TUINT32,
- gc.ODIV<<16 | gc.TPTR32,
- gc.OMOD<<16 | gc.TUINT32,
- gc.OMOD<<16 | gc.TPTR32:
+ case ODIV_ | gc.TUINT32,
+ ODIV_ | gc.TPTR32,
+ OMOD_ | gc.TUINT32,
+ OMOD_ | gc.TPTR32:
a = x86.ADIVL
- case gc.OEXTEND<<16 | gc.TINT16:
+ case OEXTEND_ | gc.TINT16:
a = x86.ACWD
- case gc.OEXTEND<<16 | gc.TINT32:
+ case OEXTEND_ | gc.TINT32:
a = x86.ACDQ
}
return a
}
-func foptoas(op int, t *gc.Type, flg int) int {
+func foptoas(op gc.Op, t *gc.Type, flg int) int {
a := obj.AXXX
- et := int(gc.Simtype[t.Etype])
+ et := gc.Simtype[t.Etype]
+
+ // avoid constant conversions in switches below
+ const (
+ OCMP_ = uint32(gc.OCMP) << 16
+ OAS_ = uint32(gc.OAS) << 16
+ OADD_ = uint32(gc.OADD) << 16
+ OSUB_ = uint32(gc.OSUB) << 16
+ OMUL_ = uint32(gc.OMUL) << 16
+ ODIV_ = uint32(gc.ODIV) << 16
+ OMINUS_ = uint32(gc.OMINUS) << 16
+ )
if !gc.Thearch.Use387 {
switch uint32(op)<<16 | uint32(et) {
default:
gc.Fatalf("foptoas-sse: no entry %v-%v", gc.Oconv(int(op), 0), t)
- case gc.OCMP<<16 | gc.TFLOAT32:
+ case OCMP_ | gc.TFLOAT32:
a = x86.AUCOMISS
- case gc.OCMP<<16 | gc.TFLOAT64:
+ case OCMP_ | gc.TFLOAT64:
a = x86.AUCOMISD
- case gc.OAS<<16 | gc.TFLOAT32:
+ case OAS_ | gc.TFLOAT32:
a = x86.AMOVSS
- case gc.OAS<<16 | gc.TFLOAT64:
+ case OAS_ | gc.TFLOAT64:
a = x86.AMOVSD
- case gc.OADD<<16 | gc.TFLOAT32:
+ case OADD_ | gc.TFLOAT32:
a = x86.AADDSS
- case gc.OADD<<16 | gc.TFLOAT64:
+ case OADD_ | gc.TFLOAT64:
a = x86.AADDSD
- case gc.OSUB<<16 | gc.TFLOAT32:
+ case OSUB_ | gc.TFLOAT32:
a = x86.ASUBSS
- case gc.OSUB<<16 | gc.TFLOAT64:
+ case OSUB_ | gc.TFLOAT64:
a = x86.ASUBSD
- case gc.OMUL<<16 | gc.TFLOAT32:
+ case OMUL_ | gc.TFLOAT32:
a = x86.AMULSS
- case gc.OMUL<<16 | gc.TFLOAT64:
+ case OMUL_ | gc.TFLOAT64:
a = x86.AMULSD
- case gc.ODIV<<16 | gc.TFLOAT32:
+ case ODIV_ | gc.TFLOAT32:
a = x86.ADIVSS
- case gc.ODIV<<16 | gc.TFLOAT64:
+ case ODIV_ | gc.TFLOAT64:
a = x86.ADIVSD
}
}
switch uint32(op)<<16 | (uint32(et)<<8 | uint32(flg)) {
- case gc.OADD<<16 | (gc.TFLOAT32<<8 | 0):
+ case OADD_ | (gc.TFLOAT32<<8 | 0):
return x86.AFADDF
- case gc.OADD<<16 | (gc.TFLOAT64<<8 | 0):
+ case OADD_ | (gc.TFLOAT64<<8 | 0):
return x86.AFADDD
- case gc.OADD<<16 | (gc.TFLOAT64<<8 | Fpop):
+ case OADD_ | (gc.TFLOAT64<<8 | Fpop):
return x86.AFADDDP
- case gc.OSUB<<16 | (gc.TFLOAT32<<8 | 0):
+ case OSUB_ | (gc.TFLOAT32<<8 | 0):
return x86.AFSUBF
- case gc.OSUB<<16 | (gc.TFLOAT32<<8 | Frev):
+ case OSUB_ | (gc.TFLOAT32<<8 | Frev):
return x86.AFSUBRF
- case gc.OSUB<<16 | (gc.TFLOAT64<<8 | 0):
+ case OSUB_ | (gc.TFLOAT64<<8 | 0):
return x86.AFSUBD
- case gc.OSUB<<16 | (gc.TFLOAT64<<8 | Frev):
+ case OSUB_ | (gc.TFLOAT64<<8 | Frev):
return x86.AFSUBRD
- case gc.OSUB<<16 | (gc.TFLOAT64<<8 | Fpop):
+ case OSUB_ | (gc.TFLOAT64<<8 | Fpop):
return x86.AFSUBDP
- case gc.OSUB<<16 | (gc.TFLOAT64<<8 | (Fpop | Frev)):
+ case OSUB_ | (gc.TFLOAT64<<8 | (Fpop | Frev)):
return x86.AFSUBRDP
- case gc.OMUL<<16 | (gc.TFLOAT32<<8 | 0):
+ case OMUL_ | (gc.TFLOAT32<<8 | 0):
return x86.AFMULF
- case gc.OMUL<<16 | (gc.TFLOAT64<<8 | 0):
+ case OMUL_ | (gc.TFLOAT64<<8 | 0):
return x86.AFMULD
- case gc.OMUL<<16 | (gc.TFLOAT64<<8 | Fpop):
+ case OMUL_ | (gc.TFLOAT64<<8 | Fpop):
return x86.AFMULDP
- case gc.ODIV<<16 | (gc.TFLOAT32<<8 | 0):
+ case ODIV_ | (gc.TFLOAT32<<8 | 0):
return x86.AFDIVF
- case gc.ODIV<<16 | (gc.TFLOAT32<<8 | Frev):
+ case ODIV_ | (gc.TFLOAT32<<8 | Frev):
return x86.AFDIVRF
- case gc.ODIV<<16 | (gc.TFLOAT64<<8 | 0):
+ case ODIV_ | (gc.TFLOAT64<<8 | 0):
return x86.AFDIVD
- case gc.ODIV<<16 | (gc.TFLOAT64<<8 | Frev):
+ case ODIV_ | (gc.TFLOAT64<<8 | Frev):
return x86.AFDIVRD
- case gc.ODIV<<16 | (gc.TFLOAT64<<8 | Fpop):
+ case ODIV_ | (gc.TFLOAT64<<8 | Fpop):
return x86.AFDIVDP
- case gc.ODIV<<16 | (gc.TFLOAT64<<8 | (Fpop | Frev)):
+ case ODIV_ | (gc.TFLOAT64<<8 | (Fpop | Frev)):
return x86.AFDIVRDP
- case gc.OCMP<<16 | (gc.TFLOAT32<<8 | 0):
+ case OCMP_ | (gc.TFLOAT32<<8 | 0):
return x86.AFCOMF
- case gc.OCMP<<16 | (gc.TFLOAT32<<8 | Fpop):
+ case OCMP_ | (gc.TFLOAT32<<8 | Fpop):
return x86.AFCOMFP
- case gc.OCMP<<16 | (gc.TFLOAT64<<8 | 0):
+ case OCMP_ | (gc.TFLOAT64<<8 | 0):
return x86.AFCOMD
- case gc.OCMP<<16 | (gc.TFLOAT64<<8 | Fpop):
+ case OCMP_ | (gc.TFLOAT64<<8 | Fpop):
return x86.AFCOMDP
- case gc.OCMP<<16 | (gc.TFLOAT64<<8 | Fpop2):
+ case OCMP_ | (gc.TFLOAT64<<8 | Fpop2):
return x86.AFCOMDPP
- case gc.OMINUS<<16 | (gc.TFLOAT32<<8 | 0):
+ case OMINUS_ | (gc.TFLOAT32<<8 | 0):
return x86.AFCHS
- case gc.OMINUS<<16 | (gc.TFLOAT64<<8 | 0):
+ case OMINUS_ | (gc.TFLOAT64<<8 | 0):
return x86.AFCHS
}
gins(as, &n1, n2)
}
-func ginscmp(op int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
- if gc.Isint[t.Etype] || int(t.Etype) == gc.Tptr {
+func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+ if gc.Isint[t.Etype] || t.Etype == gc.Tptr {
if (n1.Op == gc.OLITERAL || n1.Op == gc.OADDR && n1.Left.Op == gc.ONAME) && n2.Op != gc.OLITERAL {
// Reverse comparison to place constant (including address constant) last.
op = gc.Brrev(op)